Ran the new tipc stack through sparse.
The following patch fixes all cases where 0 was used
in place of NULL.
Use NULL to document that these are pointers and to silence sparse.
This brought the sparse warning count down by 127, to 24 warnings.
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Per Liden <per.liden@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
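
For reference, a minimal sketch of the pattern being fixed throughout the
patch (not taken from the TIPC sources; the struct layout and the function
bearer_find_example are made up for illustration, and NULL is pulled from
<stddef.h> here rather than the kernel headers). Initialising or returning
a pointer with the integer constant 0 is what triggers sparse's
"Using plain integer as NULL pointer" warning; writing NULL instead keeps
the behaviour identical while documenting that the value is a pointer:

	#include <stddef.h>	/* NULL (userspace stand-in for the kernel header) */

	struct bearer {
		int active;
	};

	/* Before the patch, code like this used the plain integer 0:
	 *
	 *	struct bearer *found = 0;
	 *	...
	 *	return 0;
	 *
	 * which sparse flags on every occurrence. The NULL form below is
	 * what the patch converts such sites to.
	 */
	static struct bearer *bearer_find_example(struct bearer *list, int n)
	{
		struct bearer *found = NULL;
		int i;

		for (i = 0; i < n; i++) {
			if (list[i].active) {
				found = &list[i];
				break;
			}
		}
		return found;	/* may legitimately be NULL: no match */
	}
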
msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
msg_set_bcast_tag(msg, tipc_own_tag);
- if (tipc_bearer_send(&bcbearer->bearer, buf, 0)) {
+ if (tipc_bearer_send(&bcbearer->bearer, buf, NULL)) {
bcl->stats.sent_nacks++;
buf_discard(buf);
} else {
#define MAX_ADDR_STR 32
-static struct media *media_list = 0;
+static struct media *media_list = NULL;
static u32 media_count = 0;
-struct bearer *tipc_bearers = 0;
+struct bearer *tipc_bearers = NULL;
/**
* media_name_valid - validate media name
if (!strcmp(m_ptr->name, name))
return m_ptr;
}
- return 0;
+ return NULL;
}
/**
if (b_ptr->active && (!strcmp(b_ptr->publ.name, name)))
return b_ptr;
}
- return 0;
+ return NULL;
}
/**
if (!strcmp(b_if_name, if_name))
return b_ptr;
}
- return 0;
+ return NULL;
}
/**
int tipc_block_bearer(const char *name)
{
- struct bearer *b_ptr = 0;
+ struct bearer *b_ptr = NULL;
struct link *l_ptr;
struct link *temp_l_ptr;
} else {
kfree(tipc_bearers);
kfree(media_list);
- tipc_bearers = 0;
- media_list = 0;
+ tipc_bearers = NULL;
+ media_list = NULL;
res = -ENOMEM;
}
write_unlock_bh(&tipc_net_lock);
}
kfree(tipc_bearers);
kfree(media_list);
- tipc_bearers = 0;
- media_list = 0;
+ tipc_bearers = NULL;
+ media_list = NULL;
media_count = 0;
}
u32 lower, u32 upper);
struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest);
-struct node **tipc_local_nodes = 0;
+struct node **tipc_local_nodes = NULL;
struct node_map tipc_cltr_bcast_nodes = {0,{0,}};
u32 tipc_highest_allowed_slave = 0;
c_ptr = (struct cluster *)kmalloc(sizeof(*c_ptr), GFP_ATOMIC);
if (c_ptr == NULL)
- return 0;
+ return NULL;
memset(c_ptr, 0, sizeof(*c_ptr));
c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
c_ptr->nodes = (struct node **)kmalloc(alloc, GFP_ATOMIC);
if (c_ptr->nodes == NULL) {
kfree(c_ptr);
- return 0;
+ return NULL;
}
memset(c_ptr->nodes, 0, alloc);
if (in_own_cluster(addr))
}
else {
kfree(c_ptr);
- c_ptr = 0;
+ c_ptr = NULL;
}
return c_ptr;
assert(!in_own_cluster(c_ptr->addr));
if (!c_ptr->highest_node)
- return 0;
+ return NULL;
/* Start entry must be random */
while (mask > c_ptr->highest_node) {
if (tipc_node_has_active_links(c_ptr->nodes[n_num]))
return c_ptr->nodes[n_num];
}
- return 0;
+ return NULL;
}
/*
if (z_ptr)
return z_ptr->clusters[1];
- return 0;
+ return NULL;
}
#endif
memset(&mng, 0, sizeof(mng));
INIT_LIST_HEAD(&mng.link_subscribers);
- res = tipc_attach(&mng.user_ref, 0, 0);
+ res = tipc_attach(&mng.user_ref, NULL, NULL);
if (res)
goto failed;
- res = tipc_createport(mng.user_ref, 0, TIPC_CRITICAL_IMPORTANCE,
+ res = tipc_createport(mng.user_ref, NULL, TIPC_CRITICAL_IMPORTANCE,
NULL, NULL, NULL,
NULL, cfg_named_msg_event, NULL,
NULL, &mng.port_ref);
pb->crs = pb->buf = raw;
pb->size = sz;
- pb->next = 0;
+ pb->next = NULL;
pb->buf[0] = 0;
pb->buf[sz-1] = ~0;
}
}
}
pb_next = pb->next;
- pb->next = 0;
+ pb->next = NULL;
pb = pb_next;
}
spin_unlock_bh(&print_lock);
static void disable_bearer(struct tipc_bearer *tb_ptr)
{
- ((struct eth_bearer *)tb_ptr->usr_handle)->bearer = 0;
+ ((struct eth_bearer *)tb_ptr->usr_handle)->bearer = NULL;
}
/**
for (i = 0; i < MAX_ETH_BEARERS ; i++) {
if (eth_bearers[i].bearer) {
eth_bearers[i].bearer->blocked = 1;
- eth_bearers[i].bearer = 0;
+ eth_bearers[i].bearer = NULL;
}
if (eth_bearers[i].dev) {
dev_remove_pack(&eth_bearers[i].tipc_packet_type);
if (win <= 0)
break;
list_del_init(&p_ptr->wait_list);
- p_ptr->congested_link = 0;
+ p_ptr->congested_link = NULL;
assert(p_ptr->wakeup);
spin_lock_bh(p_ptr->publ.lock);
p_ptr->publ.congested = 0;
fragm_crs = 0;
fragm_rest = 0;
sect_rest = 0;
- sect_crs = 0;
+ sect_crs = NULL;
curr_sect = -1;
/* Prepare reusable fragment header: */
msg_dbg(buf_msg(buf), ">DEF-PROT>");
l_ptr->unacked_window = 0;
buf_discard(buf);
- l_ptr->proto_msg_queue = 0;
+ l_ptr->proto_msg_queue = NULL;
return TIPC_OK;
} else {
msg_dbg(buf_msg(buf), "|>DEF-PROT>");
struct sk_buff **tail,
struct sk_buff *buf)
{
- struct sk_buff *prev = 0;
+ struct sk_buff *prev = NULL;
struct sk_buff *crs = *head;
u32 seq_no = msg_seqno(buf_msg(buf));
void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
{
- struct sk_buff *buf = 0;
+ struct sk_buff *buf = NULL;
struct tipc_msg *msg = l_ptr->pmsg;
u32 msg_size = sizeof(l_ptr->proto_msg);
}
}
exit:
- *buf = 0;
+ *buf = NULL;
buf_discard(tunnel_buf);
return 0;
}
int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
struct tipc_msg **m)
{
- struct sk_buff *prev = 0;
+ struct sk_buff *prev = NULL;
struct sk_buff *fbuf = *fb;
struct tipc_msg *fragm = buf_msg(fbuf);
struct sk_buff *pbuf = *pending;
u32 long_msg_seq_no = msg_long_msgno(fragm);
- *fb = 0;
+ *fb = NULL;
msg_dbg(fragm,"FRG<REC<");
/* Is there an incomplete message waiting for this fragment? */
static void link_check_defragm_bufs(struct link *l_ptr)
{
- struct sk_buff *prev = 0;
- struct sk_buff *next = 0;
+ struct sk_buff *prev = NULL;
+ struct sk_buff *next = NULL;
struct sk_buff *buf = l_ptr->defragm_buf;
if (!buf)
struct link *l_ptr;
if (!link_name_validate(name, &link_name_parts))
- return 0;
+ return NULL;
b_ptr = tipc_bearer_find_interface(link_name_parts.if_local);
if (!b_ptr)
- return 0;
+ return NULL;
*node = tipc_node_find(link_name_parts.addr_peer);
if (!*node)
- return 0;
+ return NULL;
l_ptr = (*node)->links[b_ptr->identity];
if (!l_ptr || strcmp(l_ptr->name, name))
- return 0;
+ return NULL;
return l_ptr;
}
void tipc_named_node_up(unsigned long node)
{
struct publication *publ;
- struct distr_item *item = 0;
- struct sk_buff *buf = 0;
+ struct distr_item *item = NULL;
+ struct sk_buff *buf = NULL;
u32 left = 0;
u32 rest;
u32 max_item_buf;
"<%u.%u.%u>\n", tipc_zone(node),
tipc_cluster(node), tipc_node(node));
tipc_link_send(buf, node, node);
- buf = 0;
+ buf = NULL;
}
}
exit:
(struct publication *)kmalloc(sizeof(*publ), GFP_ATOMIC);
if (publ == NULL) {
warn("Memory squeeze; failed to create publication\n");
- return 0;
+ return NULL;
}
memset(publ, 0, sizeof(*publ));
warn("Memory squeeze; failed to create name sequence\n");
kfree(nseq);
kfree(sseq);
- return 0;
+ return NULL;
}
memset(nseq, 0, sizeof(*nseq));
else
return &sseqs[mid];
}
- return 0;
+ return NULL;
}
/**
if ((sseq->lower != lower) || (sseq->upper != upper)) {
warn("Overlapping publ <%u,%u,%u>\n", type, lower, upper);
- return 0;
+ return NULL;
}
} else {
u32 inspos;
if ((inspos < nseq->first_free) &&
(upper >= nseq->sseqs[inspos].lower)) {
warn("Overlapping publ <%u,%u,%u>\n", type, lower, upper);
- return 0;
+ return NULL;
}
/* Ensure there is space for new sub-sequence */
nseq->alloc *= 2;
} else {
warn("Memory squeeze; failed to create sub-sequence\n");
- return 0;
+ return NULL;
}
}
dbg("Have %u sseqs for type %u\n", nseq->alloc, type);
publ = publ_create(type, lower, upper, scope, node, port, key);
if (!publ)
- return 0;
+ return NULL;
dbg("inserting publ %x, node=%x publ->node=%x, subscr->node=%x\n",
publ, node, publ->node, publ->subscr.node);
i, &nseq->sseqs[i], nseq->sseqs[i].lower,
nseq->sseqs[i].upper);
}
- return 0;
+ return NULL;
}
dbg("nameseq_remove: seq: %x, sseq %x, <%u,%u> key %u\n",
nseq, sseq, nseq->type, inst, key);
prev->zone_list_next = publ->zone_list_next;
sseq->zone_list = publ->zone_list_next;
} else {
- sseq->zone_list = 0;
+ sseq->zone_list = NULL;
}
if (in_own_cluster(node)) {
prev->cluster_list_next = publ->cluster_list_next;
sseq->cluster_list = publ->cluster_list_next;
} else {
- sseq->cluster_list = 0;
+ sseq->cluster_list = NULL;
}
}
prev->node_list_next = publ->node_list_next;
sseq->node_list = publ->node_list_next;
} else {
- sseq->node_list = 0;
+ sseq->node_list = NULL;
}
}
assert(!publ->node || (publ->node == node));
}
}
- return 0;
+ return NULL;
};
struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
if (lower > upper) {
warn("Failed to publish illegal <%u,%u,%u>\n",
type, lower, upper);
- return 0;
+ return NULL;
}
dbg("Publishing <%u,%u,%u> from %x\n", type, lower, upper, node);
dbg("tipc_nametbl_insert_publ: created %x\n", seq);
}
if (!seq)
- return 0;
+ return NULL;
assert(seq->type == type);
return tipc_nameseq_insert_publ(seq, type, lower, upper,
struct name_seq *seq = nametbl_find_seq(type);
if (!seq)
- return 0;
+ return NULL;
dbg("Withdrawing <%u,%u> from %x\n", type, lower, node);
publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key);
u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
{
struct sub_seq *sseq;
- struct publication *publ = 0;
+ struct publication *publ = NULL;
struct name_seq *seq;
u32 ref;
if (table.local_publ_count >= tipc_max_publications) {
warn("Failed publish: max %u local publication\n",
tipc_max_publications);
- return 0;
+ return NULL;
}
if ((type < TIPC_RESERVED_TYPES) && !atomic_read(&rsv_publ_ok)) {
warn("Failed to publish reserved name <%u,%u,%u>\n",
type, lower, upper);
- return 0;
+ return NULL;
}
write_lock_bh(&tipc_nametbl_lock);
*/
rwlock_t tipc_net_lock = RW_LOCK_UNLOCKED;
-struct network tipc_net = { 0 };
+struct network tipc_net = { NULL };
struct node *tipc_net_select_remote_node(u32 addr, u32 ref)
{
tipc_zone_delete(tipc_net.zones[z_num]);
}
kfree(tipc_net.zones);
- tipc_net.zones = 0;
+ tipc_net.zones = NULL;
}
static void net_route_named_msg(struct sk_buff *buf)
u32 i;
u32 highest_prio = 0;
- active[0] = active[1] = 0;
+ active[0] = active[1] = NULL;
for (i = 0; i < MAX_BEARERS; i++) {
struct link *l_ptr = n_ptr->links[i];
err("Attempt to create third link to %s\n",
addr_string_fill(addr_string, n_ptr->addr));
- return 0;
+ return NULL;
}
if (!n_ptr->links[bearer_id]) {
l_ptr->b_ptr->publ.name,
addr_string_fill(addr_string, l_ptr->addr));
}
- return 0;
+ return NULL;
}
void tipc_node_detach_link(struct node *n_ptr, struct link *l_ptr)
{
- n_ptr->links[l_ptr->b_ptr->identity] = 0;
+ n_ptr->links[l_ptr->b_ptr->identity] = NULL;
tipc_net.zones[tipc_zone(l_ptr->addr)]->links--;
n_ptr->link_cnt--;
}
/* Notify subscribers */
list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) {
- ns->node = 0;
+ ns->node = NULL;
list_del_init(&ns->nodesub_list);
tipc_k_signal((Handler)ns->handle_node_down,
(unsigned long)ns->usr_handle);
u32 router_addr;
if (!tipc_addr_domain_valid(addr))
- return 0;
+ return NULL;
/* Look for direct link to destination processsor */
n_ptr = tipc_node_find(addr);
/* Cluster local system nodes *must* have direct links */
if (!is_slave(addr) && in_own_cluster(addr))
- return 0;
+ return NULL;
/* Look for cluster local router with direct link to node */
router_addr = tipc_node_select_router(n_ptr, selector);
/* Slave nodes can only be accessed within own cluster via a
known router with direct link -- if no router was found,give up */
if (is_slave(addr))
- return 0;
+ return NULL;
/* Inter zone/cluster -- find any direct link to remote cluster */
addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
if (router_addr)
return tipc_node_select(router_addr, selector);
- return 0;
+ return NULL;
}
/**
if (c_ptr)
return c_ptr->nodes[tipc_node(addr)];
}
- return 0;
+ return NULL;
}
static inline struct node *tipc_node_select(u32 addr, u32 selector)
void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr,
void *usr_handle, net_ev_handler handle_down)
{
- node_sub->node = 0;
+ node_sub->node = NULL;
if (addr == tipc_own_addr)
return;
if (!tipc_addr_node_valid(addr)) {
#define MAX_REJECT_SIZE 1024
-static struct sk_buff *msg_queue_head = 0;
-static struct sk_buff *msg_queue_tail = 0;
+static struct sk_buff *msg_queue_head = NULL;
+static struct sk_buff *msg_queue_tail = NULL;
spinlock_t tipc_port_list_lock = SPIN_LOCK_UNLOCKED;
static spinlock_t queue_lock = SPIN_LOCK_UNLOCKED;
p_ptr->publ.usr_handle = usr_handle;
INIT_LIST_HEAD(&p_ptr->wait_list);
INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
- p_ptr->congested_link = 0;
+ p_ptr->congested_link = NULL;
p_ptr->max_pkt = MAX_PKT_DEFAULT;
p_ptr->dispatcher = dispatcher;
p_ptr->wakeup = wakeup;
- p_ptr->user_port = 0;
+ p_ptr->user_port = NULL;
k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref);
spin_lock_bh(&tipc_port_list_lock);
INIT_LIST_HEAD(&p_ptr->publications);
int tipc_deleteport(u32 ref)
{
struct port *p_ptr;
- struct sk_buff *buf = 0;
+ struct sk_buff *buf = NULL;
- tipc_withdraw(ref, 0, 0);
+ tipc_withdraw(ref, 0, NULL);
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
- return 0;
+ return NULL;
handle = p_ptr->publ.usr_handle;
tipc_port_unlock(p_ptr);
return handle;
/* send self-abort message when rejecting on a connected port */
if (msg_connected(msg)) {
- struct sk_buff *abuf = 0;
+ struct sk_buff *abuf = NULL;
struct port *p_ptr = tipc_port_lock(msg_destport(msg));
if (p_ptr) {
static void port_timeout(unsigned long ref)
{
struct port *p_ptr = tipc_port_lock(ref);
- struct sk_buff *buf = 0;
+ struct sk_buff *buf = NULL;
if (!p_ptr || !p_ptr->publ.connected)
return;
static void port_handle_node_down(unsigned long ref)
{
struct port *p_ptr = tipc_port_lock(ref);
- struct sk_buff* buf = 0;
+ struct sk_buff* buf = NULL;
if (!p_ptr)
return;
u32 imp = msg_importance(&p_ptr->publ.phdr);
if (!p_ptr->publ.connected)
- return 0;
+ return NULL;
if (imp < TIPC_CRITICAL_IMPORTANCE)
imp++;
return port_build_proto_msg(p_ptr->publ.ref,
u32 imp = msg_importance(&p_ptr->publ.phdr);
if (!p_ptr->publ.connected)
- return 0;
+ return NULL;
if (imp < TIPC_CRITICAL_IMPORTANCE)
imp++;
return port_build_proto_msg(port_peerport(p_ptr),
struct tipc_msg *msg = buf_msg(buf);
struct port *p_ptr = tipc_port_lock(msg_destport(msg));
u32 err = TIPC_OK;
- struct sk_buff *r_buf = 0;
- struct sk_buff *abort_buf = 0;
+ struct sk_buff *r_buf = NULL;
+ struct sk_buff *abort_buf = NULL;
msg_dbg(msg, "PORT<RECV<:");
spin_lock_bh(&queue_lock);
buf = msg_queue_head;
- msg_queue_head = 0;
+ msg_queue_head = NULL;
spin_unlock_bh(&queue_lock);
while (buf) {
{
struct port *p_ptr;
struct user_port *up_ptr;
- tipc_continue_event cb = 0;
- void *uh = 0;
+ tipc_continue_event cb = NULL;
+ void *uh = NULL;
p_ptr = tipc_port_lock(ref);
if (p_ptr) {
void tipc_acknowledge(u32 ref, u32 ack)
{
struct port *p_ptr;
- struct sk_buff *buf = 0;
+ struct sk_buff *buf = NULL;
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
if (up_ptr == NULL) {
return -ENOMEM;
}
- ref = tipc_createport_raw(0, port_dispatcher, port_wakeup, importance);
+ ref = tipc_createport_raw(NULL, port_dispatcher, port_wakeup, importance);
p_ptr = tipc_port_lock(ref);
if (!p_ptr) {
kfree(up_ptr);
int tipc_shutdown(u32 ref)
{
struct port *p_ptr;
- struct sk_buff *buf = 0;
+ struct sk_buff *buf = NULL;
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
* because entry 0's reference field has the form XXXX|1--1.
*/
-struct ref_table tipc_ref_table = { 0 };
+struct ref_table tipc_ref_table = { NULL };
static rwlock_t ref_table_lock = RW_LOCK_UNLOCKED;
write_lock_bh(&ref_table_lock);
index_mask = sz - 1;
for (i = sz - 1; i >= 0; i--) {
- table[i].object = 0;
+ table[i].object = NULL;
table[i].lock = SPIN_LOCK_UNLOCKED;
table[i].data.next_plus_upper = (start & ~index_mask) + i - 1;
}
return;
vfree(tipc_ref_table.entries);
- tipc_ref_table.entries = 0;
+ tipc_ref_table.entries = NULL;
}
/**
assert(entry->data.reference == ref);
/* mark entry as unused */
- entry->object = 0;
+ entry->object = NULL;
if (tipc_ref_table.first_free == 0)
tipc_ref_table.first_free = index;
else
return r->object;
spin_unlock_bh(&r->lock);
}
- return 0;
+ return NULL;
}
/**
if (likely(r->data.reference == ref))
return r->object;
}
- return 0;
+ return NULL;
}
#endif
if (unlikely(protocol != 0))
return -EPROTONOSUPPORT;
- ref = tipc_createport_raw(0, &dispatch, &wakeupdispatch, TIPC_LOW_IMPORTANCE);
+ ref = tipc_createport_raw(NULL, &dispatch, &wakeupdispatch, TIPC_LOW_IMPORTANCE);
if (unlikely(!ref))
return -ENOMEM;
sock_lock(tsock);
buf = skb_dequeue(&sk->sk_receive_queue);
if (!buf)
- tsock->p->usr_handle = 0;
+ tsock->p->usr_handle = NULL;
sock_unlock(tsock);
if (!buf)
break;
return -ERESTARTSYS;
if (unlikely(!uaddr_len)) {
- res = tipc_withdraw(tsock->p->ref, 0, 0);
+ res = tipc_withdraw(tsock->p->ref, 0, NULL);
goto exit;
}
{
struct tipc_sock *tsock = tipc_sk(sock->sk);
struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
- struct msghdr m = {0,};
+ struct msghdr m = {NULL,};
struct sk_buff *buf;
struct tipc_msg *msg;
int res;
/* Send a 'SYN-' to destination */
m.msg_name = dest;
- if ((res = send_msg(0, sock, &m, 0)) < 0) {
+ if ((res = send_msg(NULL, sock, &m, 0)) < 0) {
sock->state = SS_DISCONNECTING;
return res;
}
msg_dbg(msg,"<ACC<: ");
if (!msg_data_sz(msg)) {
- struct msghdr m = {0,};
+ struct msghdr m = {NULL,};
- send_packet(0, newsock, &m, 0);
+ send_packet(NULL, newsock, &m, 0);
advance_queue(tsock);
} else {
sock_lock(tsock);
struct tipc_name_seq const *dest)
{
struct subscriber *subscriber;
- struct iovec msg_sect = {0, 0};
+ struct iovec msg_sect = {NULL, 0};
spinlock_t *subscriber_lock;
dbg("subscr_named_msg_event: orig = %x own = %x,\n",
tipc_createport(topsrv.user_ref,
(void *)(unsigned long)subscriber->ref,
importance,
- 0,
- 0,
+ NULL,
+ NULL,
subscr_conn_shutdown_event,
- 0,
- 0,
+ NULL,
+ NULL,
subscr_conn_msg_event,
- 0,
+ NULL,
&subscriber->port_ref);
if (subscriber->port_ref == 0) {
warn("Memory squeeze; failed to create subscription port\n");
INIT_LIST_HEAD(&topsrv.subscriber_list);
spin_lock_bh(&topsrv.lock);
- res = tipc_attach(&topsrv.user_ref, 0, 0);
+ res = tipc_attach(&topsrv.user_ref, NULL, NULL);
if (res) {
spin_unlock_bh(&topsrv.lock);
return res;
}
res = tipc_createport(topsrv.user_ref,
- 0,
+ NULL,
TIPC_CRITICAL_IMPORTANCE,
- 0,
- 0,
- 0,
- 0,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
subscr_named_msg_event,
- 0,
- 0,
+ NULL,
+ NULL,
&topsrv.setup_port);
if (res)
goto failed;
#define MAX_USERID 64
#define USER_LIST_SIZE ((MAX_USERID + 1) * sizeof(struct tipc_user))
-static struct tipc_user *users = 0;
+static struct tipc_user *users = NULL;
static u32 next_free_user = MAX_USERID + 1;
static spinlock_t reg_lock = SPIN_LOCK_UNLOCKED;
reg_callback(&users[id]);
}
kfree(users);
- users = 0;
+ users = NULL;
}
/**
struct _zone *tipc_zone_create(u32 addr)
{
- struct _zone *z_ptr = 0;
+ struct _zone *z_ptr = NULL;
u32 z_num;
if (!tipc_addr_domain_valid(addr))
- return 0;
+ return NULL;
z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC);
if (z_ptr != NULL) {
u32 c_num;
if (!z_ptr)
- return 0;
+ return NULL;
c_ptr = z_ptr->clusters[tipc_cluster(addr)];
if (!c_ptr)
- return 0;
+ return NULL;
n_ptr = tipc_cltr_select_node(c_ptr, ref);
if (n_ptr)
return n_ptr;
for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
c_ptr = z_ptr->clusters[c_num];
if (!c_ptr)
- return 0;
+ return NULL;
n_ptr = tipc_cltr_select_node(c_ptr, ref);
if (n_ptr)
return n_ptr;
}
- return 0;
+ return NULL;
}
u32 tipc_zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref)