if (unlikely(!__vsyscall_gtod_data.sysctl_enabled))
return time_syscall(t);
- vgettimeofday(&tv, 0);
+ vgettimeofday(&tv, NULL);
result = tv.tv_sec;
if (t)
*t = result;
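
This first hunk is representative of the whole series: a literal 0 passed or compared where a pointer is expected becomes NULL. The change is behavior-neutral; its point is readability and silencing sparse's "Using plain integer as NULL pointer" warning. A standalone illustration (hypothetical function, not the vsyscall code):

#include <stddef.h>     /* NULL */

struct timezone;

/* Hypothetical stand-in for a gettimeofday-style call; the second
 * argument is a pointer, so a caller that wants no timezone data
 * should pass NULL rather than a bare 0. */
static int get_time_example(long *sec, struct timezone *tz)
{
        (void)tz;       /* timezone unused, as in the hunk above */
        *sec = 0;
        return 0;
}

static long caller(void)
{
        long sec;

        get_time_example(&sec, NULL);   /* not: get_time_example(&sec, 0) */
        return sec;
}
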
/* Filter out DMA modes if the device has been configured by
the BIOS as PIO only */
- if (adev->link->ap->ioaddr.bmdma_addr == 0)
+ if (adev->link->ap->ioaddr.bmdma_addr == NULL)
xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
return xfer_mask;
}
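
The libata hunk clears the MWDMA and UDMA bits out of the transfer-mode mask when the BMDMA register block was never mapped. Reduced to the mask arithmetic alone, as a runnable sketch (local stand-in constants, not the real ATA_MASK_* values):

#include <stdio.h>

#define MASK_PIO        0x01u
#define MASK_MWDMA      0x02u
#define MASK_UDMA       0x04u

int main(void)
{
        unsigned long xfer_mask = MASK_PIO | MASK_MWDMA | MASK_UDMA;
        void *bmdma_addr = NULL;        /* BIOS left the port PIO-only */

        if (bmdma_addr == NULL)
                xfer_mask &= ~(MASK_MWDMA | MASK_UDMA);

        printf("xfer_mask = %#lx\n", xfer_mask);        /* 0x1: PIO only */
        return 0;
}
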
unsigned long flags;
int map, block;
- if ((page = pool_find_page (pool, dma)) == 0) {
+ if ((page = pool_find_page(pool, dma)) == NULL) {
if (pool->dev)
dev_err(pool->dev, "dma_pool_free %s, %p/%lx (bad dma)\n",
pool->name, vaddr, (unsigned long) dma);
void add_interrupt_randomness(int irq)
{
- if (irq >= NR_IRQS || irq_timer_state[irq] == 0)
+ if (irq >= NR_IRQS || irq_timer_state[irq] == NULL)
return;
DEBUG_ENT("irq event %d\n", irq);
{
struct autofs_wait_queue *wq, **wql;
- for ( wql = &sbi->queues ; (wq = *wql) != 0 ; wql = &wq->next ) {
+ for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) {
if ( wq->wait_queue_token == wait_queue_token )
break;
}
struct autofs_wait_queue *wq, **wql;
mutex_lock(&sbi->wq_mutex);
- for (wql = &sbi->queues ; (wq = *wql) != 0 ; wql = &wq->next) {
+ for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) {
if (wq->wait_queue_token == wait_queue_token)
break;
}
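
Both autofs hunks use the same idiom: the list is walked through a pointer-to-pointer (wql), so the node found can later be unlinked with a single *wql = wq->next, with no separate "previous" variable. A self-contained sketch of the idiom (illustrative names, not the autofs code):

#include <stddef.h>

struct wait_node {
        unsigned long token;
        struct wait_node *next;
};

/* Unlink and return the node carrying 'token', or NULL if none matches. */
static struct wait_node *unlink_by_token(struct wait_node **head,
                                         unsigned long token)
{
        struct wait_node *wq, **wql;

        for (wql = head; (wq = *wql) != NULL; wql = &wq->next) {
                if (wq->token == token) {
                        *wql = wq->next;        /* splice out, no 'prev' needed */
                        return wq;
                }
        }
        return NULL;
}
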
int i;
for (i = 0; i < ARRAY_SIZE(ioctl_start); i++) {
- if (ioctl_start[i].next != 0) {
+ if (ioctl_start[i].next) {
printk("ioctl translation %d bad\n",i);
return -1;
}
* holding "epmutex" we can be sure that no file cleanup code will hit
* us during this operation. So we can avoid the lock on "ep->lock".
*/
- while ((rbp = rb_first(&ep->rbr)) != 0) {
+ while ((rbp = rb_first(&ep->rbr)) != NULL) {
epi = rb_entry(rbp, struct epitem, rbn);
ep_remove(ep, epi);
}
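
The eventpoll hunk is the standard way to drain an rbtree: repeatedly take the leftmost node with rb_first() until it returns NULL (here ep_remove() deletes each node it is handed). The bare pattern, as a sketch against the kernel rbtree API with a hypothetical payload struct:

#include <linux/rbtree.h>
#include <linux/slab.h>

struct item {
        struct rb_node rbn;
        /* payload ... */
};

/* Remove and free every node; rb_first() returns NULL once empty. */
static void drain_tree(struct rb_root *root)
{
        struct rb_node *rbp;

        while ((rbp = rb_first(root)) != NULL) {
                struct item *it = rb_entry(rbp, struct item, rbn);

                rb_erase(rbp, root);
                kfree(it);
        }
}
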
struct inode *inode = dentry->d_inode;
int ret = 0;
- J_ASSERT(ext3_journal_current_handle() == 0);
+ J_ASSERT(ext3_journal_current_handle() == NULL);
/*
* data=writeback:
}
if (buffer_new(&dummy)) {
J_ASSERT(create != 0);
- J_ASSERT(handle != 0);
+ J_ASSERT(handle != NULL);
/*
* Now that we do not always journal data, we should
if (IS_ERR(t))
return PTR_ERR(t);
- wait_event(journal->j_wait_done_commit, journal->j_task != 0);
+ wait_event(journal->j_wait_done_commit, journal->j_task != NULL);
return 0;
}
while (journal->j_task) {
wake_up(&journal->j_wait_commit);
spin_unlock(&journal->j_state_lock);
- wait_event(journal->j_wait_done_commit, journal->j_task == 0);
+ wait_event(journal->j_wait_done_commit,
+ journal->j_task == NULL);
spin_lock(&journal->j_state_lock);
}
spin_unlock(&journal->j_state_lock);
atomic_inc(&nr_journal_heads);
#endif
ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
- if (ret == 0) {
+ if (ret == NULL) {
jbd_debug(1, "out of memory for journal_head\n");
if (time_after(jiffies, last_warning + 5*HZ)) {
printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
__FUNCTION__);
last_warning = jiffies;
}
- while (ret == 0) {
+ while (ret == NULL) {
yield();
ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
}
}
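
This jbd hunk sits in an allocation that is not allowed to fail: on NULL it warns (rate-limited to one message per five seconds) and retries after yield(). Stripped of the statistics, the retry loop is just the following (sketch with generic types, not the jbd function itself):

#include <linux/slab.h>
#include <linux/sched.h>

/* Allocate from 'cache', retrying until it succeeds; only usable
 * where the caller genuinely cannot report failure, as in jbd here. */
static void *alloc_nofail(struct kmem_cache *cache)
{
        void *p = kmem_cache_alloc(cache, GFP_NOFS);

        while (p == NULL) {
                yield();        /* let reclaim make progress, then retry */
                p = kmem_cache_alloc(cache, GFP_NOFS);
        }
        return p;
}
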
/* That test should have eliminated the following case: */
- J_ASSERT_JH(jh, jh->b_frozen_data == 0);
+ J_ASSERT_JH(jh, jh->b_frozen_data == NULL);
JBUFFER_TRACE(jh, "file as BJ_Metadata");
spin_lock(&journal->j_list_lock);
J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
if (jh->b_jlist != BJ_None)
- J_ASSERT_JH(jh, transaction != 0);
+ J_ASSERT_JH(jh, transaction != NULL);
switch (jh->b_jlist) {
case BJ_None:
if (buffer_locked(bh) || buffer_dirty(bh))
goto out;
- if (jh->b_next_transaction != 0)
+ if (jh->b_next_transaction != NULL)
goto out;
spin_lock(&journal->j_list_lock);
- if (jh->b_transaction != 0 && jh->b_cp_transaction == 0) {
+ if (jh->b_transaction != NULL && jh->b_cp_transaction == NULL) {
if (jh->b_jlist == BJ_SyncData || jh->b_jlist == BJ_Locked) {
/* A written-back ordered data buffer */
JBUFFER_TRACE(jh, "release data");
journal_remove_journal_head(bh);
__brelse(bh);
}
- } else if (jh->b_cp_transaction != 0 && jh->b_transaction == 0) {
+ } else if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) {
/* written-back checkpointed metadata buffer */
if (jh->b_jlist == BJ_None) {
JBUFFER_TRACE(jh, "remove from checkpoint list");
J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
J_ASSERT_JH(jh, jh->b_transaction == transaction ||
- jh->b_transaction == 0);
+ jh->b_transaction == NULL);
if (jh->b_transaction && jh->b_jlist == jlist)
return;
int c, nc;
const struct utf8_table *t;
- if (s == 0)
+ if (!s)
return 0;
l = wc;
unsigned char *buf;
buf = kmalloc(size, GFP_KERNEL);
- if (buf == 0)
+ if (!buf)
return -1;
minixsb = (struct minix_super_block *) buf;
crd_infd = in_fd;
crd_outfd = out_fd;
inbuf = kmalloc(INBUFSIZ, GFP_KERNEL);
- if (inbuf == 0) {
+ if (!inbuf) {
printk(KERN_ERR "RAMDISK: Couldn't allocate gzip buffer\n");
return -1;
}
window = kmalloc(WSIZE, GFP_KERNEL);
- if (window == 0) {
+ if (!window) {
printk(KERN_ERR "RAMDISK: Couldn't allocate gzip window\n");
kfree(inbuf);
return -1;
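
The RAMDISK hunks also show the usual unwind shape for multiple allocations: when the second kmalloc() fails, the first buffer must be freed before returning. In isolation (sketch, illustrative names):

#include <linux/slab.h>
#include <linux/errno.h>

static int alloc_buffers(void **inbuf, void **window,
                         size_t in_sz, size_t win_sz)
{
        *inbuf = kmalloc(in_sz, GFP_KERNEL);
        if (!*inbuf)
                return -ENOMEM;

        *window = kmalloc(win_sz, GFP_KERNEL);
        if (!*window) {
                kfree(*inbuf);  /* unwind the earlier allocation */
                *inbuf = NULL;
                return -ENOMEM;
        }
        return 0;
}
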
*/
void drop_futex_key_refs(union futex_key *key)
{
- if (key->both.ptr == 0)
+ if (!key->both.ptr)
return;
switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
case FUT_OFF_INODE:
retry:
lock_ptr = q->lock_ptr;
barrier();
- if (lock_ptr != 0) {
+ if (lock_ptr != NULL) {
spin_lock(lock_ptr);
/*
* q->lock_ptr can change between reading it and
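
The futex hunk touches a lockless idiom worth spelling out: q->lock_ptr is read without a lock and may be retargeted by another task before spin_lock() returns, so the real code re-reads it under the lock and retries on mismatch. The skeleton of that pattern (sketch; the futex bookkeeping around it is omitted):

#include <linux/spinlock.h>
#include <linux/compiler.h>     /* barrier(), unlikely() */

struct queued {
        spinlock_t *lock_ptr;   /* may be changed concurrently */
};

static void lock_queue(struct queued *q)
{
        spinlock_t *lock_ptr;

retry:
        lock_ptr = q->lock_ptr;
        barrier();              /* forbid the compiler re-reading q->lock_ptr */
        if (lock_ptr != NULL) {
                spin_lock(lock_ptr);
                if (unlikely(lock_ptr != q->lock_ptr)) {
                        /* it moved while we waited for the lock: restart */
                        spin_unlock(lock_ptr);
                        goto retry;
                }
        }
}
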
size_t uchunk, mchunk;
page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
- if (page == 0) {
+ if (!page) {
result = -ENOMEM;
goto out;
}
size_t uchunk, mchunk;
page = pfn_to_page(maddr >> PAGE_SHIFT);
- if (page == 0) {
+ if (!page) {
result = -ENOMEM;
goto out;
}
 * size such that we can guarantee to record the reservation. */
if (&rg->link == head || t < rg->from) {
nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
- if (nrg == 0)
+ if (!nrg)
return -ENOMEM;
nrg->from = f;
nrg->to = f;
if ((addr <= new_addr) && (addr+old_len) > new_addr)
goto out;
- ret = security_file_mmap(0, 0, 0, 0, new_addr, 1);
+ ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
if (ret)
goto out;
goto out;
}
- ret = security_file_mmap(0, 0, 0, 0, new_addr, 1);
+ ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
if (ret)
goto out;
}
*/
if (priority < 0)
priority = 0;
- for (i = 0; zones[i] != 0; i++) {
+ for (i = 0; zones[i] != NULL; i++) {
struct zone *zone = zones[i];
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
if (dst) {
struct dst_entry *dst_test;
- for (dst_test = dst; dst_test != 0;
+ for (dst_test = dst; dst_test != NULL;
dst_test = dst_test->child) {
struct xfrm_state *x = dst_test->xfrm;
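
Finally, none of these hunks changes generated code: for any pointer p, the tests p == 0, p == NULL, and !p are identical in C. The series only settles on the forms that read as pointer tests and keep sparse quiet. A standalone check:

#include <assert.h>
#include <stddef.h>

int main(void)
{
        int x = 0;
        int *p = NULL;
        int *q = &x;

        /* the three spellings agree for a null pointer... */
        assert((p == 0) && (p == NULL) && !p);
        /* ...and for a non-null one */
        assert(!(q == 0) && !(q == NULL) && !!q);
        return 0;
}
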