 	if (shmem_acct_block(info->flags, pages))
 		return false;
+
+	if (sbinfo->max_blocks) {
+		if (percpu_counter_compare(&sbinfo->used_blocks,
+					   sbinfo->max_blocks - pages) > 0)
+			goto unacct;
+		percpu_counter_add(&sbinfo->used_blocks, pages);
+	}
+
 	spin_lock_irqsave(&info->lock, flags);
 	info->alloced += pages;
 	inode->i_blocks += pages * BLOCKS_PER_PAGE;
 	spin_unlock_irqrestore(&info->lock, flags);
 	inode->i_mapping->nrpages += pages;
-	if (!sbinfo->max_blocks)
-		return true;
-	if (percpu_counter_compare(&sbinfo->used_blocks,
-				   sbinfo->max_blocks - pages) > 0) {
-		inode->i_mapping->nrpages -= pages;
-		spin_lock_irqsave(&info->lock, flags);
-		info->alloced -= pages;
-		shmem_recalc_inode(inode);
-		spin_unlock_irqrestore(&info->lock, flags);
-		shmem_unacct_blocks(info->flags, pages);
-		return false;
-	}
-	percpu_counter_add(&sbinfo->used_blocks, pages);
 	return true;
+
+unacct:
+	shmem_unacct_blocks(info->flags, pages);
+	return false;
 }
 
 void shmem_uncharge(struct inode *inode, long pages)
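
The patch reorders shmem_charge() so that the sbinfo->max_blocks limit is checked before any inode state is touched: percpu_counter_compare() returns a positive value when used_blocks already exceeds max_blocks - pages, and in that case the only thing to unwind is the earlier shmem_acct_block() charge, via the new unacct label. The removed branch did the opposite, committing info->alloced, i_blocks and nrpages first and then rolling all of them back on failure. Below is a minimal userspace sketch of that check-before-commit shape; every name in it (struct quota, acct_blocks, unacct_blocks, charge, istats) is a hypothetical illustration for this note, not a kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct quota {
	_Atomic long used;
	long max;			/* 0 means "no limit" */
};

/* First-stage reservation, standing in for shmem_acct_block(). */
static bool acct_blocks(struct quota *q, long n)
{
	if (q->max && atomic_fetch_add(&q->used, n) + n > q->max) {
		atomic_fetch_sub(&q->used, n);	/* over the limit: undo */
		return false;
	}
	return true;
}

/* Release a first-stage reservation, like shmem_unacct_blocks(). */
static void unacct_blocks(struct quota *q, long n)
{
	if (q->max)
		atomic_fetch_sub(&q->used, n);
}

struct istats {
	long alloced;			/* stand-in for info->alloced */
	long nrpages;			/* stand-in for mapping->nrpages */
};

static bool charge(struct quota *mem, struct quota *blocks,
		   struct istats *st, long pages)
{
	if (!acct_blocks(mem, pages))
		return false;

	/*
	 * Check the second limit before touching any per-inode state,
	 * mirroring the patch: the failure path then only has to undo
	 * the single reservation taken above.  As in the kernel code,
	 * the compare and the add are not atomic as a pair; the race
	 * is tolerated, much like the slack in a percpu counter.
	 */
	if (blocks->max) {
		if (atomic_load(&blocks->used) + pages > blocks->max)
			goto unacct;
		atomic_fetch_add(&blocks->used, pages);
	}

	st->alloced += pages;		/* commit: nothing below can fail */
	st->nrpages += pages;
	return true;

unacct:
	unacct_blocks(mem, pages);
	return false;
}

int main(void)
{
	struct quota mem = { .max = 0 };	/* unlimited first stage */
	struct quota blocks = { .max = 8 };
	struct istats st = { 0 };

	printf("charge 4 -> %d\n", charge(&mem, &blocks, &st, 4)); /* 1 */
	printf("charge 8 -> %d\n", charge(&mem, &blocks, &st, 8)); /* 0, over max */
	return 0;
}

The design point the sketch mirrors is that every fallible check precedes the first state mutation, so the single goto target unwinds exactly one reservation instead of the four-field rollback the removed branch needed.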