rtas_error_log_max = rtas_get_error_log_max();
rtas_error_log_buffer_max = rtas_error_log_max + sizeof(int);
- rtas_log_buf = vmalloc(rtas_error_log_buffer_max*LOG_NUMBER);
+ rtas_log_buf = vmalloc(array_size(LOG_NUMBER,
+ rtas_error_log_buffer_max));
if (!rtas_log_buf) {
printk(KERN_ERR "rtasd: no memory\n");
return -ENOMEM;
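Throughout this series, array_size() is the saturating-multiply helper from include/linux/overflow.h: when the product would overflow size_t it returns SIZE_MAX, so vmalloc() fails cleanly and the caller takes its existing -ENOMEM path instead of receiving an undersized buffer. A minimal sketch of the idea, for reference only (the in-tree helper is built on check_mul_overflow(), not open-coded division):

#include <stddef.h>	/* size_t */
#include <stdint.h>	/* SIZE_MAX */

/* Sketch of array_size(); assumes nothing beyond ISO C. */
static inline size_t array_size_sketch(size_t a, size_t b)
{
	if (b && a > SIZE_MAX / b)
		return SIZE_MAX;	/* saturate: vmalloc(SIZE_MAX) fails */
	return a * b;
}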
npte = 1ul << (order - 4);
/* Allocate reverse map array */
- rev = vmalloc(sizeof(struct revmap_entry) * npte);
+ rev = vmalloc(array_size(npte, sizeof(struct revmap_entry)));
if (!rev) {
if (cma)
kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT));
static void *diag204_alloc_vbuf(int pages)
{
/* The buffer has to be page aligned! */
- diag204_buf_vmalloc = vmalloc(PAGE_SIZE * (pages + 1));
+ diag204_buf_vmalloc = vmalloc(array_size(PAGE_SIZE, (pages + 1)));
if (!diag204_buf_vmalloc)
return ERR_PTR(-ENOMEM);
diag204_buf = page_align_ptr(diag204_buf_vmalloc);
/* Allocate one syminfo structure per symbol. */
me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym);
- me->arch.syminfo = vmalloc(me->arch.nsyms *
- sizeof(struct mod_arch_syminfo));
+ me->arch.syminfo = vmalloc(array_size(sizeof(struct mod_arch_syminfo),
+ me->arch.nsyms));
if (!me->arch.syminfo)
return -ENOMEM;
symbols = (void *) hdr + symtab->sh_offset;
if (pages <= 0)
return;
- diag204_buf = vmalloc(PAGE_SIZE * pages);
+ diag204_buf = vmalloc(array_size(pages, PAGE_SIZE));
if (!diag204_buf)
return;
nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
pages = pages_array;
if (nr_pages > ARRAY_SIZE(pages_array))
- pages = vmalloc(nr_pages * sizeof(unsigned long));
+ pages = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
if (!pages)
return -ENOMEM;
need_ipte_lock = psw_bits(*psw).dat && !asce.r;
if (args->count == 0)
return 0;
- bits = vmalloc(sizeof(*bits) * args->count);
+ bits = vmalloc(array_size(sizeof(*bits), args->count));
if (!bits)
return -ENOMEM;
goto out;
r = -ENOMEM;
if (cpuid->nent) {
- cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) *
- cpuid->nent);
+ cpuid_entries =
+ vmalloc(array_size(sizeof(struct kvm_cpuid_entry),
+ cpuid->nent));
if (!cpuid_entries)
goto out;
r = -EFAULT;
fw_priv->page_array_size * 2);
struct page **new_pages;
- new_pages = vmalloc(new_array_size * sizeof(void *));
+ new_pages = vmalloc(array_size(new_array_size, sizeof(void *)));
if (!new_pages) {
fw_load_abort(fw_sysfs);
return -ENOMEM;
/* Called with ichan->chan_mutex held */
static int idmac_desc_alloc(struct idmac_channel *ichan, int n)
{
- struct idmac_tx_desc *desc = vmalloc(n * sizeof(struct idmac_tx_desc));
+ struct idmac_tx_desc *desc =
+ vmalloc(array_size(n, sizeof(struct idmac_tx_desc)));
struct idmac *idmac = to_idmac(ichan->dma_chan.device);
if (!desc)
* page-table instead (that's probably faster anyhow...).
*/
/* note: use vmalloc() because num_pages could be large... */
- page_map = vmalloc(num_pages * sizeof(struct page *));
+ page_map = vmalloc(array_size(num_pages, sizeof(struct page *)));
if (!page_map)
return NULL;
struct nv84_fence_priv *priv = drm->fence;
int i;
- priv->suspend = vmalloc(drm->chan.nr * sizeof(u32));
+ priv->suspend = vmalloc(array_size(sizeof(u32), drm->chan.nr));
if (priv->suspend) {
for (i = 0; i < drm->chan.nr; i++)
priv->suspend[i] = nouveau_bo_rd32(priv->bo, i*4);
DRM_DEBUG_DRIVER("%dx%d %d\n", mode_cmd.width,
mode_cmd.height, mode_cmd.pitches[0]);
- shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height);
+ shadow = vmalloc(array_size(mode_cmd.pitches[0], mode_cmd.height));
/* TODO: what's the usual response to memory allocation errors? */
BUG_ON(!shadow);
DRM_DEBUG_DRIVER("surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
radeon_gart_fini(rdev);
return -ENOMEM;
}
- rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
- rdev->gart.num_gpu_pages);
+ rdev->gart.pages_entry = vmalloc(array_size(sizeof(uint64_t),
+ rdev->gart.num_gpu_pages));
if (rdev->gart.pages_entry == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
DRM_MM_BUG_ON(!size);
ret = -ENOMEM;
- nodes = vmalloc(count * sizeof(*nodes));
+ nodes = vmalloc(array_size(count, sizeof(*nodes)));
if (!nodes)
goto err;
gart->iovmm_base = (dma_addr_t)res_remap->start;
gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT);
- gart->savedata = vmalloc(sizeof(u32) * gart->page_count);
+ gart->savedata = vmalloc(array_size(sizeof(u32), gart->page_count));
if (!gart->savedata) {
dev_err(dev, "failed to allocate context save area\n");
return -ENOMEM;
* Allocate space for the dictionary. This may be more than one page in
* length.
*/
- db->dict = vmalloc(hsize * sizeof(struct bsd_dict));
+ db->dict = vmalloc(array_size(hsize, sizeof(struct bsd_dict)));
if (!db->dict) {
bsd_free(db);
return NULL;
if (!decomp)
db->lens = NULL;
else {
- db->lens = vmalloc((maxmaxcode + 1) * sizeof(db->lens[0]));
+ db->lens = vmalloc(array_size(sizeof(db->lens[0]),
+ maxmaxcode + 1));
if (!db->lens) {
bsd_free(db);
return (NULL);
up(&gc->gc_sem);
- gc_rq->data = vmalloc(gc_rq->nr_secs * geo->csecs);
+ gc_rq->data = vmalloc(array_size(gc_rq->nr_secs, geo->csecs));
if (!gc_rq->data) {
pr_err("pblk: could not GC line:%d (%d/%d)\n",
line->id, *line->vsc, gc_rq->nr_secs);
uint16_t q[31], *p, *cached;
ssize_t ret;
- cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
+ cached = p = vmalloc(array_size(sizeof(uint16_t),
+ ca->sb.nbuckets));
if (!p)
return -ENOMEM;
nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
ht->hash_bits = __ffs(nr_buckets);
- ht->buckets = vmalloc(sizeof(*ht->buckets) * nr_buckets);
+ ht->buckets = vmalloc(array_size(nr_buckets, sizeof(*ht->buckets)));
if (!ht->buckets)
return -ENOMEM;
rh->shift = RH_HASH_SHIFT;
rh->prime = RH_HASH_MULT;
- rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
+ rh->buckets = vmalloc(array_size(nr_buckets, sizeof(*rh->buckets)));
if (!rh->buckets) {
DMERR("unable to allocate region hash bucket memory");
kfree(rh);
return -EINVAL;
}
- sctx->region_table = vmalloc(nr_slots * sizeof(region_table_slot_t));
+ sctx->region_table = vmalloc(array_size(nr_slots,
+ sizeof(region_table_slot_t)));
if (!sctx->region_table) {
ti->error = "Cannot allocate region table";
return -ENOMEM;
goto bad_mapping_pool;
}
- pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE);
+ pool->cell_sort_array =
+ vmalloc(array_size(CELL_SORT_ARRAY_SIZE,
+ sizeof(*pool->cell_sort_array)));
if (!pool->cell_sort_array) {
*error = "Error allocating cell sort array";
err_p = ERR_PTR(-ENOMEM);
if (dmxdev->demux->open(dmxdev->demux) < 0)
return -EUSERS;
- dmxdev->filter = vmalloc(dmxdev->filternum * sizeof(struct dmxdev_filter));
+ dmxdev->filter = vmalloc(array_size(sizeof(struct dmxdev_filter),
+ dmxdev->filternum));
if (!dmxdev->filter)
return -ENOMEM;
dvbdemux->cnt_storage = NULL;
dvbdemux->users = 0;
- dvbdemux->filter = vmalloc(dvbdemux->filternum * sizeof(struct dvb_demux_filter));
+ dvbdemux->filter = vmalloc(array_size(sizeof(struct dvb_demux_filter),
+ dvbdemux->filternum));
if (!dvbdemux->filter)
return -ENOMEM;
- dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed));
+ dvbdemux->feed = vmalloc(array_size(sizeof(struct dvb_demux_feed),
+ dvbdemux->feednum));
if (!dvbdemux->feed) {
vfree(dvbdemux->filter);
dvbdemux->filter = NULL;
ret = -ENOMEM;
meye.mchip_dev = pcidev;
- meye.grab_temp = vmalloc(MCHIP_NB_PAGES_MJPEG * PAGE_SIZE);
+ meye.grab_temp = vmalloc(array_size(PAGE_SIZE, MCHIP_NB_PAGES_MJPEG));
if (!meye.grab_temp)
goto outvmalloc;
if (!pt1_nr_tables)
return 0;
- tables = vmalloc(sizeof(struct pt1_table) * pt1_nr_tables);
+ tables = vmalloc(array_size(pt1_nr_tables, sizeof(struct pt1_table)));
if (tables == NULL)
return -ENOMEM;
int av7110_ipack_init(struct ipack *p, int size,
void (*func)(u8 *buf, int size, void *priv))
{
- if (!(p->buf = vmalloc(size*sizeof(u8)))) {
+ if (!(p->buf = vmalloc(size))) {
printk(KERN_WARNING "Couldn't allocate memory for ipack\n");
return -ENOMEM;
}
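Since sizeof(u8) is 1 by definition, this hunk drops the multiplication outright instead of wrapping it in array_size(); the requested size is unchanged and there is no product left to overflow.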
return -ENXIO;
icd->user_formats =
- vmalloc(fmts * sizeof(struct soc_camera_format_xlate));
+ vmalloc(array_size(fmts,
+ sizeof(struct soc_camera_format_xlate)));
if (!icd->user_formats)
return -ENOMEM;
if (NULL == pages[0])
return NULL;
- sglist = vmalloc(nr_pages * sizeof(*sglist));
+ sglist = vmalloc(array_size(nr_pages, sizeof(*sglist)));
if (NULL == sglist)
return NULL;
sg_init_table(sglist, nr_pages);
/* Set up virtual page map */
blocks = le32_to_cpu(header.FormattedSize) >> header.BlockSize;
- part->VirtualBlockMap = vmalloc(blocks * sizeof(uint32_t));
+ part->VirtualBlockMap = vmalloc(array_size(blocks, sizeof(uint32_t)));
if (!part->VirtualBlockMap)
goto out_XferInfo;
}
/* oops_page_used is a bit field */
- cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
- BITS_PER_LONG) * sizeof(unsigned long));
+ cxt->oops_page_used =
+ vmalloc(array_size(sizeof(unsigned long),
+ DIV_ROUND_UP(mtdoops_pages,
+ BITS_PER_LONG)));
if (!cxt->oops_page_used) {
printk(KERN_ERR "mtdoops: could not allocate page array\n");
return;
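The oops_page_used bitfield above needs one bit per oops page, so its backing store is DIV_ROUND_UP(mtdoops_pages, BITS_PER_LONG) longs; the scsi_debug hunk further down writes the same division as BITS_TO_LONGS(). A worked example with hypothetical numbers:

/* Illustration only: 100 one-bit flags on a 64-bit build.
 * DIV_ROUND_UP(100, 64) = 2 longs, so the allocation is
 * array_size(sizeof(unsigned long), 2) = 16 bytes; BITS_TO_LONGS(100)
 * expands to the same 2. */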
for (i = 0; i < MTDSWAP_TREE_CNT; i++)
d->trees[i].root = RB_ROOT;
- d->page_data = vmalloc(sizeof(int)*pages);
+ d->page_data = vmalloc(array_size(pages, sizeof(int)));
if (!d->page_data)
goto page_data_fail;
- d->revmap = vmalloc(sizeof(int)*blocks);
+ d->revmap = vmalloc(array_size(blocks, sizeof(int)));
if (!d->revmap)
goto revmap_fail;
return 0;
}
- ns->pages = vmalloc(ns->geom.pgnum * sizeof(union ns_mem));
+ ns->pages = vmalloc(array_size(sizeof(union ns_mem), ns->geom.pgnum));
if (!ns->pages) {
NS_ERR("alloc_device: unable to allocate page array\n");
return -ENOMEM;
if (!part->blocks)
goto err;
- part->sector_map = vmalloc(part->sector_count * sizeof(u_long));
+ part->sector_map = vmalloc(array_size(sizeof(u_long),
+ part->sector_count));
if (!part->sector_map) {
printk(KERN_ERR PREFIX "'%s': unable to allocate memory for "
"sector map", part->mbd.mtd->name);
iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
numa_node);
if (!iq->request_list)
- iq->request_list = vmalloc(sizeof(*iq->request_list) *
- num_descs);
+ iq->request_list =
+ vmalloc(array_size(num_descs,
+ sizeof(*iq->request_list)));
if (!iq->request_list) {
lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
/* allocate temporary buffer to store rings in */
i = max_t(int, interface->num_tx_queues, interface->num_rx_queues);
- temp_ring = vmalloc(i * sizeof(struct fm10k_ring));
+ temp_ring = vmalloc(array_size(i, sizeof(struct fm10k_ring)));
if (!temp_ring) {
err = -ENOMEM;
}
if (adapter->num_tx_queues > adapter->num_rx_queues)
- temp_ring = vmalloc(adapter->num_tx_queues *
- sizeof(struct igb_ring));
+ temp_ring = vmalloc(array_size(sizeof(struct igb_ring),
+ adapter->num_tx_queues));
else
- temp_ring = vmalloc(adapter->num_rx_queues *
- sizeof(struct igb_ring));
+ temp_ring = vmalloc(array_size(sizeof(struct igb_ring),
+ adapter->num_rx_queues));
if (!temp_ring) {
err = -ENOMEM;
/* allocate temporary buffer to store rings in */
i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
adapter->num_rx_queues);
- temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
+ temp_ring = vmalloc(array_size(i, sizeof(struct ixgbe_ring)));
if (!temp_ring) {
err = -ENOMEM;
}
if (new_tx_count != adapter->tx_ring_count) {
- tx_ring = vmalloc((adapter->num_tx_queues +
- adapter->num_xdp_queues) * sizeof(*tx_ring));
+ tx_ring = vmalloc(array_size(sizeof(*tx_ring),
+ adapter->num_tx_queues +
+ adapter->num_xdp_queues));
if (!tx_ring) {
err = -ENOMEM;
goto clear_reset;
}
if (new_rx_count != adapter->rx_ring_count) {
- rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring));
+ rx_ring = vmalloc(array_size(sizeof(*rx_ring),
+ adapter->num_rx_queues));
if (!rx_ring) {
err = -ENOMEM;
goto clear_reset;
/* Init ring buffer and unallocated stats_ids. */
priv->stats_ids.free_list.buf =
- vmalloc(NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
+ vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
+ NFP_FL_STATS_ENTRY_RS));
if (!priv->stats_ids.free_list.buf)
goto err_free_last_used;
* Allocate space for the dictionary. This may be more than one page in
* length.
*/
- db->dict = vmalloc(hsize * sizeof(struct bsd_dict));
+ db->dict = vmalloc(array_size(hsize, sizeof(struct bsd_dict)));
if (!db->dict)
{
bsd_free (db);
*/
else
{
- db->lens = vmalloc((maxmaxcode + 1) * sizeof(db->lens[0]));
+ db->lens = vmalloc(array_size(sizeof(db->lens[0]), (maxmaxcode + 1)));
if (!db->lens)
{
bsd_free (db);
/* Create buffer and read in eeprom */
- buf = vmalloc(eesize * 2);
+ buf = vmalloc(array_size(eesize, 2));
if (!buf) {
ret = -ENOMEM;
goto err;
* additional active scan request for hidden SSIDs on passive channels.
*/
adapter->num_in_chan_stats = 2 * (n_channels_bg + n_channels_a);
- adapter->chan_stats = vmalloc(sizeof(*adapter->chan_stats) *
- adapter->num_in_chan_stats);
+ adapter->chan_stats = vmalloc(array_size(sizeof(*adapter->chan_stats),
+ adapter->num_in_chan_stats));
if (!adapter->chan_stats)
return -ENOMEM;
return -EINVAL;
buffer_pos = 0;
- event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
+ event_buffer = vmalloc(array_size(buffer_size, sizeof(unsigned long)));
if (!event_buffer)
return -ENOMEM;
priv->md->properties.transfer_mode) == 0)
return -ENODEV;
- transfer = vmalloc(transaction.count * sizeof(*transfer));
+ transfer = vmalloc(array_size(sizeof(*transfer), transaction.count));
if (!transfer)
return -ENOMEM;
return -ENOMEM;
if (*rdata_ptr == fc_trc_flag->fnic_trace) {
- fnic_dbg_prt->buffer = vmalloc(3 *
- (trace_max_pages * PAGE_SIZE));
+ fnic_dbg_prt->buffer = vmalloc(array3_size(3, trace_max_pages,
+ PAGE_SIZE));
if (!fnic_dbg_prt->buffer) {
kfree(fnic_dbg_prt);
return -ENOMEM;
fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt);
} else {
fnic_dbg_prt->buffer =
- vmalloc(3 * (fnic_fc_trace_max_pages * PAGE_SIZE));
+ vmalloc(array3_size(3, fnic_fc_trace_max_pages,
+ PAGE_SIZE));
if (!fnic_dbg_prt->buffer) {
kfree(fnic_dbg_prt);
return -ENOMEM;
}
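array3_size() is the three-factor counterpart from the same header; it chains the saturating multiplies, which is why products such as 3 * trace_max_pages * PAGE_SIZE become array3_size(3, trace_max_pages, PAGE_SIZE) rather than a nested array_size(). A sketch in the same spirit as array_size_sketch() above:

/* Sketch of array3_size(): once a*b saturates to SIZE_MAX, the second
 * multiply keeps it saturated for any non-zero c. */
static inline size_t array3_size_sketch(size_t a, size_t b, size_t c)
{
	return array_size_sketch(array_size_sketch(a, b), c);
}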
memset((void *)fnic_trace_buf_p, 0, (trace_max_pages * PAGE_SIZE));
- fnic_trace_entries.page_offset = vmalloc(fnic_max_trace_entries *
- sizeof(unsigned long));
+ fnic_trace_entries.page_offset =
+ vmalloc(array_size(fnic_max_trace_entries,
+ sizeof(unsigned long)));
if (!fnic_trace_entries.page_offset) {
printk(KERN_ERR PFX "Failed to allocate memory for"
" page_offset\n");
fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/
FC_TRC_SIZE_BYTES;
- fnic_fc_ctlr_trace_buf_p = (unsigned long)vmalloc(
- fnic_fc_trace_max_pages * PAGE_SIZE);
+ fnic_fc_ctlr_trace_buf_p =
+ (unsigned long)vmalloc(array_size(PAGE_SIZE,
+ fnic_fc_trace_max_pages));
if (!fnic_fc_ctlr_trace_buf_p) {
pr_err("fnic: Failed to allocate memory for "
"FC Control Trace Buf\n");
fnic_fc_trace_max_pages * PAGE_SIZE);
/* Allocate memory for page offset */
- fc_trace_entries.page_offset = vmalloc(fc_trace_max_entries *
- sizeof(unsigned long));
+ fc_trace_entries.page_offset =
+ vmalloc(array_size(fc_trace_max_entries,
+ sizeof(unsigned long)));
if (!fc_trace_entries.page_offset) {
pr_err("fnic:Failed to allocate memory for page_offset\n");
if (fnic_fc_ctlr_trace_buf_p) {
}
if (ioa_cfg->sis64)
- ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
+ ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
+ sizeof(__be32 *)));
else
- ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
+ ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
+ sizeof(__be32 *)));
if (!ioa_data) {
ipr_err("Dump memory allocation failed\n");
int dbg = debugging;
#endif
- if ((buffer = vmalloc((nframes + 1) * OS_DATA_SIZE)) == NULL)
+ if ((buffer = vmalloc(array_size((nframes + 1), OS_DATA_SIZE))) == NULL)
return (-EIO);
printk(KERN_INFO "%s:I: Reading back %d frames from drive buffer%s\n",
}
map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
- map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
+ map_storep = vmalloc(array_size(sizeof(long),
+ BITS_TO_LONGS(map_size)));
pr_info("%lu provisioning blocks\n", map_size);
pgprot_t pgprot;
struct sg_table *table = buffer->sg_table;
int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
- struct page **pages = vmalloc(sizeof(struct page *) * npages);
+ struct page **pages = vmalloc(array_size(npages,
+ sizeof(struct page *)));
struct page **tmp = pages;
if (!pages)
gcam->debugfs.root = debugfs_create_dir(dirname, gb_debugfs_get());
- gcam->debugfs.buffers = vmalloc(sizeof(*gcam->debugfs.buffers) *
- GB_CAMERA_DEBUGFS_BUFFER_MAX);
+ gcam->debugfs.buffers =
+ vmalloc(array_size(GB_CAMERA_DEBUGFS_BUFFER_MAX,
+ sizeof(*gcam->debugfs.buffers)));
if (!gcam->debugfs.buffers)
return -ENOMEM;
}
} else if (clipcount) {
/* write our own bitmap from the clips */
- vcp = vmalloc(sizeof(struct v4l2_clip) * (clipcount + 4));
+ vcp = vmalloc(array_size(sizeof(struct v4l2_clip),
+ clipcount + 4));
if (vcp == NULL) {
dprintk(1,
KERN_ERR
segment = &ms_card->segment[seg_no];
if (!segment->l2p_table) {
- segment->l2p_table = vmalloc(table_size * 2);
+ segment->l2p_table = vmalloc(array_size(table_size, 2));
if (!segment->l2p_table) {
rtsx_trace(chip);
goto BUILD_FAIL;
dev_dbg(rtsx_dev(chip), "dw_len = %d\n", dw_len);
- data = vmalloc(dw_len * 4);
+ data = vmalloc(array_size(dw_len, 4));
if (!data) {
rtsx_trace(chip);
return STATUS_NOMEM;
}
if (!sisusb->font_backup)
- sisusb->font_backup = vmalloc(charcount * 32);
+ sisusb->font_backup = vmalloc(array_size(charcount, 32));
if (sisusb->font_backup) {
memcpy(sisusb->font_backup, font->data, charcount * 32);
info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- info->gfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
+ info->gfns = vmalloc(array_size(sizeof(unsigned long), info->nr_pages));
if (!info->gfns)
goto error_nomem;
if (segs - 1 > ULONG_MAX / sizeof(*vma_filesz))
goto end_coredump;
- vma_filesz = vmalloc((segs - 1) * sizeof(*vma_filesz));
+ vma_filesz = vmalloc(array_size(sizeof(*vma_filesz), (segs - 1)));
if (!vma_filesz)
goto end_coredump;
GFP_KERNEL);
if (!bv) {
- bv = vmalloc(max_pages * sizeof(struct bio_vec));
+ bv = vmalloc(array_size(max_pages, sizeof(struct bio_vec)));
if (!bv)
return -ENOMEM;
}
GFP_KERNEL);
if (!pages) {
- pages = vmalloc(max_pages * sizeof(struct page *));
+ pages = vmalloc(array_size(max_pages, sizeof(struct page *)));
if (!pages) {
kvfree(bv);
return -ENOMEM;
size = dlm_config.ci_rsbtbl_size;
ls->ls_rsbtbl_size = size;
- ls->ls_rsbtbl = vmalloc(sizeof(struct dlm_rsbtable) * size);
+ ls->ls_rsbtbl = vmalloc(array_size(size, sizeof(struct dlm_rsbtable)));
if (!ls->ls_rsbtbl)
goto out_lsfree;
for (i = 0; i < size; i++) {
struct reiserfs_bitmap_info *bitmap;
unsigned int bmap_nr = reiserfs_bmap_count(sb);
- bitmap = vmalloc(sizeof(*bitmap) * bmap_nr);
+ bitmap = vmalloc(array_size(bmap_nr, sizeof(*bitmap)));
if (bitmap == NULL)
return -ENOMEM;
pnode = kzalloc(sizeof(struct ubifs_pnode), GFP_KERNEL);
nnode = kzalloc(sizeof(struct ubifs_nnode), GFP_KERNEL);
buf = vmalloc(c->leb_size);
- ltab = vmalloc(sizeof(struct ubifs_lpt_lprops) * c->lpt_lebs);
+ ltab = vmalloc(array_size(sizeof(struct ubifs_lpt_lprops),
+ c->lpt_lebs));
if (!pnode || !nnode || !buf || !ltab || !lsave) {
err = -ENOMEM;
goto out;
{
int err, i;
- c->ltab = vmalloc(sizeof(struct ubifs_lpt_lprops) * c->lpt_lebs);
+ c->ltab = vmalloc(array_size(sizeof(struct ubifs_lpt_lprops),
+ c->lpt_lebs));
if (!c->ltab)
return -ENOMEM;
{
int err, i;
- c->ltab_cmt = vmalloc(sizeof(struct ubifs_lpt_lprops) * c->lpt_lebs);
+ c->ltab_cmt = vmalloc(array_size(sizeof(struct ubifs_lpt_lprops),
+ c->lpt_lebs));
if (!c->ltab_cmt)
return -ENOMEM;
static void *pidlist_allocate(int count)
{
if (PIDLIST_TOO_LARGE(count))
- return vmalloc(count * sizeof(pid_t));
+ return vmalloc(array_size(count, sizeof(pid_t)));
else
return kmalloc_array(count, sizeof(pid_t), GFP_KERNEL);
}
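This helper open-codes the large-vs-small split that kvmalloc_array() encapsulates (attempt kmalloc, fall back to vmalloc, with the same overflow check). A possible follow-up cleanup, outside the scope of this series, could collapse it to:

static void *pidlist_allocate(int count)
{
	return kvmalloc_array(count, sizeof(pid_t), GFP_KERNEL);
}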
goto out_clean;
}
- data = vmalloc(sizeof(*data) * nr_threads);
+ data = vmalloc(array_size(nr_threads, sizeof(*data)));
if (!data) {
pr_err("Failed to allocate LZO data\n");
ret = -ENOMEM;
nr_threads = num_online_cpus() - 1;
nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
- page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
+ page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page)));
if (!page) {
pr_err("Failed to allocate LZO page\n");
ret = -ENOMEM;
goto out_clean;
}
- data = vmalloc(sizeof(*data) * nr_threads);
+ data = vmalloc(array_size(nr_threads, sizeof(*data)));
if (!data) {
pr_err("Failed to allocate LZO data\n");
ret = -ENOMEM;
cbflood_intra_holdoff > 0 &&
cur_ops->call &&
cur_ops->cb_barrier) {
- rhp = vmalloc(sizeof(*rhp) *
- cbflood_n_burst * cbflood_n_per_burst);
+ rhp = vmalloc(array3_size(cbflood_n_burst,
+ cbflood_n_per_burst,
+ sizeof(*rhp)));
err = !rhp;
}
if (err) {
struct tracing_map_sort_entry *sort_entry, **entries;
int i, n_entries, ret;
- entries = vmalloc(map->max_elts * sizeof(sort_entry));
+ entries = vmalloc(array_size(sizeof(sort_entry), map->max_elts));
if (!entries)
return -ENOMEM;
spin_unlock_irq(&pcpu_lock);
/* there can be at most this many free and allocated fragments */
- buffer = vmalloc((2 * max_nr_alloc + 1) * sizeof(int));
+ buffer = vmalloc(array_size(sizeof(int), (2 * max_nr_alloc + 1)));
if (!buffer)
return -ENOMEM;
* if an error occurs
*/
newinfo->chainstack =
- vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
+ vmalloc(array_size(nr_cpu_ids,
+ sizeof(*(newinfo->chainstack))));
if (!newinfo->chainstack)
return -ENOMEM;
for_each_possible_cpu(i) {
newinfo->chainstack[i] =
- vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
+ vmalloc(array_size(udc_cnt, sizeof(*(newinfo->chainstack[0]))));
if (!newinfo->chainstack[i]) {
while (i)
vfree(newinfo->chainstack[--i]);
}
}
- cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
+ cl_s = vmalloc(array_size(udc_cnt, sizeof(*cl_s)));
if (!cl_s)
return -ENOMEM;
i = 0; /* the i'th udc */
if (num_counters == 0)
return -EINVAL;
- tmp = vmalloc(num_counters * sizeof(*tmp));
+ tmp = vmalloc(array_size(num_counters, sizeof(*tmp)));
if (!tmp)
return -ENOMEM;
return -EINVAL;
}
- counterstmp = vmalloc(nentries * sizeof(*counterstmp));
+ counterstmp = vmalloc(array_size(nentries, sizeof(*counterstmp)));
if (!counterstmp)
return -ENOMEM;
/*
* Allocate the connection hash table and initialize its list heads
*/
- ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size * sizeof(*ip_vs_conn_tab));
+ ip_vs_conn_tab = vmalloc(array_size(ip_vs_conn_tab_size,
+ sizeof(*ip_vs_conn_tab)));
if (!ip_vs_conn_tab)
return -ENOMEM;
if (snd_BUG_ON(!pool))
return -EINVAL;
- cellptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
+ cellptr = vmalloc(array_size(sizeof(struct snd_seq_event_cell),
+ pool->size));
if (!cellptr)
return -ENOMEM;
return NULL;
/* better to use vmalloc for this big table */
- ins->symbol_table.symbols = vmalloc(sizeof(struct dsp_symbol_entry) *
- DSP_MAX_SYMBOLS);
+ ins->symbol_table.symbols =
+ vmalloc(array_size(DSP_MAX_SYMBOLS,
+ sizeof(struct dsp_symbol_entry)));
ins->code.data = kmalloc(DSP_CODE_BYTE_SIZE, GFP_KERNEL);
ins->modules = kmalloc_array(DSP_MAX_MODULES,
sizeof(struct dsp_module_desc),
(unsigned long)emu->ptb_pages.addr,
(unsigned long)(emu->ptb_pages.addr + emu->ptb_pages.bytes));
- emu->page_ptr_table = vmalloc(emu->max_cache_pages * sizeof(void *));
- emu->page_addr_table = vmalloc(emu->max_cache_pages *
- sizeof(unsigned long));
+ emu->page_ptr_table = vmalloc(array_size(sizeof(void *),
+ emu->max_cache_pages));
+ emu->page_addr_table = vmalloc(array_size(sizeof(unsigned long),
+ emu->max_cache_pages));
if (emu->page_ptr_table == NULL || emu->page_addr_table == NULL) {
err = -ENOMEM;
goto error;
size = ARRAY_SIZE(saved_regs);
if (emu->audigy)
size += ARRAY_SIZE(saved_regs_audigy);
- emu->saved_ptr = vmalloc(4 * NUM_G * size);
+ emu->saved_ptr = vmalloc(array3_size(4, NUM_G, size));
if (!emu->saved_ptr)
return -ENOMEM;
if (snd_emu10k1_efx_alloc_pm_buffer(emu) < 0)
if (! emu->tram_val_saved || ! emu->tram_addr_saved)
return -ENOMEM;
len = emu->audigy ? 2 * 1024 : 2 * 512;
- emu->saved_icode = vmalloc(len * 4);
+ emu->saved_icode = vmalloc(array_size(len, 4));
if (! emu->saved_icode)
return -ENOMEM;
return 0;
int snd_p16v_alloc_pm_buffer(struct snd_emu10k1 *emu)
{
- emu->p16v_saved = vmalloc(NUM_CHS * 4 * 0x80);
+ emu->p16v_saved = vmalloc(array3_size(NUM_CHS, 4, 0x80));
if (! emu->p16v_saved)
return -ENOMEM;
return 0;
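As in the saved_ptr hunk above, the full three-factor product goes through array3_size() so that the intermediate NUM_CHS * 4 step is overflow-checked as well.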
chip->irq = pci->irq;
#ifdef CONFIG_PM_SLEEP
- chip->suspend_mem = vmalloc(sizeof(u16) * (REV_B_CODE_MEMORY_LENGTH + REV_B_DATA_MEMORY_LENGTH));
+ chip->suspend_mem =
+ vmalloc(array_size(sizeof(u16),
+ REV_B_CODE_MEMORY_LENGTH +
+ REV_B_DATA_MEMORY_LENGTH));
if (chip->suspend_mem == NULL)
dev_warn(card->dev, "can't allocate apm buffer\n");
#endif
trident->tlb.entries = (unsigned int*)ALIGN((unsigned long)trident->tlb.buffer.area, SNDRV_TRIDENT_MAX_PAGES * 4);
trident->tlb.entries_dmaaddr = ALIGN(trident->tlb.buffer.addr, SNDRV_TRIDENT_MAX_PAGES * 4);
/* allocate shadow TLB page table (virtual addresses) */
- trident->tlb.shadow_entries = vmalloc(SNDRV_TRIDENT_MAX_PAGES*sizeof(unsigned long));
+ trident->tlb.shadow_entries =
+ vmalloc(array_size(SNDRV_TRIDENT_MAX_PAGES,
+ sizeof(unsigned long)));
if (!trident->tlb.shadow_entries)
return -ENOMEM;
goto out;
if (routing.nr) {
r = -ENOMEM;
- entries = vmalloc(routing.nr * sizeof(*entries));
+ entries = vmalloc(array_size(sizeof(*entries),
+ routing.nr));
if (!entries)
goto out;
r = -EFAULT;