struct snd_dma_buffer *dmab);
void snd_dma_free_pages(struct snd_dma_buffer *dmab);
-/* basic memory allocation functions */
-void *snd_malloc_pages(size_t size, gfp_t gfp_flags);
-void snd_free_pages(void *ptr, size_t size);
-
#endif /* __SOUND_MEMALLOC_H */
#include <sound/memalloc.h>
-/*
- *
- * Generic memory allocators
- *
- */
-
-/**
- * snd_malloc_pages - allocate pages with the given size
- * @size: the size to allocate in bytes
- * @gfp_flags: the allocation conditions, GFP_XXX
- *
- * Allocates the physically contiguous pages with the given size.
- *
- * Return: The pointer of the buffer, or %NULL if no enough memory.
- */
-void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
-{
- int pg;
-
- if (WARN_ON(!size))
- return NULL;
- if (WARN_ON(!gfp_flags))
- return NULL;
- gfp_flags |= __GFP_COMP; /* compound page lets parts be mapped */
- pg = get_order(size);
- return (void *) __get_free_pages(gfp_flags, pg);
-}
-EXPORT_SYMBOL(snd_malloc_pages);
-
-/**
- * snd_free_pages - release the pages
- * @ptr: the buffer pointer to release
- * @size: the allocated buffer size
- *
- * Releases the buffer allocated via snd_malloc_pages().
- */
-void snd_free_pages(void *ptr, size_t size)
-{
- int pg;
-
- if (ptr == NULL)
- return;
- pg = get_order(size);
- free_pages((unsigned long) ptr, pg);
-}
-EXPORT_SYMBOL(snd_free_pages);
-
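The two wrappers removed above are thin layers over the page allocator: snd_malloc_pages() rounded the request up to the next power-of-two order and tagged it __GFP_COMP, and snd_free_pages() freed the same order. The generic helpers used throughout the rest of this patch do slightly better: alloc_pages_exact() returns physically contiguous memory rounded up only to whole pages and gives the unused tail of the power-of-two block back, and free_pages_exact() releases it when handed the same size. The __GFP_COMP trick is dropped because alloc_pages_exact() splits the underlying block and cannot take compound pages. A minimal sketch of the replacement pattern (the function name and the 8 KB size are hypothetical, not taken from the patch):

#include <linux/errno.h>
#include <linux/gfp.h>	/* alloc_pages_exact(), free_pages_exact() */

static int example_contig_alloc(void)
{
	size_t size = 8192;	/* any byte count; rounded up to whole pages */
	void *buf;

	buf = alloc_pages_exact(size, GFP_KERNEL);	/* no __GFP_COMP here */
	if (!buf)
		return -ENOMEM;
	/* ... use the physically contiguous buffer ... */
	free_pages_exact(buf, size);	/* must be the size passed above */
	return 0;
}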
/*
*
* Bus-specific memory allocators
dmab->bytes = 0;
switch (type) {
case SNDRV_DMA_TYPE_CONTINUOUS:
- dmab->area = snd_malloc_pages(size,
- (__force gfp_t)(unsigned long)device);
+ dmab->area = alloc_pages_exact(size,
+ (__force gfp_t)(unsigned long)device);
dmab->addr = 0;
break;
#ifdef CONFIG_HAS_DMA
{
switch (dmab->dev.type) {
case SNDRV_DMA_TYPE_CONTINUOUS:
- snd_free_pages(dmab->area, dmab->bytes);
+ free_pages_exact(dmab->area, dmab->bytes);
break;
#ifdef CONFIG_HAS_DMA
#ifdef CONFIG_GENERIC_ALLOCATOR
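The SNDRV_DMA_TYPE_CONTINUOUS case still smuggles the GFP mask through the fake device pointer and decodes it with the (__force gfp_t) cast shown above, so callers are untouched by the conversion. A sketch of a typical caller, assuming the snd_dma_continuous_data() helper that memalloc.h provided at the time of this change (the function name and size are illustrative):

#include <linux/gfp.h>
#include <sound/memalloc.h>

static int example_dma_buf(void)
{
	struct snd_dma_buffer dmab;
	int err;

	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_CONTINUOUS,
				  snd_dma_continuous_data(GFP_KERNEL),
				  4096, &dmab);
	if (err < 0)
		return err;
	/* ... dmab.area now points at the page-backed buffer ... */
	snd_dma_free_pages(&dmab);
	return 0;
}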
return -ENOMEM;
size = PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status));
- runtime->status = snd_malloc_pages(size, GFP_KERNEL);
+ runtime->status = alloc_pages_exact(size, GFP_KERNEL);
if (runtime->status == NULL) {
kfree(runtime);
return -ENOMEM;
}
- memset((void*)runtime->status, 0, size);
+ memset(runtime->status, 0, size);
size = PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control));
- runtime->control = snd_malloc_pages(size, GFP_KERNEL);
+ runtime->control = alloc_pages_exact(size, GFP_KERNEL);
if (runtime->control == NULL) {
- snd_free_pages((void*)runtime->status,
+ free_pages_exact(runtime->status,
PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status)));
kfree(runtime);
return -ENOMEM;
}
- memset((void*)runtime->control, 0, size);
+ memset(runtime->control, 0, size);
init_waitqueue_head(&runtime->sleep);
init_waitqueue_head(&runtime->tsleep);
runtime = substream->runtime;
if (runtime->private_free != NULL)
runtime->private_free(runtime);
- snd_free_pages((void*)runtime->status,
+ free_pages_exact(runtime->status,
PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status)));
- snd_free_pages((void*)runtime->control,
+ free_pages_exact(runtime->control,
PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)));
kfree(runtime->hw_constraints.rules);
/* Avoid concurrent access to runtime via PCM timer interface */
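The status and control records are padded out to whole pages with PAGE_ALIGN() because they are mmapped to user space, and alloc_pages_exact() hands back exactly those pages. Since free_pages_exact() needs the size again, the detach path recomputes the same PAGE_ALIGN()'ed value. A sketch of the pattern, assuming a hypothetical example_shm structure and using __GFP_ZERO in place of the explicit memset() that the patch keeps:

#include <linux/gfp.h>
#include <linux/mm.h>	/* PAGE_ALIGN() */

struct example_shm {
	int state;
};

static struct example_shm *example_shm_alloc(void)
{
	/* round up to whole pages so the record can be mapped to user space */
	return alloc_pages_exact(PAGE_ALIGN(sizeof(struct example_shm)),
				 GFP_KERNEL | __GFP_ZERO);
}

static void example_shm_free(struct example_shm *shm)
{
	if (!shm)
		return;
	/* must match the size used at allocation time */
	free_pages_exact(shm, PAGE_ALIGN(sizeof(struct example_shm)));
}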
if (!us428->us428ctls_sharedmem) {
init_waitqueue_head(&us428->us428ctls_wait_queue_head);
- if(!(us428->us428ctls_sharedmem = snd_malloc_pages(sizeof(struct us428ctls_sharedmem), GFP_KERNEL)))
+ us428->us428ctls_sharedmem = alloc_pages_exact(sizeof(struct us428ctls_sharedmem), GFP_KERNEL);
+ if (!us428->us428ctls_sharedmem)
return -ENOMEM;
memset(us428->us428ctls_sharedmem, -1, sizeof(struct us428ctls_sharedmem));
us428->us428ctls_sharedmem->CtlSnapShotLast = -2;
kfree(usX2Y(card)->In04Buf);
usb_free_urb(usX2Y(card)->In04urb);
if (usX2Y(card)->us428ctls_sharedmem)
- snd_free_pages(usX2Y(card)->us428ctls_sharedmem, sizeof(*usX2Y(card)->us428ctls_sharedmem));
+ free_pages_exact(usX2Y(card)->us428ctls_sharedmem,
+ sizeof(*usX2Y(card)->us428ctls_sharedmem));
if (usX2Y(card)->card_index >= 0 && usX2Y(card)->card_index < SNDRV_CARDS)
snd_usX2Y_card_used[usX2Y(card)->card_index] = 0;
}
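The us428 control shared memory is later handed to user space by the hwdep mmap code, so it has to remain a linear, page-backed allocation whose pages can be found with virt_to_page(); alloc_pages_exact() preserves that property. Note also that this buffer is initialised to all-ones, so __GFP_ZERO is no substitute for the memset(-1). A sketch (not the driver's actual handler) of how an mmap fault handler can serve pages out of such a buffer; the function name and the vm_private_data layout are hypothetical:

#include <linux/mm.h>

static vm_fault_t example_shm_vm_fault(struct vm_fault *vmf)
{
	/* vm_private_data is assumed to hold the alloc_pages_exact() buffer;
	 * real code must also bound-check vmf->pgoff against the buffer size.
	 */
	void *base = vmf->vma->vm_private_data;
	struct page *page = virt_to_page((char *)base +
					 (vmf->pgoff << PAGE_SHIFT));

	get_page(page);		/* reference dropped when the mapping goes away */
	vmf->page = page;
	return 0;
}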
snd_printdd("snd_usX2Y_pcm_prepare(%p)\n", substream);
if (NULL == usX2Y->hwdep_pcm_shm) {
- if (NULL == (usX2Y->hwdep_pcm_shm = snd_malloc_pages(sizeof(struct snd_usX2Y_hwdep_pcm_shm), GFP_KERNEL)))
+ usX2Y->hwdep_pcm_shm = alloc_pages_exact(sizeof(struct snd_usX2Y_hwdep_pcm_shm),
+ GFP_KERNEL);
+ if (!usX2Y->hwdep_pcm_shm)
return -ENOMEM;
memset(usX2Y->hwdep_pcm_shm, 0, sizeof(struct snd_usX2Y_hwdep_pcm_shm));
}
{
struct usX2Ydev *usX2Y = hwdep->private_data;
if (NULL != usX2Y->hwdep_pcm_shm)
- snd_free_pages(usX2Y->hwdep_pcm_shm, sizeof(struct snd_usX2Y_hwdep_pcm_shm));
+ free_pages_exact(usX2Y->hwdep_pcm_shm, sizeof(struct snd_usX2Y_hwdep_pcm_shm));
}
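Unlike kfree(), free_pages_exact() has no way to look the allocation size up, which is why every converted free site above either recomputes it (PAGE_ALIGN(sizeof(...))) or relies on a structure that already records it, as struct snd_dma_buffer does in dmab->bytes. A hypothetical bookkeeping pair for a call site that has neither:

#include <linux/errno.h>
#include <linux/gfp.h>

struct example_buf {
	void	*area;
	size_t	bytes;
};

static int example_buf_alloc(struct example_buf *b, size_t bytes)
{
	b->area = alloc_pages_exact(bytes, GFP_KERNEL);
	if (!b->area)
		return -ENOMEM;
	b->bytes = bytes;	/* remembered for free_pages_exact() */
	return 0;
}

static void example_buf_free(struct example_buf *b)
{
	if (!b->area)
		return;
	free_pages_exact(b->area, b->bytes);
	b->area = NULL;
	b->bytes = 0;
}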