};
void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, unsigned int __nocast gfp)
+ dma_addr_t *dma_handle, gfp_t gfp)
{
void *ret;
struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
static void *bpa_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, unsigned int __nocast flag)
+ dma_addr_t *dma_handle, gfp_t flag)
{
void *ret;
EXPORT_SYMBOL(dma_set_mask);
void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, unsigned int __nocast flag)
+ dma_addr_t *dma_handle, gfp_t flag)
{
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
* to the dma address (mapping) of the first page.
*/
void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
- dma_addr_t *dma_handle, unsigned int __nocast flag)
+ dma_addr_t *dma_handle, gfp_t flag)
{
void *ret = NULL;
dma_addr_t mapping;
#include "pci.h"
static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, unsigned int __nocast flag)
+ dma_addr_t *dma_handle, gfp_t flag)
{
void *ret;
* to the dma address (mapping) of the first page.
*/
static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, unsigned int __nocast flag)
+ dma_addr_t *dma_handle, gfp_t flag)
{
return iommu_alloc_coherent(devnode_table(hwdev), size, dma_handle,
flag);
}
static void *vio_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, unsigned int __nocast flag)
+ dma_addr_t *dma_handle, gfp_t flag)
{
return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size,
dma_handle, flag);
}
static inline void fill_rx_pool (amb_dev * dev, unsigned char pool,
- unsigned int __nocast priority)
+ gfp_t priority)
{
rx_in rx;
amb_rxq * rxq;
}
}
-static void __devinit *aligned_kmalloc (int size, unsigned int __nocast flags,
- int alignment)
+static void __devinit *aligned_kmalloc (int size, gfp_t flags, int alignment)
{
void *t;
working again after that... -- REW */
static void top_off_fp (struct fs_dev *dev, struct freepool *fp,
- unsigned int __nocast gfp_flags)
+ gfp_t gfp_flags)
{
struct FS_BPENTRY *qe, *ne;
struct sk_buff *skb;
static void*
-fore200e_kmalloc(int size, unsigned int __nocast flags)
+fore200e_kmalloc(int size, gfp_t flags)
{
void *chunk = kzalloc(size, flags);
static struct dma_page *
-pool_alloc_page (struct dma_pool *pool, unsigned int __nocast mem_flags)
+pool_alloc_page (struct dma_pool *pool, gfp_t mem_flags)
{
struct dma_page *page;
int mapsize;
* If such a memory block can't be allocated, null is returned.
*/
void *
-dma_pool_alloc (struct dma_pool *pool, unsigned int __nocast mem_flags,
- dma_addr_t *handle)
+dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle)
{
unsigned long flags;
struct dma_page *page;
return 1;
}
-static void *pkt_rb_alloc(unsigned int __nocast gfp_mask, void *data)
+static void *pkt_rb_alloc(gfp_t gfp_mask, void *data)
{
return kmalloc(sizeof(struct pkt_rb_node), gfp_mask);
}
}
-static void *psd_pool_alloc(unsigned int __nocast gfp_mask, void *data)
+static void *psd_pool_alloc(gfp_t gfp_mask, void *data)
{
return kmalloc(sizeof(struct packet_stacked_data), gfp_mask);
}
}
static inline struct urb *bpa10x_alloc_urb(struct usb_device *udev, unsigned int pipe,
- size_t size, unsigned int __nocast flags, void *data)
+ size_t size, gfp_t flags, void *data)
{
struct urb *urb;
struct usb_ctrlrequest *cr;
{ } /* Terminating entry */
};
-static struct _urb *_urb_alloc(int isoc, unsigned int __nocast gfp)
+static struct _urb *_urb_alloc(int isoc, gfp_t gfp)
{
struct _urb *_urb = kmalloc(sizeof(struct _urb) +
sizeof(struct usb_iso_packet_descriptor) * isoc, gfp);
* a new message.
*
*/
-int cn_netlink_send(struct cn_msg *msg, u32 __group,
- unsigned int __nocast gfp_mask)
+int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
{
struct cn_callback_entry *__cbq;
unsigned int size;
static void queue_complete_cb(struct pending_request *req);
-static struct pending_request *__alloc_pending_request(unsigned int __nocast flags)
+static struct pending_request *__alloc_pending_request(gfp_t flags)
{
struct pending_request *req;
u32 remote_qpn, u16 pkey_index,
struct ib_ah *ah, int rmpp_active,
int hdr_len, int data_len,
- unsigned int __nocast gfp_mask)
+ gfp_t gfp_mask)
{
struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_send_buf *send_buf;
int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
struct ib_sa_path_rec *rec,
ib_sa_comp_mask comp_mask,
- int timeout_ms, unsigned int __nocast gfp_mask,
+ int timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_path_rec *resp,
void *context),
int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
struct ib_sa_service_rec *rec,
ib_sa_comp_mask comp_mask,
- int timeout_ms, unsigned int __nocast gfp_mask,
+ int timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_service_rec *resp,
void *context),
u8 method,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask,
- int timeout_ms, unsigned int __nocast gfp_mask,
+ int timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_mcmember_rec *resp,
void *context),
/*
* Mempool alloc and free functions for the page
*/
-static void *mempool_alloc_page(unsigned int __nocast gfp_mask, void *data)
+static void *mempool_alloc_page(gfp_t gfp_mask, void *data)
{
return alloc_page(gfp_mask);
}
static unsigned _num_ios;
static mempool_t *_io_pool;
-static void *alloc_io(unsigned int __nocast gfp_mask, void *pool_data)
+static void *alloc_io(gfp_t gfp_mask, void *pool_data)
{
return kmalloc(sizeof(struct io), gfp_mask);
}
/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
-static void *region_alloc(unsigned int __nocast gfp_mask, void *pool_data)
+static void *region_alloc(gfp_t gfp_mask, void *pool_data)
{
return kmalloc(sizeof(struct region), gfp_mask);
}
static mdk_personality_t multipath_personality;
-static void *mp_pool_alloc(unsigned int __nocast gfp_flags, void *data)
+static void *mp_pool_alloc(gfp_t gfp_flags, void *data)
{
struct multipath_bh *mpb;
mpb = kmalloc(sizeof(*mpb), gfp_flags);
static void unplug_slaves(mddev_t *mddev);
-static void * r1bio_pool_alloc(unsigned int __nocast gfp_flags, void *data)
+static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
struct pool_info *pi = data;
r1bio_t *r1_bio;
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
#define RESYNC_WINDOW (2048*1024)
-static void * r1buf_pool_alloc(unsigned int __nocast gfp_flags, void *data)
+static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
struct pool_info *pi = data;
struct page *page;
static void unplug_slaves(mddev_t *mddev);
-static void * r10bio_pool_alloc(unsigned int __nocast gfp_flags, void *data)
+static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
conf_t *conf = data;
r10bio_t *r10_bio;
* one for write (we recover only one drive per r10buf)
*
*/
-static void * r10buf_pool_alloc(unsigned int __nocast gfp_flags, void *data)
+static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
{
conf_t *conf = data;
struct page *page;
* Copy all the Multicast addresses from src to the bonding device dst
*/
static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond,
- unsigned int __nocast gfp_flag)
+ gfp_t gfp_flag)
{
struct dev_mc_list *dmi, *new_dmi;
return 0;
}
-static inline int rx_refill(struct net_device *ndev, unsigned int __nocast gfp)
+static inline int rx_refill(struct net_device *ndev, gfp_t gfp)
{
struct ns83820 *dev = PRIV(ndev);
unsigned i;
#define ALIGNED_RX_SKB_ADDR(addr) \
((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
static __inline__ struct sk_buff *gem_alloc_skb(int size,
- unsigned int __nocast gfp_flags)
+ gfp_t gfp_flags)
{
struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);
}
static void *
-zfcp_mempool_alloc(unsigned int __nocast gfp_mask, void *size)
+zfcp_mempool_alloc(gfp_t gfp_mask, void *size)
{
return kmalloc((size_t) size, gfp_mask);
}
*/
static struct bio_set *fs_bio_set;
-static inline struct bio_vec *bvec_alloc_bs(unsigned int __nocast gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
+static inline struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
{
struct bio_vec *bvl;
struct biovec_slab *bp;
* allocate bio and iovecs from the memory pools specified by the
* bio_set structure.
**/
-struct bio *bio_alloc_bioset(unsigned int __nocast gfp_mask, int nr_iovecs, struct bio_set *bs)
+struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask);
return bio;
}
-struct bio *bio_alloc(unsigned int __nocast gfp_mask, int nr_iovecs)
+struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
{
struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
*
* Like __bio_clone, only also allocates the returned bio
*/
-struct bio *bio_clone(struct bio *bio, unsigned int __nocast gfp_mask)
+struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);
return bp;
}
-static void *bio_pair_alloc(unsigned int __nocast gfp_flags, void *data)
+static void *bio_pair_alloc(gfp_t gfp_flags, void *data)
{
return kmalloc(sizeof(struct bio_pair), gfp_flags);
}
buffer_heads_over_limit = (tot > max_buffer_heads);
}
-struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags)
+struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
if (ret) {
static struct bio *
mpage_alloc(struct block_device *bdev,
sector_t first_sector, int nr_vecs,
- unsigned int __nocast gfp_flags)
+ gfp_t gfp_flags)
{
struct bio *bio;
* Depending on @gfp_mask the allocation may be guaranteed to succeed.
*/
static inline void *__ntfs_malloc(unsigned long size,
- unsigned int __nocast gfp_mask)
+ gfp_t gfp_mask)
{
if (likely(size <= PAGE_SIZE)) {
BUG_ON(!size);
* Allocate a new ACL with the specified number of entries.
*/
struct posix_acl *
-posix_acl_alloc(int count, unsigned int __nocast flags)
+posix_acl_alloc(int count, gfp_t flags)
{
const size_t size = sizeof(struct posix_acl) +
count * sizeof(struct posix_acl_entry);
* Clone an ACL.
*/
struct posix_acl *
-posix_acl_clone(const struct posix_acl *acl, unsigned int __nocast flags)
+posix_acl_clone(const struct posix_acl *acl, gfp_t flags)
{
struct posix_acl *clone = NULL;
* Create an ACL representing the file mode permission bits of an inode.
*/
struct posix_acl *
-posix_acl_from_mode(mode_t mode, unsigned int __nocast flags)
+posix_acl_from_mode(mode_t mode, gfp_t flags)
{
struct posix_acl *acl = posix_acl_alloc(3, flags);
if (!acl)
void *
-kmem_alloc(size_t size, unsigned int __nocast flags)
+kmem_alloc(size_t size, gfp_t flags)
{
int retries = 0;
unsigned int lflags = kmem_flags_convert(flags);
}
void *
-kmem_zalloc(size_t size, unsigned int __nocast flags)
+kmem_zalloc(size_t size, gfp_t flags)
{
void *ptr;
void *
kmem_realloc(void *ptr, size_t newsize, size_t oldsize,
- unsigned int __nocast flags)
+ gfp_t flags)
{
void *new;
}
void *
-kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
+kmem_zone_alloc(kmem_zone_t *zone, gfp_t flags)
{
int retries = 0;
unsigned int lflags = kmem_flags_convert(flags);
}
void *
-kmem_zone_zalloc(kmem_zone_t *zone, unsigned int __nocast flags)
+kmem_zone_zalloc(kmem_zone_t *zone, gfp_t flags)
{
void *ptr;
*(NSTATEP) = *(OSTATEP); \
} while (0)
-static __inline unsigned int kmem_flags_convert(unsigned int __nocast flags)
+static __inline unsigned int kmem_flags_convert(gfp_t flags)
{
unsigned int lflags = __GFP_NOWARN; /* we'll report problems, if need be */
BUG();
}
-extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
-extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
+extern void *kmem_zone_zalloc(kmem_zone_t *, gfp_t);
+extern void *kmem_zone_alloc(kmem_zone_t *, gfp_t);
-extern void *kmem_alloc(size_t, unsigned int __nocast);
-extern void *kmem_realloc(void *, size_t, size_t,
- unsigned int __nocast);
-extern void *kmem_zalloc(size_t, unsigned int __nocast);
+extern void *kmem_alloc(size_t, gfp_t);
+extern void *kmem_realloc(void *, size_t, size_t, gfp_t);
+extern void *kmem_zalloc(size_t, gfp_t);
extern void kmem_free(void *, size_t);
typedef struct shrinker *kmem_shaker_t;
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- unsigned int __nocast flag)
+ gfp_t flag)
{
BUG_ON(dev->bus != &pci_bus_type);
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- unsigned int __nocast flag)
+ gfp_t flag)
{
BUG();
return NULL;
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, unsigned int __nocast flag);
+ dma_addr_t *dma_handle, gfp_t flag);
void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t * dma_handle,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
return __dma_alloc_coherent(size, dma_handle, gfp);
extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);
extern void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, unsigned int __nocast flag);
+ dma_addr_t *dma_handle, gfp_t flag);
extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle);
extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
*/
struct dma_mapping_ops {
void * (*alloc_coherent)(struct device *dev, size_t size,
- dma_addr_t *dma_handle, unsigned int __nocast flag);
+ dma_addr_t *dma_handle, gfp_t flag);
void (*free_coherent)(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
dma_addr_t (*map_single)(struct device *dev, void *ptr,
int nelems, enum dma_data_direction direction);
extern void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
- dma_addr_t *dma_handle, unsigned int __nocast flag);
+ dma_addr_t *dma_handle, gfp_t flag);
extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
void *vaddr, dma_addr_t dma_handle);
extern dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
int atm_charge(struct atm_vcc *vcc,int truesize);
struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
- unsigned int __nocast gfp_flags);
+ gfp_t gfp_flags);
int atm_pcr_goal(struct atm_trafprm *tp);
void vcc_release_async(struct atm_vcc *vcc, int reply);
extern struct bio_set *bioset_create(int, int, int);
extern void bioset_free(struct bio_set *);
-extern struct bio *bio_alloc(unsigned int __nocast, int);
-extern struct bio *bio_alloc_bioset(unsigned int __nocast, int, struct bio_set *);
+extern struct bio *bio_alloc(gfp_t, int);
+extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);
extern void bio_free(struct bio *, struct bio_set *);
extern int bio_hw_segments(struct request_queue *, struct bio *);
extern void __bio_clone(struct bio *, struct bio *);
-extern struct bio *bio_clone(struct bio *, unsigned int __nocast);
+extern struct bio *bio_clone(struct bio *, gfp_t);
extern void bio_init(struct bio *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, int size);
struct buffer_head *__bread(struct block_device *, sector_t block, int size);
-struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags);
+struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void FASTCALL(unlock_buffer(struct buffer_head *bh));
void FASTCALL(__lock_buffer(struct buffer_head *bh));
int cn_add_callback(struct cb_id *, char *, void (*callback) (void *));
void cn_del_callback(struct cb_id *);
-int cn_netlink_send(struct cn_msg *, u32, unsigned int __nocast);
+int cn_netlink_send(struct cn_msg *, u32, gfp_t);
int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *));
void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
void cpuset_update_current_mems_allowed(void);
void cpuset_restrict_to_mems_allowed(unsigned long *nodes);
int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl);
-extern int cpuset_zone_allowed(struct zone *z, unsigned int __nocast gfp_mask);
+extern int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask);
extern int cpuset_excl_nodes_overlap(const struct task_struct *p);
extern struct file_operations proc_cpuset_operations;
extern char *cpuset_task_status_allowed(struct task_struct *task, char *buffer);
return 1;
}
-static inline int cpuset_zone_allowed(struct zone *z,
- unsigned int __nocast gfp_mask)
+static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
return 1;
}
void dma_pool_destroy(struct dma_pool *pool);
-void *dma_pool_alloc(struct dma_pool *pool, unsigned int __nocast mem_flags,
+void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle);
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
#endif
extern struct page *
-FASTCALL(__alloc_pages(unsigned int, unsigned int, struct zonelist *));
+FASTCALL(__alloc_pages(gfp_t, unsigned int, struct zonelist *));
-static inline struct page *alloc_pages_node(int nid, unsigned int __nocast gfp_mask,
+static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
unsigned int order)
{
if (unlikely(order >= MAX_ORDER))
}
#ifdef CONFIG_NUMA
-extern struct page *alloc_pages_current(unsigned int __nocast gfp_mask, unsigned order);
+extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
static inline struct page *
-alloc_pages(unsigned int __nocast gfp_mask, unsigned int order)
+alloc_pages(gfp_t gfp_mask, unsigned int order)
{
if (unlikely(order >= MAX_ORDER))
return NULL;
return alloc_pages_current(gfp_mask, order);
}
-extern struct page *alloc_page_vma(unsigned __nocast gfp_mask,
+extern struct page *alloc_page_vma(gfp_t gfp_mask,
struct vm_area_struct *vma, unsigned long addr);
#else
#define alloc_pages(gfp_mask, order) \
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
-extern unsigned long FASTCALL(__get_free_pages(unsigned int __nocast gfp_mask, unsigned int order));
-extern unsigned long FASTCALL(get_zeroed_page(unsigned int __nocast gfp_mask));
+extern unsigned long FASTCALL(__get_free_pages(gfp_t gfp_mask, unsigned int order));
+extern unsigned long FASTCALL(get_zeroed_page(gfp_t gfp_mask));
#define __get_free_page(gfp_mask) \
__get_free_pages((gfp_mask),0)
*/
extern kmem_cache_t *jbd_handle_cache;
-static inline handle_t *jbd_alloc_handle(unsigned int __nocast gfp_flags)
+static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
{
return kmem_cache_alloc(jbd_handle_cache, gfp_flags);
}
};
extern struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
- unsigned int __nocast gfp_mask, spinlock_t *lock);
-extern struct kfifo *kfifo_alloc(unsigned int size, unsigned int __nocast gfp_mask,
+ gfp_t gfp_mask, spinlock_t *lock);
+extern struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask,
spinlock_t *lock);
extern void kfifo_free(struct kfifo *fifo);
extern unsigned int __kfifo_put(struct kfifo *fifo,
#include <linux/wait.h>
-typedef void * (mempool_alloc_t)(unsigned int __nocast gfp_mask, void *pool_data);
+typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
typedef void (mempool_free_t)(void *element, void *pool_data);
typedef struct mempool_s {
extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data, int nid);
-extern int mempool_resize(mempool_t *pool, int new_min_nr,
- unsigned int __nocast gfp_mask);
+extern int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask);
extern void mempool_destroy(mempool_t *pool);
-extern void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask);
+extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask);
extern void mempool_free(void *element, mempool_t *pool);
/*
* A mempool_alloc_t and mempool_free_t that get the memory from
* a slab that is passed in through pool_data.
*/
-void *mempool_alloc_slab(unsigned int __nocast gfp_mask, void *pool_data);
+void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
void mempool_free_slab(void *element, void *pool_data);
#endif /* _LINUX_MEMPOOL_H */
extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock);
extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid,
- __u32 group, unsigned int __nocast allocation);
+ __u32 group, gfp_t allocation);
extern void netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code);
extern int netlink_register_notifier(struct notifier_block *nb);
extern int netlink_unregister_notifier(struct notifier_block *nb);
#define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */
#define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */
-static inline unsigned int __nocast mapping_gfp_mask(struct address_space * mapping)
+static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
return mapping->flags & __GFP_BITS_MASK;
}
/* posix_acl.c */
-extern struct posix_acl *posix_acl_alloc(int, unsigned int __nocast);
-extern struct posix_acl *posix_acl_clone(const struct posix_acl *, unsigned int __nocast);
+extern struct posix_acl *posix_acl_alloc(int, gfp_t);
+extern struct posix_acl *posix_acl_clone(const struct posix_acl *, gfp_t);
extern int posix_acl_valid(const struct posix_acl *);
extern int posix_acl_permission(struct inode *, const struct posix_acl *, int);
-extern struct posix_acl *posix_acl_from_mode(mode_t, unsigned int __nocast);
+extern struct posix_acl *posix_acl_from_mode(mode_t, gfp_t);
extern int posix_acl_equiv_mode(const struct posix_acl *, mode_t *);
extern int posix_acl_create_masq(struct posix_acl *, mode_t *);
extern int posix_acl_chmod_masq(struct posix_acl *, mode_t);
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
unsigned long first_index, unsigned int max_items);
-int radix_tree_preload(unsigned int __nocast gfp_mask);
+int radix_tree_preload(gfp_t gfp_mask);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root,
unsigned long index, int tag);
return security_ops->socket_getpeersec(sock, optval, optlen, len);
}
-static inline int security_sk_alloc(struct sock *sk, int family,
- unsigned int __nocast priority)
+static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
{
return security_ops->sk_alloc_security(sk, family, priority);
}
return -ENOPROTOOPT;
}
-static inline int security_sk_alloc(struct sock *sk, int family,
- unsigned int __nocast priority)
+static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
{
return 0;
}
extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
- unsigned int __nocast priority, int fclone);
+ gfp_t priority, int fclone);
static inline struct sk_buff *alloc_skb(unsigned int size,
- unsigned int __nocast priority)
+ gfp_t priority)
{
return __alloc_skb(size, priority, 0);
}
static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
- unsigned int __nocast priority)
+ gfp_t priority)
{
return __alloc_skb(size, priority, 1);
}
extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
unsigned int size,
- unsigned int __nocast priority);
+ gfp_t priority);
extern void kfree_skbmem(struct sk_buff *skb);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
- unsigned int __nocast priority);
+ gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
- unsigned int __nocast priority);
+ gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
- unsigned int __nocast gfp_mask);
+ gfp_t gfp_mask);
extern int pskb_expand_head(struct sk_buff *skb,
int nhead, int ntail,
- unsigned int __nocast gfp_mask);
+ gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
int newheadroom, int newtailroom,
- unsigned int __nocast priority);
+ gfp_t priority);
extern struct sk_buff * skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a) kfree_skb(a)
extern void skb_over_panic(struct sk_buff *skb, int len,
* NULL is returned on a memory allocation failure.
*/
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
- unsigned int __nocast pri)
+ gfp_t pri)
{
might_sleep_if(pri & __GFP_WAIT);
if (skb_shared(skb)) {
* %NULL is returned on a memory allocation failure.
*/
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
- unsigned int __nocast pri)
+ gfp_t pri)
{
might_sleep_if(pri & __GFP_WAIT);
if (skb_cloned(skb)) {
* %NULL is returned in there is no free memory.
*/
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
- unsigned int __nocast gfp_mask)
+ gfp_t gfp_mask)
{
struct sk_buff *skb = alloc_skb(length + 16, gfp_mask);
if (likely(skb))
* If there is no free memory -ENOMEM is returned, otherwise zero
* is returned and the old skb data released.
*/
-extern int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp);
-static inline int skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp)
+extern int __skb_linearize(struct sk_buff *skb, gfp_t gfp);
+static inline int skb_linearize(struct sk_buff *skb, gfp_t gfp)
{
return __skb_linearize(skb, gfp);
}
void (*)(void *, kmem_cache_t *, unsigned long));
extern int kmem_cache_destroy(kmem_cache_t *);
extern int kmem_cache_shrink(kmem_cache_t *);
-extern void *kmem_cache_alloc(kmem_cache_t *, unsigned int __nocast);
+extern void *kmem_cache_alloc(kmem_cache_t *, gfp_t);
extern void kmem_cache_free(kmem_cache_t *, void *);
extern unsigned int kmem_cache_size(kmem_cache_t *);
extern const char *kmem_cache_name(kmem_cache_t *);
-extern kmem_cache_t *kmem_find_general_cachep(size_t size, unsigned int __nocast gfpflags);
+extern kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags);
/* Size description struct for general caches. */
struct cache_sizes {
kmem_cache_t *cs_dmacachep;
};
extern struct cache_sizes malloc_sizes[];
-extern void *__kmalloc(size_t, unsigned int __nocast);
+extern void *__kmalloc(size_t, gfp_t);
-static inline void *kmalloc(size_t size, unsigned int __nocast flags)
+static inline void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size)) {
int i = 0;
return __kmalloc(size, flags);
}
-extern void *kzalloc(size_t, unsigned int __nocast);
+extern void *kzalloc(size_t, gfp_t);
/**
* kcalloc - allocate memory for an array. The memory is set to zero.
* @size: element size.
* @flags: the type of memory to allocate.
*/
-static inline void *kcalloc(size_t n, size_t size, unsigned int __nocast flags)
+static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
if (n != 0 && size > INT_MAX / n)
return NULL;
extern unsigned int ksize(const void *);
#ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node(kmem_cache_t *,
- unsigned int __nocast flags, int node);
-extern void *kmalloc_node(size_t size, unsigned int __nocast flags, int node);
+extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node);
+extern void *kmalloc_node(size_t size, gfp_t flags, int node);
#else
static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node)
{
return kmem_cache_alloc(cachep, flags);
}
-static inline void *kmalloc_node(size_t size, unsigned int __nocast flags, int node)
+static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
return kmalloc(size, flags);
}
extern void * memchr(const void *,int,__kernel_size_t);
#endif
-extern char *kstrdup(const char *s, unsigned int __nocast gfp);
+extern char *kstrdup(const char *s, gfp_t gfp);
#ifdef __cplusplus
}
#define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
/* linux/mm/oom_kill.c */
-extern void out_of_memory(unsigned int __nocast gfp_mask, int order);
+extern void out_of_memory(gfp_t gfp_mask, int order);
/* linux/mm/memory.c */
extern void swapin_readahead(swp_entry_t, unsigned long, struct vm_area_struct *);
#define TS_PRIV_ALIGN(len) (((len) + TS_PRIV_ALIGNTO-1) & ~(TS_PRIV_ALIGNTO-1))
static inline struct ts_config *alloc_ts_config(size_t payload,
- unsigned int __nocast gfp_mask)
+ gfp_t gfp_mask)
{
struct ts_config *conf;
typedef __u64 __bitwise __be64;
#endif
+#ifdef __KERNEL__
+typedef unsigned __nocast gfp_t;
+#endif
+
struct ustat {
__kernel_daddr_t f_tfree;
__kernel_ino_t f_tinode;
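The typedef added above is the crux of the series: the allocation-flags argument becomes a checker-visible type while staying a plain unsigned int for the compiler. A minimal, self-contained sketch of how the annotation is expected to behave; the __CHECKER__ fallback mirrors what include/linux/compiler.h is assumed to provide, and toy_alloc() is purely illustrative, not part of this patch:

/* With a normal compiler __nocast expands to nothing, so gfp_t is
 * ABI-identical to unsigned int.  Only sparse ("make C=1") sees the
 * attribute and can warn when a gfp_t and an ordinary integer are
 * mixed implicitly, e.g. when a size and a flags argument are swapped. */
#ifdef __CHECKER__
# define __nocast	__attribute__((nocast))
#else
# define __nocast
#endif

typedef unsigned __nocast gfp_t;

static void *toy_alloc(unsigned long size, gfp_t flags)
{
	(void)size;
	(void)flags;
	return (void *)0;	/* placeholder body for the sketch */
}
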
extern void *vmalloc(unsigned long size);
extern void *vmalloc_exec(unsigned long size);
extern void *vmalloc_32(unsigned long size);
-extern void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, pgprot_t prot);
-extern void *__vmalloc_area(struct vm_struct *area, unsigned int __nocast gfp_mask, pgprot_t prot);
+extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
+extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot);
extern void vfree(void *addr);
extern void *vmap(struct page **pages, unsigned int count,
};
#define bt_cb(skb) ((struct bt_skb_cb *)(skb->cb))
-static inline struct sk_buff *bt_skb_alloc(unsigned int len, unsigned int __nocast how)
+static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how)
{
struct sk_buff *skb;
u8 xon_char, u8 xoff_char, u16 param_mask);
/* ---- RFCOMM DLCs (channels) ---- */
-struct rfcomm_dlc *rfcomm_dlc_alloc(unsigned int __nocast prio);
+struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio);
void rfcomm_dlc_free(struct rfcomm_dlc *d);
int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel);
int rfcomm_dlc_close(struct rfcomm_dlc *d, int reason);
extern void dn_nsp_send_oth_ack(struct sock *sk);
extern void dn_nsp_delayed_ack(struct sock *sk);
extern void dn_send_conn_ack(struct sock *sk);
-extern void dn_send_conn_conf(struct sock *sk, unsigned int __nocast gfp);
+extern void dn_send_conn_conf(struct sock *sk, gfp_t gfp);
extern void dn_nsp_send_disc(struct sock *sk, unsigned char type,
- unsigned short reason, unsigned int __nocast gfp);
+ unsigned short reason, gfp_t gfp);
extern void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type,
unsigned short reason);
extern void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval);
extern void dn_nsp_output(struct sock *sk);
extern int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum);
-extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, unsigned int __nocast gfp, int oob);
+extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp, int oob);
extern unsigned long dn_nsp_persist(struct sock *sk);
extern int dn_nsp_xmit_timeout(struct sock *sk);
extern int dn_nsp_rx(struct sk_buff *);
extern int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
-extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, unsigned int __nocast pri);
+extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
extern struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock, long timeo, int *err);
#define NSP_REASON_OK 0 /* No error */
GNU General Public License for more details.
*******************************************************************************/
-extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, unsigned int __nocast pri);
+extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
extern int dn_route_output_sock(struct dst_entry **pprt, struct flowi *, struct sock *sk, int flags);
extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
extern int dn_cache_getroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
extern struct sock *inet_csk_clone(struct sock *sk,
const struct request_sock *req,
- const unsigned int __nocast priority);
+ const gfp_t priority);
enum inet_csk_ack_state_t {
ICSK_ACK_SCHED = 1,
extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff **pskb);
extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff **pskb);
-extern int ip_vs_skb_replace(struct sk_buff *skb, unsigned int __nocast pri,
+extern int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
char *o_buf, int o_len, char *n_buf, int n_len);
extern int ip_vs_app_init(void);
extern void ip_vs_app_cleanup(void);
return skb->cb[sizeof(skb->cb) - 1];
}
-extern struct sock *llc_sk_alloc(int family, unsigned int __nocast priority,
+extern struct sock *llc_sk_alloc(int family, gfp_t priority,
struct proto *prot);
extern void llc_sk_free(struct sock *sk);
*/
extern struct sock *sctp_get_ctl_sock(void);
extern int sctp_copy_local_addr_list(struct sctp_bind_addr *,
- sctp_scope_t, unsigned int __nocast gfp,
+ sctp_scope_t, gfp_t gfp,
int flags);
extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
int sctp_chunk_iif(const struct sctp_chunk *);
struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *,
struct sctp_chunk *,
- unsigned int __nocast gfp);
+ gfp_t gfp);
__u32 sctp_generate_verification_tag(void);
void sctp_populate_tie_tags(__u8 *cookie, __u32 curTag, __u32 hisTag);
/* Prototypes for chunk-building functions. */
struct sctp_chunk *sctp_make_init(const struct sctp_association *,
const struct sctp_bind_addr *,
- unsigned int __nocast gfp, int vparam_len);
+ gfp_t gfp, int vparam_len);
struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *,
const struct sctp_chunk *,
- const unsigned int __nocast gfp,
+ const gfp_t gfp,
const int unkparam_len);
struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *,
const struct sctp_chunk *);
struct sctp_endpoint *,
struct sctp_association *asoc,
void *event_arg,
- unsigned int __nocast gfp);
+ gfp_t gfp);
/* 2nd level prototypes */
void sctp_generate_t3_rtx_event(unsigned long peer);
struct sctp_association *sctp_unpack_cookie(const struct sctp_endpoint *,
const struct sctp_association *,
struct sctp_chunk *,
- unsigned int __nocast gfp, int *err,
+ gfp_t gfp, int *err,
struct sctp_chunk **err_chk_p);
int sctp_addip_addr_config(struct sctp_association *, sctp_param_t,
struct sockaddr_storage*, int);
};
struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
- unsigned int __nocast gfp);
+ gfp_t gfp);
void sctp_ssnmap_free(struct sctp_ssnmap *map);
void sctp_ssnmap_clear(struct sctp_ssnmap *map);
};
struct sctp_transport *sctp_transport_new(const union sctp_addr *,
- unsigned int __nocast);
+ gfp_t);
void sctp_transport_set_owner(struct sctp_transport *,
struct sctp_association *);
void sctp_transport_route(struct sctp_transport *, union sctp_addr *,
void sctp_bind_addr_free(struct sctp_bind_addr *);
int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
const struct sctp_bind_addr *src,
- sctp_scope_t scope, unsigned int __nocast gfp,
+ sctp_scope_t scope, gfp_t gfp,
int flags);
int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *,
- unsigned int __nocast gfp);
+ gfp_t gfp);
int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *);
int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *,
struct sctp_sock *);
struct sctp_sock *opt);
union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp,
int *addrs_len,
- unsigned int __nocast gfp);
+ gfp_t gfp);
int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len,
- __u16 port, unsigned int __nocast gfp);
+ __u16 port, gfp_t gfp);
sctp_scope_t sctp_scope(const union sctp_addr *);
int sctp_in_scope(const union sctp_addr *addr, const sctp_scope_t scope);
}
/* These are function signatures for manipulating endpoints. */
-struct sctp_endpoint *sctp_endpoint_new(struct sock *, unsigned int __nocast);
+struct sctp_endpoint *sctp_endpoint_new(struct sock *, gfp_t);
void sctp_endpoint_free(struct sctp_endpoint *);
void sctp_endpoint_put(struct sctp_endpoint *);
void sctp_endpoint_hold(struct sctp_endpoint *);
struct sctp_chunk **err_chunk);
int sctp_process_init(struct sctp_association *, sctp_cid_t cid,
const union sctp_addr *peer,
- sctp_init_chunk_t *init, unsigned int __nocast gfp);
+ sctp_init_chunk_t *init, gfp_t gfp);
__u32 sctp_generate_tag(const struct sctp_endpoint *);
__u32 sctp_generate_tsn(const struct sctp_endpoint *);
struct sctp_association *
sctp_association_new(const struct sctp_endpoint *, const struct sock *,
- sctp_scope_t scope, unsigned int __nocast gfp);
+ sctp_scope_t scope, gfp_t gfp);
void sctp_association_free(struct sctp_association *);
void sctp_association_put(struct sctp_association *);
void sctp_association_hold(struct sctp_association *);
const union sctp_addr *laddr);
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *,
const union sctp_addr *address,
- const unsigned int __nocast gfp,
+ const gfp_t gfp,
const int peer_state);
void sctp_assoc_del_peer(struct sctp_association *asoc,
const union sctp_addr *addr);
void sctp_assoc_set_primary(struct sctp_association *,
struct sctp_transport *);
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *,
- unsigned int __nocast);
+ gfp_t);
int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *,
struct sctp_cookie*,
- unsigned int __nocast gfp);
+ gfp_t gfp);
int sctp_cmp_addr_exact(const union sctp_addr *ss1,
const union sctp_addr *ss2);
__u16 error,
__u16 outbound,
__u16 inbound,
- unsigned int __nocast gfp);
+ gfp_t gfp);
struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
const struct sctp_association *asoc,
int flags,
int state,
int error,
- unsigned int __nocast gfp);
+ gfp_t gfp);
struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
const struct sctp_association *asoc,
struct sctp_chunk *chunk,
__u16 flags,
- unsigned int __nocast gfp);
+ gfp_t gfp);
struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
const struct sctp_association *asoc,
struct sctp_chunk *chunk,
__u16 flags,
__u32 error,
- unsigned int __nocast gfp);
+ gfp_t gfp);
struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
const struct sctp_association *asoc,
__u16 flags,
- unsigned int __nocast gfp);
+ gfp_t gfp);
struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
const struct sctp_association *asoc,
- __u32 indication, unsigned int __nocast gfp);
+ __u32 indication, gfp_t gfp);
struct sctp_ulpevent *sctp_ulpevent_make_adaption_indication(
- const struct sctp_association *asoc, unsigned int __nocast gfp);
+ const struct sctp_association *asoc, gfp_t gfp);
struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
struct sctp_chunk *chunk,
- unsigned int __nocast gfp);
+ gfp_t gfp);
void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
struct msghdr *);
void sctp_ulpq_free(struct sctp_ulpq *);
/* Add a new DATA chunk for processing. */
-int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *,
- unsigned int __nocast);
+int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
/* Add a new event for propagation to the ULP. */
int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sctp_ulpevent *ev);
/* Renege previously received chunks. */
-void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *,
- unsigned int __nocast);
+void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
/* Perform partial delivery. */
-void sctp_ulpq_partial_delivery(struct sctp_ulpq *, struct sctp_chunk *,
- unsigned int __nocast);
+void sctp_ulpq_partial_delivery(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
/* Abort the partial delivery. */
-void sctp_ulpq_abort_pd(struct sctp_ulpq *, unsigned int __nocast);
+void sctp_ulpq_abort_pd(struct sctp_ulpq *, gfp_t);
/* Clear the partial data delivery condition on this socket. */
int sctp_clear_pd(struct sock *sk);
#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
extern struct sock *sk_alloc(int family,
- unsigned int __nocast priority,
+ gfp_t priority,
struct proto *prot, int zero_it);
extern void sk_free(struct sock *sk);
extern struct sock *sk_clone(const struct sock *sk,
- const unsigned int __nocast priority);
+ const gfp_t priority);
extern struct sk_buff *sock_wmalloc(struct sock *sk,
unsigned long size, int force,
- unsigned int __nocast priority);
+ gfp_t priority);
extern struct sk_buff *sock_rmalloc(struct sock *sk,
unsigned long size, int force,
- unsigned int __nocast priority);
+ gfp_t priority);
extern void sock_wfree(struct sk_buff *skb);
extern void sock_rfree(struct sk_buff *skb);
int noblock,
int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size,
- unsigned int __nocast priority);
+ gfp_t priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk);
static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
int size, int mem,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
struct sk_buff *skb;
int hdr_len;
static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
int size,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
return sk_stream_alloc_pskb(sk, size, 0, gfp);
}
return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
}
-static inline unsigned int __nocast gfp_any(void)
+static inline gfp_t gfp_any(void)
{
return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
-extern void tcp_send_active_reset(struct sock *sk,
- unsigned int __nocast priority);
+extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
}
#endif
-struct xfrm_policy *xfrm_policy_alloc(unsigned int __nocast gfp);
+struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp);
extern int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*), void *);
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
struct xfrm_policy *xfrm_policy_bysel(int dir, struct xfrm_selector *sel,
u32 remote_qpn, u16 pkey_index,
struct ib_ah *ah, int rmpp_active,
int hdr_len, int data_len,
- unsigned int __nocast gfp_mask);
+ gfp_t gfp_mask);
/**
* ib_free_send_mad - Returns data buffers used to send a MAD.
int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
struct ib_sa_path_rec *rec,
ib_sa_comp_mask comp_mask,
- int timeout_ms, unsigned int __nocast gfp_mask,
+ int timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_path_rec *resp,
void *context),
u8 method,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask,
- int timeout_ms, unsigned int __nocast gfp_mask,
+ int timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_mcmember_rec *resp,
void *context),
u8 method,
struct ib_sa_service_rec *rec,
ib_sa_comp_mask comp_mask,
- int timeout_ms, unsigned int __nocast gfp_mask,
+ int timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_service_rec *resp,
void *context),
ib_sa_mcmember_rec_set(struct ib_device *device, u8 port_num,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask,
- int timeout_ms, unsigned int __nocast gfp_mask,
+ int timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_mcmember_rec *resp,
void *context),
ib_sa_mcmember_rec_delete(struct ib_device *device, u8 port_num,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask,
- int timeout_ms, unsigned int __nocast gfp_mask,
+ int timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_mcmember_rec *resp,
void *context),
size_t sioc,
struct kvec *siov,
uint8_t rxhdr_flags,
- unsigned int __nocast alloc_flags,
+ gfp_t alloc_flags,
int dup_data,
size_t *size_sent);
uint8_t type,
int count,
struct kvec *diov,
- unsigned int __nocast alloc_flags,
+ gfp_t alloc_flags,
struct rxrpc_message **_msg);
extern int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg);
void snd_memory_done(void);
int snd_memory_info_init(void);
int snd_memory_info_done(void);
-void *snd_hidden_kmalloc(size_t size, unsigned int __nocast flags);
-void *snd_hidden_kzalloc(size_t size, unsigned int __nocast flags);
-void *snd_hidden_kcalloc(size_t n, size_t size, unsigned int __nocast flags);
+void *snd_hidden_kmalloc(size_t size, gfp_t flags);
+void *snd_hidden_kzalloc(size_t size, gfp_t flags);
+void *snd_hidden_kcalloc(size_t n, size_t size, gfp_t flags);
void snd_hidden_kfree(const void *obj);
void *snd_hidden_vmalloc(unsigned long size);
void snd_hidden_vfree(void *obj);
-char *snd_hidden_kstrdup(const char *s, unsigned int __nocast flags);
+char *snd_hidden_kstrdup(const char *s, gfp_t flags);
#define kmalloc(size, flags) snd_hidden_kmalloc(size, flags)
#define kzalloc(size, flags) snd_hidden_kzalloc(size, flags)
#define kcalloc(n, size, flags) snd_hidden_kcalloc(n, size, flags)
#ifdef CONFIG_SND_DEBUG_MEMORY
#include <linux/slab.h>
#include <linux/vmalloc.h>
-void *snd_wrapper_kmalloc(size_t, unsigned int __nocast);
+void *snd_wrapper_kmalloc(size_t, gfp_t);
#undef kmalloc
void snd_wrapper_kfree(const void *);
#undef kfree
}
static struct audit_buffer * audit_buffer_alloc(struct audit_context *ctx,
- unsigned int __nocast gfp_mask, int type)
+ gfp_t gfp_mask, int type)
{
unsigned long flags;
struct audit_buffer *ab = NULL;
* GFP_USER - only nodes in current tasks mems allowed ok.
**/
-int cpuset_zone_allowed(struct zone *z, unsigned int __nocast gfp_mask)
+int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
int node; /* node that zone z is on */
const struct cpuset *cs; /* current cpuset ancestors */
* struct kfifo with kfree().
*/
struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
- unsigned int __nocast gfp_mask, spinlock_t *lock)
+ gfp_t gfp_mask, spinlock_t *lock)
{
struct kfifo *fifo;
*
* The size will be rounded-up to a power of 2.
*/
-struct kfifo *kfifo_alloc(unsigned int size, unsigned int __nocast gfp_mask, spinlock_t *lock)
+struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock)
{
unsigned char *buffer;
struct kfifo *ret;
return sig;
}
-static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __nocast flags,
+static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
int override_rlimit)
{
struct sigqueue *q = NULL;
* success, return zero, with preemption disabled. On error, return -ENOMEM
* with preemption not disabled.
*/
-int radix_tree_preload(unsigned int __nocast gfp_mask)
+int radix_tree_preload(gfp_t gfp_mask)
{
struct radix_tree_preload *rtp;
struct radix_tree_node *node;
}
static struct ts_config *bm_init(const void *pattern, unsigned int len,
- unsigned int __nocast gfp_mask)
+ gfp_t gfp_mask)
{
struct ts_config *conf;
struct ts_bm *bm;
}
static struct ts_config *fsm_init(const void *pattern, unsigned int len,
- unsigned int __nocast gfp_mask)
+ gfp_t gfp_mask)
{
int i, err = -EINVAL;
struct ts_config *conf;
}
static struct ts_config *kmp_init(const void *pattern, unsigned int len,
- unsigned int __nocast gfp_mask)
+ gfp_t gfp_mask)
{
struct ts_config *conf;
struct ts_kmp *kmp;
static mempool_t *page_pool, *isa_page_pool;
-static void *page_pool_alloc(unsigned int __nocast gfp_mask, void *data)
+static void *page_pool_alloc(gfp_t gfp_mask, void *data)
{
unsigned int gfp = gfp_mask | (unsigned int) (long) data;
}
/* Return a zonelist representing a mempolicy */
-static struct zonelist *zonelist_policy(unsigned int __nocast gfp, struct mempolicy *policy)
+static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
{
int nd;
/* Allocate a page in interleaved policy.
Own path because it needs to do special accounting. */
-static struct page *alloc_page_interleave(unsigned int __nocast gfp, unsigned order, unsigned nid)
+static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, unsigned nid)
{
struct zonelist *zl;
struct page *page;
* Should be called with the mm_sem of the vma hold.
*/
struct page *
-alloc_page_vma(unsigned int __nocast gfp, struct vm_area_struct *vma, unsigned long addr)
+alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
{
struct mempolicy *pol = get_vma_policy(current, vma, addr);
* 1) it's ok to take cpuset_sem (can WAIT), and
* 2) allocating for current task (not interrupt).
*/
-struct page *alloc_pages_current(unsigned int __nocast gfp, unsigned order)
+struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
struct mempolicy *pol = current->mempolicy;
* while this function is running. mempool_alloc() & mempool_free()
* might be called (eg. from IRQ contexts) while this function executes.
*/
-int mempool_resize(mempool_t *pool, int new_min_nr, unsigned int __nocast gfp_mask)
+int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
{
void *element;
void **new_elements;
* *never* fails when called from process contexts. (it might
* fail if called from an IRQ context.)
*/
-void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask)
+void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
void *element;
unsigned long flags;
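The kerneldoc fragment above this hunk notes that mempool_alloc() never fails for sleeping callers in process context: if the underlying allocation fails it falls back to the pool's reserved elements and, when those are exhausted, waits for one to be freed. A hedged sketch of a typical caller, using the mempool_alloc_slab()/mempool_free_slab() helpers converted elsewhere in this patch; struct toy_req, toy_cachep and toy_pool are hypothetical names, not from the tree:

#include <linux/slab.h>
#include <linux/mempool.h>

struct toy_req { int tag; };

static kmem_cache_t *toy_cachep;
static mempool_t *toy_pool;

static int toy_pool_init(void)
{
	toy_cachep = kmem_cache_create("toy_req", sizeof(struct toy_req),
				       0, 0, NULL, NULL);
	if (!toy_cachep)
		return -ENOMEM;
	/* keep at least 16 elements in reserve for low-memory situations */
	toy_pool = mempool_create(16, mempool_alloc_slab, mempool_free_slab,
				  toy_cachep);
	if (!toy_pool)
		return -ENOMEM;
	return 0;
}

static struct toy_req *toy_get(gfp_t gfp)
{
	/* GFP_KERNEL from process context sleeps rather than failing;
	 * GFP_ATOMIC may return NULL once the reserve is exhausted. */
	return mempool_alloc(toy_pool, gfp);
}
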
/*
* A commonly used alloc and free fn.
*/
-void *mempool_alloc_slab(unsigned int __nocast gfp_mask, void *pool_data)
+void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
kmem_cache_t *mem = (kmem_cache_t *) pool_data;
return kmem_cache_alloc(mem, gfp_mask);
kfree(addr);
}
-void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask,
- pgprot_t prot)
+void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
/*
* kmalloc doesn't like __GFP_HIGHMEM for some reason
* OR try to be smart about which process to kill. Note that we
* don't have to be perfect here, we just have to be good.
*/
-void out_of_memory(unsigned int __nocast gfp_mask, int order)
+void out_of_memory(gfp_t gfp_mask, int order)
{
struct mm_struct *mm = NULL;
task_t * p;
free_hot_cold_page(page, 1);
}
-static inline void prep_zero_page(struct page *page, int order, unsigned int __nocast gfp_flags)
+static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
int i;
* or two.
*/
static struct page *
-buffered_rmqueue(struct zone *zone, int order, unsigned int __nocast gfp_flags)
+buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
{
unsigned long flags;
struct page *page = NULL;
}
static inline int
-should_reclaim_zone(struct zone *z, unsigned int gfp_mask)
+should_reclaim_zone(struct zone *z, gfp_t gfp_mask)
{
if (!z->reclaim_pages)
return 0;
* This is the 'heart' of the zoned buddy allocator.
*/
struct page * fastcall
-__alloc_pages(unsigned int __nocast gfp_mask, unsigned int order,
+__alloc_pages(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist)
{
const int wait = gfp_mask & __GFP_WAIT;
/*
* Common helper functions.
*/
-fastcall unsigned long __get_free_pages(unsigned int __nocast gfp_mask, unsigned int order)
+fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
struct page * page;
page = alloc_pages(gfp_mask, order);
EXPORT_SYMBOL(__get_free_pages);
-fastcall unsigned long get_zeroed_page(unsigned int __nocast gfp_mask)
+fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
{
struct page * page;
#include <linux/writeback.h>
#include <asm/pgtable.h>
-static struct bio *get_swap_bio(unsigned int __nocast gfp_flags, pgoff_t index,
+static struct bio *get_swap_bio(gfp_t gfp_flags, pgoff_t index,
struct page *page, bio_end_io_t end_io)
{
struct bio *bio;
}
static inline struct page *
-shmem_alloc_page(unsigned int __nocast gfp,struct shmem_inode_info *info,
- unsigned long idx)
+shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
{
return alloc_page(gfp | __GFP_ZERO);
}
return cachep->array[smp_processor_id()];
}
-static inline kmem_cache_t *__find_general_cachep(size_t size,
- unsigned int __nocast gfpflags)
+static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
{
struct cache_sizes *csizep = malloc_sizes;
return csizep->cs_cachep;
}
-kmem_cache_t *kmem_find_general_cachep(size_t size,
- unsigned int __nocast gfpflags)
+kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
{
return __find_general_cachep(size, gfpflags);
}
* did not request dmaable memory, we might get it, but that
* would be relatively rare and ignorable.
*/
-static void *kmem_getpages(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
+static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
{
struct page *page;
void *addr;
/* Get the memory for a slab management obj. */
static struct slab* alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
- int colour_off, unsigned int __nocast local_flags)
+ int colour_off, gfp_t local_flags)
{
struct slab *slabp;
* Grow (by 1) the number of slabs within a cache. This is called by
* kmem_cache_alloc() when there are no active objs left in a cache.
*/
-static int cache_grow(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
+static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
{
struct slab *slabp;
void *objp;
#define check_slabp(x,y) do { } while(0)
#endif
-static void *cache_alloc_refill(kmem_cache_t *cachep, unsigned int __nocast flags)
+static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
{
int batchcount;
struct kmem_list3 *l3;
}
static inline void
-cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags)
+cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
{
might_sleep_if(flags & __GFP_WAIT);
#if DEBUG
#if DEBUG
static void *
cache_alloc_debugcheck_after(kmem_cache_t *cachep,
- unsigned int __nocast flags, void *objp, void *caller)
+ gfp_t flags, void *objp, void *caller)
{
if (!objp)
return objp;
#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
#endif
-static inline void *____cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
+static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
{
void* objp;
struct array_cache *ac;
return objp;
}
-static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
+static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
{
unsigned long save_flags;
void* objp;
* Allocate an object from this cache. The flags are only relevant
* if the cache has no available objects.
*/
-void *kmem_cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
+void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags)
{
return __cache_alloc(cachep, flags);
}
* New and improved: it will now make sure that the object gets
* put on the correct node list so that there is no false sharing.
*/
-void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
+void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
{
unsigned long save_flags;
void *ptr;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
-void *kmalloc_node(size_t size, unsigned int __nocast flags, int node)
+void *kmalloc_node(size_t size, gfp_t flags, int node)
{
kmem_cache_t *cachep;
* platforms. For example, on i386, it means that the memory must come
* from the first 16MB.
*/
-void *__kmalloc(size_t size, unsigned int __nocast flags)
+void *__kmalloc(size_t size, gfp_t flags)
{
kmem_cache_t *cachep;
* @size: how many bytes of memory are required.
* @flags: the type of memory to allocate.
*/
-void *kzalloc(size_t size, unsigned int __nocast flags)
+void *kzalloc(size_t size, gfp_t flags)
{
void *ret = kmalloc(size, flags);
if (ret)
* @s: the string to duplicate
* @gfp: the GFP mask used in the kmalloc() call when allocating memory
*/
-char *kstrdup(const char *s, unsigned int __nocast gfp)
+char *kstrdup(const char *s, gfp_t gfp)
{
size_t len;
char *buf;
* but sets SwapCache flag and private instead of mapping and index.
*/
static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
- unsigned int __nocast gfp_mask)
+ gfp_t gfp_mask)
{
int error;
EXPORT_SYMBOL(vmap);
-void *__vmalloc_area(struct vm_struct *area, unsigned int __nocast gfp_mask, pgprot_t prot)
+void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
struct page **pages;
unsigned int nr_pages, array_size, i;
* allocator with @gfp_mask flags. Map them into contiguous
* kernel virtual space, using a pagetable protection of @prot.
*/
-void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, pgprot_t prot)
+void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
struct vm_struct *area;
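
__vmalloc() is the flag-taking core of vmalloc(); the common invocation looks roughly like the sketch below. Highmem pages are acceptable here because the buffer is only ever reached through the vmalloc mapping.

#include <linux/vmalloc.h>

static void *big_buffer_alloc(unsigned long size)
{
	/* approximately what a plain vmalloc(size) expands to */
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}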
struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
- unsigned int __nocast gfp_flags)
+ gfp_t gfp_flags)
{
struct sock *sk = sk_atm(vcc);
int guess = atm_guess_pdu2truesize(pdu_size);
.obj_size = sizeof(struct l2cap_pinfo)
};
-static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, unsigned int __nocast prio)
+static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
struct sock *sk;
d->rx_credits = RFCOMM_DEFAULT_CREDITS;
}
-struct rfcomm_dlc *rfcomm_dlc_alloc(unsigned int __nocast prio)
+struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio)
{
struct rfcomm_dlc *d = kmalloc(sizeof(*d), prio);
if (!d)
.obj_size = sizeof(struct rfcomm_pinfo)
};
-static struct sock *rfcomm_sock_alloc(struct socket *sock, int proto, unsigned int __nocast prio)
+static struct sock *rfcomm_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
struct rfcomm_dlc *d;
struct sock *sk;
skb->destructor = rfcomm_wfree;
}
-static struct sk_buff *rfcomm_wmalloc(struct rfcomm_dev *dev, unsigned long size, unsigned int __nocast priority)
+static struct sk_buff *rfcomm_wmalloc(struct rfcomm_dev *dev, unsigned long size, gfp_t priority)
{
if (atomic_read(&dev->wmem_alloc) < rfcomm_room(dev->dlc)) {
struct sk_buff *skb = alloc_skb(size, priority);
.obj_size = sizeof(struct sco_pinfo)
};
-static struct sock *sco_sock_alloc(struct socket *sock, int proto, unsigned int __nocast prio)
+static struct sock *sco_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
struct sock *sk;
#endif
/* Keep head the same: replace data */
-int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp_mask)
+int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask)
{
unsigned int size;
u8 *data;
* Buffers may only be allocated from interrupts using a @gfp_mask of
* %GFP_ATOMIC.
*/
-struct sk_buff *__alloc_skb(unsigned int size, unsigned int __nocast gfp_mask,
+struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
int fclone)
{
struct sk_buff *skb;
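
As the comment above says, skb allocation from interrupt context has to use GFP_ATOMIC; a minimal receive-path sketch (the helper is hypothetical):

#include <linux/skbuff.h>

static struct sk_buff *rx_alloc(unsigned int len)
{
	/* called from a driver interrupt handler, so it must not sleep */
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return NULL;	/* out of memory: drop the frame */
	return skb;
}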
*/
struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
unsigned int size,
- unsigned int __nocast gfp_mask)
+ gfp_t gfp_mask)
{
struct sk_buff *skb;
u8 *data;
* %GFP_ATOMIC.
*/
-struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
+struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
struct sk_buff *n;
* header is going to be modified. Use pskb_copy() instead.
*/
-struct sk_buff *skb_copy(const struct sk_buff *skb, unsigned int __nocast gfp_mask)
+struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
int headerlen = skb->data - skb->head;
/*
* The returned buffer has a reference count of 1.
*/
-struct sk_buff *pskb_copy(struct sk_buff *skb, unsigned int __nocast gfp_mask)
+struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
{
/*
* Allocate the copy buffer
*/
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
- unsigned int __nocast gfp_mask)
+ gfp_t gfp_mask)
{
int i;
u8 *data;
*/
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
int newheadroom, int newtailroom,
- unsigned int __nocast gfp_mask)
+ gfp_t gfp_mask)
{
/*
* Allocate the copy buffer
* @prot: struct proto associated with this new sock instance
* @zero_it: if we should zero the newly allocated sock
*/
-struct sock *sk_alloc(int family, unsigned int __nocast priority,
+struct sock *sk_alloc(int family, gfp_t priority,
struct proto *prot, int zero_it)
{
struct sock *sk = NULL;
module_put(owner);
}
-struct sock *sk_clone(const struct sock *sk, const unsigned int __nocast priority)
+struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
{
struct sock *newsk = sk_alloc(sk->sk_family, priority, sk->sk_prot, 0);
* Allocate a skb from the socket's send buffer.
*/
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
- unsigned int __nocast priority)
+ gfp_t priority)
{
if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
struct sk_buff * skb = alloc_skb(size, priority);
* Allocate a skb from the socket's receive buffer.
*/
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
- unsigned int __nocast priority)
+ gfp_t priority)
{
if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
struct sk_buff *skb = alloc_skb(size, priority);
/*
* Allocate a memory block from the socket's option memory buffer.
*/
-void *sock_kmalloc(struct sock *sk, int size, unsigned int __nocast priority)
+void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
if ((unsigned)size <= sysctl_optmem_max &&
atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
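
The socket helpers normally take their gfp_t from sk->sk_allocation, so sockets used from atomic contexts keep working; a hedged sketch of a caller (the function name is invented):

#include <net/sock.h>

static int foo_queue_data(struct sock *sk, unsigned long len)
{
	/* sk->sk_allocation carries the flags appropriate for this socket */
	struct sk_buff *skb = sock_wmalloc(sk, len, 0, sk->sk_allocation);

	if (!skb)
		return -ENOBUFS;
	/* ... fill in the data and transmit ... */
	return 0;
}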
}
struct dccp_ackvec *dccp_ackvec_alloc(const unsigned int len,
- const unsigned int __nocast priority)
+ const gfp_t priority)
{
struct dccp_ackvec *av = kmalloc(sizeof(*av) + len, priority);
#ifdef CONFIG_IP_DCCP_ACKVEC
extern struct dccp_ackvec *dccp_ackvec_alloc(unsigned int len,
- const unsigned int __nocast priority);
+ const gfp_t priority);
extern void dccp_ackvec_free(struct dccp_ackvec *av);
extern int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk,
}
#else /* CONFIG_IP_DCCP_ACKVEC */
static inline struct dccp_ackvec *dccp_ackvec_alloc(unsigned int len,
- const unsigned int __nocast priority)
+ const gfp_t priority)
{
return NULL;
}
static inline struct dccp_li_hist_entry *
dccp_li_hist_entry_new(struct dccp_li_hist *hist,
- const unsigned int __nocast prio)
+ const gfp_t prio)
{
return kmem_cache_alloc(hist->dccplih_slab, prio);
}
static inline struct dccp_tx_hist_entry *
dccp_tx_hist_entry_new(struct dccp_tx_hist *hist,
- const unsigned int __nocast prio)
+ const gfp_t prio)
{
struct dccp_tx_hist_entry *entry = kmem_cache_alloc(hist->dccptxh_slab,
prio);
const struct sock *sk,
const u32 ndp,
const struct sk_buff *skb,
- const unsigned int __nocast prio)
+ const gfp_t prio)
{
struct dccp_rx_hist_entry *entry = kmem_cache_alloc(hist->dccprxh_slab,
prio);
.obj_size = sizeof(struct dn_sock),
};
-static struct sock *dn_alloc_sock(struct socket *sock,
- unsigned int __nocast gfp)
+static struct sock *dn_alloc_sock(struct socket *sock, gfp_t gfp)
{
struct dn_scp *scp;
struct sock *sk = sk_alloc(PF_DECnet, gfp, &dn_proto, 1);
return rv;
}
-static int dn_confirm_accept(struct sock *sk, long *timeo,
- unsigned int __nocast allocation)
+static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
{
struct dn_scp *scp = DN_SK(sk);
DEFINE_WAIT(wait);
* The eventual aim is for each socket to have a cached header size
* for its outgoing packets, and to set hdr from this when sk != NULL.
*/
-struct sk_buff *dn_alloc_skb(struct sock *sk, int size,
- unsigned int __nocast pri)
+struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri)
{
struct sk_buff *skb;
int hdr = 64;
* Returns: The number of times the packet has been sent previously
*/
static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
struct dn_skb_cb *cb = DN_SKB_CB(skb);
struct sk_buff *skb2;
}
void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb,
- unsigned int __nocast gfp, int oth)
+ gfp_t gfp, int oth)
{
struct dn_scp *scp = DN_SK(sk);
struct dn_skb_cb *cb = DN_SKB_CB(skb);
return 0;
}
-void dn_send_conn_conf(struct sock *sk, unsigned int __nocast gfp)
+void dn_send_conn_conf(struct sock *sk, gfp_t gfp)
{
struct dn_scp *scp = DN_SK(sk);
struct sk_buff *skb = NULL;
static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg,
- unsigned short reason, unsigned int __nocast gfp,
+ unsigned short reason, gfp_t gfp,
struct dst_entry *dst,
int ddl, unsigned char *dd, __u16 rem, __u16 loc)
{
void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg,
- unsigned short reason, unsigned int __nocast gfp)
+ unsigned short reason, gfp_t gfp)
{
struct dn_scp *scp = DN_SK(sk);
int ddl = 0;
{
struct dn_skb_cb *cb = DN_SKB_CB(skb);
int ddl = 0;
- unsigned int __nocast gfp = GFP_ATOMIC;
+ gfp_t gfp = GFP_ATOMIC;
dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb->dst, ddl,
NULL, cb->src_port, cb->dst_port);
struct dn_scp *scp = DN_SK(sk);
struct sk_buff *skb;
unsigned char *ptr;
- unsigned int __nocast gfp = GFP_ATOMIC;
+ gfp_t gfp = GFP_ATOMIC;
if ((skb = dn_alloc_skb(sk, DN_MAX_NSP_DATA_HEADER + 2, gfp)) == NULL)
return;
unsigned char menuver;
struct dn_skb_cb *cb;
unsigned char type = 1;
- unsigned int __nocast allocation =
- (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC;
+ gfp_t allocation = (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC;
struct sk_buff *skb = dn_alloc_skb(sk, 200, allocation);
if (!skb)
}
static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
- unsigned int __nocast gfp_mask)
+ gfp_t gfp_mask)
{
struct ieee80211_txb *txb;
int i;
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);
struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
- const unsigned int __nocast priority)
+ const gfp_t priority)
{
struct sock *newsk = sk_clone(sk, priority);
/*
* Replace a segment of data with a new segment
*/
-int ip_vs_skb_replace(struct sk_buff *skb, unsigned int __nocast pri,
+int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
char *o_buf, int o_len, char *n_buf, int n_len)
{
struct iphdr *iph;
* was unread data in the receive queue. This behavior is recommended
* by draft-ietf-tcpimpl-prob-03.txt section 3.10. -DaveM
*/
-void tcp_send_active_reset(struct sock *sk, unsigned int __nocast priority)
+void tcp_send_active_reset(struct sock *sk, gfp_t priority)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
}
static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
- unsigned int __nocast allocation, struct sock *sk)
+ gfp_t allocation, struct sock *sk)
{
int err = -ENOBUFS;
#define BROADCAST_ONE 1
#define BROADCAST_REGISTERED 2
#define BROADCAST_PROMISC_ONLY 4
-static int pfkey_broadcast(struct sk_buff *skb, unsigned int __nocast allocation,
+static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
int broadcast_flags, struct sock *one_sk)
{
struct sock *sk;
}
static struct sk_buff *compose_sadb_supported(struct sadb_msg *orig,
- unsigned int __nocast allocation)
+ gfp_t allocation)
{
struct sk_buff *skb;
struct sadb_msg *hdr;
* Allocates a LLC sock and initializes it. Returns the new LLC sock
* or %NULL if there's no memory available for one
*/
-struct sock *llc_sk_alloc(int family, unsigned int __nocast priority,
- struct proto *prot)
+struct sock *llc_sk_alloc(int family, gfp_t priority, struct proto *prot)
{
struct sock *sk = sk_alloc(family, priority, prot, 1);
int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo)
{
- unsigned int __nocast allocation =
- in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
+ gfp_t allocation = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
int err = 0;
NETLINK_CB(skb).dst_group = group;
}
static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
- unsigned int __nocast allocation)
+ gfp_t allocation)
{
int delta;
}
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
- u32 group, unsigned int __nocast allocation)
+ u32 group, gfp_t allocation)
{
struct netlink_broadcast_data info;
struct hlist_node *node;
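
netlink_broadcast() now takes a gfp_t for the clone and trim allocations it may perform; a minimal sketch of a kernel-side notifier (the socket and group are assumed to come from elsewhere):

#include <linux/netlink.h>

static int foo_notify(struct sock *nlsk, struct sk_buff *skb, u32 group)
{
	/* process context here, so GFP_KERNEL; softirq callers pass GFP_ATOMIC */
	return netlink_broadcast(nlsk, skb, 0, group, GFP_KERNEL);
}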
size_t sioc,
struct kvec *siov,
u8 rxhdr_flags,
- unsigned int __nocast alloc_flags,
+ gfp_t alloc_flags,
int dup_data,
size_t *size_sent)
{
uint8_t type,
int dcount,
struct kvec diov[],
- unsigned int __nocast alloc_flags,
+ gfp_t alloc_flags,
struct rxrpc_message **_msg)
{
struct rxrpc_message *msg;
const struct sctp_endpoint *ep,
const struct sock *sk,
sctp_scope_t scope,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
struct sctp_sock *sp;
int i;
struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
const struct sock *sk,
sctp_scope_t scope,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
struct sctp_association *asoc;
/* Add a transport address to an association. */
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
const union sctp_addr *addr,
- const unsigned int __nocast gfp,
+ const gfp_t gfp,
const int peer_state)
{
struct sctp_transport *peer;
* local endpoint and the remote peer.
*/
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
sctp_scope_t scope;
int flags;
/* Build the association's bind address list from the cookie. */
int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
struct sctp_cookie *cookie,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
int var_size3 = cookie->raw_addr_list_len;
/* Forward declarations for internal helpers. */
static int sctp_copy_one_addr(struct sctp_bind_addr *, union sctp_addr *,
- sctp_scope_t scope, unsigned int __nocast gfp,
+ sctp_scope_t scope, gfp_t gfp,
int flags);
static void sctp_bind_addr_clean(struct sctp_bind_addr *);
*/
int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
const struct sctp_bind_addr *src,
- sctp_scope_t scope, unsigned int __nocast gfp,
+ sctp_scope_t scope, gfp_t gfp,
int flags)
{
struct sctp_sockaddr_entry *addr;
/* Add an address to the bind address list in the SCTP_bind_addr structure. */
int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
struct sctp_sockaddr_entry *addr;
*/
union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp,
int *addrs_len,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
union sctp_params addrparms;
union sctp_params retval;
* address parameters).
*/
int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list,
- int addrs_len, __u16 port, unsigned int __nocast gfp)
+ int addrs_len, __u16 port, gfp_t gfp)
{
union sctp_addr_param *rawaddr;
struct sctp_paramhdr *param;
/* Copy out addresses from the global local address list. */
static int sctp_copy_one_addr(struct sctp_bind_addr *dest,
union sctp_addr *addr,
- sctp_scope_t scope, unsigned int __nocast gfp,
+ sctp_scope_t scope, gfp_t gfp,
int flags)
{
int error = 0;
}
/* Allocate and initialize datamsg. */
-SCTP_STATIC struct sctp_datamsg *sctp_datamsg_new(unsigned int __nocast gfp)
+SCTP_STATIC struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp)
{
struct sctp_datamsg *msg;
msg = kmalloc(sizeof(struct sctp_datamsg), gfp);
*/
static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
struct sock *sk,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
struct sctp_sock *sp = sctp_sk(sk);
memset(ep, 0, sizeof(struct sctp_endpoint));
/* Create a sctp_endpoint with all that boring stuff initialized.
* Returns NULL if there isn't enough memory.
*/
-struct sctp_endpoint *sctp_endpoint_new(struct sock *sk,
- unsigned int __nocast gfp)
+struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp)
{
struct sctp_endpoint *ep;
/* Copy the local addresses which are valid for 'scope' into 'bp'. */
int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
- unsigned int __nocast gfp, int copy_flags)
+ gfp_t gfp, int copy_flags)
{
struct sctp_sockaddr_entry *addr;
int error = 0;
static int sctp_process_param(struct sctp_association *asoc,
union sctp_params param,
const union sctp_addr *peer_addr,
- unsigned int __nocast gfp);
+ gfp_t gfp);
/* What was the inbound interface for this chunk? */
int sctp_chunk_iif(const struct sctp_chunk *chunk)
*/
struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
const struct sctp_bind_addr *bp,
- unsigned int __nocast gfp, int vparam_len)
+ gfp_t gfp, int vparam_len)
{
sctp_inithdr_t init;
union sctp_params addrs;
struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
- unsigned int __nocast gfp, int unkparam_len)
+ gfp_t gfp, int unkparam_len)
{
sctp_inithdr_t initack;
struct sctp_chunk *retval;
/* Create a CLOSED association to use with an incoming packet. */
struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *ep,
struct sctp_chunk *chunk,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
struct sctp_association *asoc;
struct sk_buff *skb;
struct sctp_association *sctp_unpack_cookie(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
- struct sctp_chunk *chunk, unsigned int __nocast gfp,
+ struct sctp_chunk *chunk, gfp_t gfp,
int *error, struct sctp_chunk **errp)
{
struct sctp_association *retval = NULL;
*/
int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
const union sctp_addr *peer_addr,
- sctp_init_chunk_t *peer_init, unsigned int __nocast gfp)
+ sctp_init_chunk_t *peer_init, gfp_t gfp)
{
union sctp_params param;
struct sctp_transport *transport;
static int sctp_process_param(struct sctp_association *asoc,
union sctp_params param,
const union sctp_addr *peer_addr,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
union sctp_addr addr;
int i;
void *event_arg,
sctp_disposition_t status,
sctp_cmd_seq_t *commands,
- unsigned int __nocast gfp);
+ gfp_t gfp);
static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
sctp_state_t state,
struct sctp_endpoint *ep,
void *event_arg,
sctp_disposition_t status,
sctp_cmd_seq_t *commands,
- unsigned int __nocast gfp);
+ gfp_t gfp);
/********************************************************************
* Helper functions
struct sctp_association *asoc,
struct sctp_chunk *chunk,
sctp_init_chunk_t *peer_init,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
int error;
struct sctp_endpoint *ep,
struct sctp_association *asoc,
void *event_arg,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
sctp_cmd_seq_t commands;
const sctp_sm_table_entry_t *state_fn;
void *event_arg,
sctp_disposition_t status,
sctp_cmd_seq_t *commands,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
int error;
void *event_arg,
sctp_disposition_t status,
sctp_cmd_seq_t *commands,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
int error = 0;
int force;
* Allocate room to store at least 'len' contiguous TSNs.
*/
struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
struct sctp_ssnmap *retval;
int size;
/* Initialize a new transport from provided memory. */
static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
const union sctp_addr *addr,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
/* Copy in the address. */
peer->ipaddr = *addr;
/* Allocate and initialize a new transport. */
struct sctp_transport *sctp_transport_new(const union sctp_addr *addr,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
struct sctp_transport *transport;
/* Create a new sctp_ulpevent. */
SCTP_STATIC struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
struct sctp_ulpevent *event;
struct sk_buff *skb;
struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
const struct sctp_association *asoc,
__u16 flags, __u16 state, __u16 error, __u16 outbound,
- __u16 inbound, unsigned int __nocast gfp)
+ __u16 inbound, gfp_t gfp)
{
struct sctp_ulpevent *event;
struct sctp_assoc_change *sac;
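
The SCTP notification constructors take their gfp_t from whatever context the state machine runs in; with the signature above, a COMM_UP notification is built roughly as in this sketch (the wrapper and the stream counts are placeholders):

#include <net/sctp/sctp.h>

static void foo_notify_comm_up(struct sctp_association *asoc,
			       __u16 outbound, __u16 inbound)
{
	struct sctp_ulpevent *event;

	/* chunk processing may run in softirq context, hence GFP_ATOMIC */
	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_UP, 0,
						outbound, inbound, GFP_ATOMIC);
	/* ... enqueue the event towards the ULP ... */
}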
struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
const struct sctp_association *asoc,
const struct sockaddr_storage *aaddr,
- int flags, int state, int error, unsigned int __nocast gfp)
+ int flags, int state, int error, gfp_t gfp)
{
struct sctp_ulpevent *event;
struct sctp_paddr_change *spc;
*/
struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
const struct sctp_association *asoc, struct sctp_chunk *chunk,
- __u16 flags, unsigned int __nocast gfp)
+ __u16 flags, gfp_t gfp)
{
struct sctp_ulpevent *event;
struct sctp_remote_error *sre;
*/
struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
const struct sctp_association *asoc, struct sctp_chunk *chunk,
- __u16 flags, __u32 error, unsigned int __nocast gfp)
+ __u16 flags, __u32 error, gfp_t gfp)
{
struct sctp_ulpevent *event;
struct sctp_send_failed *ssf;
*/
struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
const struct sctp_association *asoc,
- __u16 flags, unsigned int __nocast gfp)
+ __u16 flags, gfp_t gfp)
{
struct sctp_ulpevent *event;
struct sctp_shutdown_event *sse;
* 5.3.1.6 SCTP_ADAPTION_INDICATION
*/
struct sctp_ulpevent *sctp_ulpevent_make_adaption_indication(
- const struct sctp_association *asoc, unsigned int __nocast gfp)
+ const struct sctp_association *asoc, gfp_t gfp)
{
struct sctp_ulpevent *event;
struct sctp_adaption_event *sai;
*/
struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
struct sctp_chunk *chunk,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
struct sctp_ulpevent *event = NULL;
struct sk_buff *skb;
*/
struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
const struct sctp_association *asoc, __u32 indication,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
struct sctp_ulpevent *event;
struct sctp_pdapi_event *pd;
/* Process an incoming DATA chunk. */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
struct sk_buff_head temp;
sctp_data_chunk_t *hdr;
/* Partial deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
struct sctp_chunk *chunk,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
struct sctp_ulpevent *event;
struct sctp_association *asoc;
/* Renege some packets to make room for an incoming chunk. */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
- unsigned int __nocast gfp)
+ gfp_t gfp)
{
struct sctp_association *asoc;
__u16 needed, freed;
/* Notify the application if an association is aborted and in
* partial delivery mode. Send up any pending received messages.
*/
-void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, unsigned int __nocast gfp)
+void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
struct sctp_ulpevent *ev = NULL;
struct sock *sk;
void *
rpc_malloc(struct rpc_task *task, size_t size)
{
- unsigned int __nocast gfp;
+ gfp_t gfp;
if (task->tk_flags & RPC_TASK_SWAPPER)
gfp = GFP_ATOMIC;
* SPD calls.
*/
-struct xfrm_policy *xfrm_policy_alloc(unsigned int __nocast gfp)
+struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
{
struct xfrm_policy *policy;
static void *snd_dma_hack_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle,
- unsigned int __nocast flags)
+ gfp_t flags)
{
void *ret;
u64 dma_mask, coherent_dma_mask;
}
}
-static void *__snd_kmalloc(size_t size, unsigned int __nocast flags, void *caller)
+static void *__snd_kmalloc(size_t size, gfp_t flags, void *caller)
{
unsigned long cpu_flags;
struct snd_alloc_track *t;
}
#define _snd_kmalloc(size, flags) __snd_kmalloc((size), (flags), __builtin_return_address(0));
-void *snd_hidden_kmalloc(size_t size, unsigned int __nocast flags)
+void *snd_hidden_kmalloc(size_t size, gfp_t flags)
{
return _snd_kmalloc(size, flags);
}
-void *snd_hidden_kzalloc(size_t size, unsigned int __nocast flags)
+void *snd_hidden_kzalloc(size_t size, gfp_t flags)
{
void *ret = _snd_kmalloc(size, flags);
if (ret)
}
EXPORT_SYMBOL(snd_hidden_kzalloc);
-void *snd_hidden_kcalloc(size_t n, size_t size, unsigned int __nocast flags)
+void *snd_hidden_kcalloc(size_t n, size_t size, gfp_t flags)
{
void *ret = NULL;
if (n != 0 && size > INT_MAX / n)
snd_wrapper_vfree(obj);
}
-char *snd_hidden_kstrdup(const char *s, unsigned int __nocast flags)
+char *snd_hidden_kstrdup(const char *s, gfp_t flags)
{
int len;
char *buf;
iwffff_xenv_t *ex,
char __user **data,
long *len,
- unsigned int __nocast gfp_mask)
+ gfp_t gfp_mask)
{
__u32 stype;
iwffff_env_record_t *rp, *rp_last;
#include <linux/fs.h>
#ifdef CONFIG_SND_DEBUG_MEMORY
-void *snd_wrapper_kmalloc(size_t size, unsigned int __nocast flags)
+void *snd_wrapper_kmalloc(size_t size, gfp_t flags)
{
return kmalloc(size, flags);
}
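
The payoff of threading gfp_t through all of these signatures is that allocation flags become a distinct type: once gfp_t carries a sparse annotation, mix-ups between sizes and flags can be reported at build time. A hedged illustration of the kind of caller this is meant to catch (struct foo is invented):

#include <linux/slab.h>

struct foo { int a; };

static void flags_example(void)
{
	struct foo *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);	/* fine: the flags slot gets a gfp_t */
	kfree(p);
	p = kmalloc(GFP_KERNEL, sizeof(*p));	/* swapped arguments: a plain size lands
						 * in the gfp_t slot, which sparse can flag */
	kfree(p);
}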