ptr = memblock_alloc_try_nid(pernodesize, PERCPU_PAGE_SIZE,
__pa(MAX_DMA_ADDRESS),
- BOOTMEM_ALLOC_ACCESSIBLE,
+ MEMBLOCK_ALLOC_ACCESSIBLE,
bestnode);
return ptr;
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
- BOOTMEM_ALLOC_ACCESSIBLE,
+ MEMBLOCK_ALLOC_ACCESSIBLE,
early_cpu_to_node(cpu));
}
cpu, size, __pa(ptr));
} else {
ptr = memblock_alloc_try_nid(size, align, goal,
- BOOTMEM_ALLOC_ACCESSIBLE, node);
+ MEMBLOCK_ALLOC_ACCESSIBLE, node);
pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
"%016lx\n", cpu, size, node, __pa(ptr));
}
cpu, size, __pa(ptr));
} else {
ptr = memblock_alloc_try_nid_nopanic(size, align, goal,
- BOOTMEM_ALLOC_ACCESSIBLE,
+ MEMBLOCK_ALLOC_ACCESSIBLE,
node);
pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
{
if (panic)
return memblock_alloc_try_nid(size, size,
- __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+ __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
else
return memblock_alloc_try_nid_nopanic(size, size,
- __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+ __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/mmdebug.h>
addr = memblock_alloc_try_nid_raw(
huge_page_size(h), huge_page_size(h),
- 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
+ 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
if (addr) {
/*
* Use the beginning of the huge page to store the
static __init void *early_alloc(size_t size, int node)
{
return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
- BOOTMEM_ALLOC_ACCESSIBLE, node);
+ MEMBLOCK_ALLOC_ACCESSIBLE, node);
}
static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
* hold the requested memory.
*
* The allocation is performed from memory region limited by
- * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
+ * memblock.current_limit if @max_addr == %MEMBLOCK_ALLOC_ACCESSIBLE.
*
* The memory block is aligned on %SMP_CACHE_BYTES if @align == 0.
*
* @min_addr: the lower bound of the memory region from where the allocation
* is preferred (phys address)
* @max_addr: the upper bound of the memory region from where the allocation
- * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
+ * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
* allocate only from memory limited by memblock.current_limit value
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node
*
* @min_addr: the lower bound of the memory region from where the allocation
* is preferred (phys address)
* @max_addr: the upper bound of the memory region from where the allocation
- * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
+ * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
* allocate only from memory limited by memblock.current_limit value
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node
*
* @min_addr: the lower bound of the memory region from where the allocation
* is preferred (phys address)
* @max_addr: the upper bound of the memory region from where the allocation
- * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
+ * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
* allocate only from memory limited by memblock.current_limit value
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node
*
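Reviewer note, not part of the patch: the kernel-doc above is the only prose in this series describing the @min_addr/@max_addr/@nid convention the converted call sites rely on. A minimal sketch of a caller under those rules follows; the helper name is made up for illustration, and the __pa(MAX_DMA_ADDRESS) lower bound simply copies the pattern used in the hunks above.

#include <linux/cache.h>	/* SMP_CACHE_BYTES */
#include <linux/memblock.h>
#include <linux/mm.h>		/* __pa() */
#include <asm/dma.h>		/* MAX_DMA_ADDRESS */

static void * __init example_early_alloc(size_t size, int nid)
{
	/*
	 * min_addr = __pa(MAX_DMA_ADDRESS) prefers memory above the DMA
	 * region; max_addr = MEMBLOCK_ALLOC_ACCESSIBLE bounds the search
	 * only by memblock.current_limit, as documented above; nid may be
	 * NUMA_NO_NODE to accept any node.  This variant panics on failure,
	 * while the _nopanic variant used elsewhere in the patch returns
	 * NULL instead.
	 */
	return memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
				      __pa(MAX_DMA_ADDRESS),
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}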
base = memblock_alloc_try_nid_nopanic(
table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
- BOOTMEM_ALLOC_ACCESSIBLE, nid);
+ MEMBLOCK_ALLOC_ACCESSIBLE, nid);
if (!base)
return -ENOMEM;
NODE_DATA(nid)->node_page_ext = base;
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
unsigned long goal)
{
return memblock_alloc_try_nid_raw(size, align, goal,
- BOOTMEM_ALLOC_ACCESSIBLE, node);
+ MEMBLOCK_ALLOC_ACCESSIBLE, node);
}
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
map = memblock_alloc_try_nid(size,
PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
- BOOTMEM_ALLOC_ACCESSIBLE, nid);
+ MEMBLOCK_ALLOC_ACCESSIBLE, nid);
return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
sparsemap_buf =
memblock_alloc_try_nid_raw(size, PAGE_SIZE,
__pa(MAX_DMA_ADDRESS),
- BOOTMEM_ALLOC_ACCESSIBLE, nid);
+ MEMBLOCK_ALLOC_ACCESSIBLE, nid);
sparsemap_buf_end = sparsemap_buf + size;
}