}
#define PG_dcache_dirty		PG_arch_1
-#define PG_dcache_cpu_shift	24
-#define PG_dcache_cpu_mask	(256 - 1)
+#define PG_dcache_cpu_shift	24UL
+#define PG_dcache_cpu_mask	(256UL - 1UL)

#if NR_CPUS > 256
#error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
#endif
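The UL suffixes matter because these constants are shifted and masked against
the unsigned long page->flags word; with plain ints the arithmetic would be
done in 32-bit signed math first. A minimal userspace sketch of the packing
these defines imply (the helper names here are illustrative, not the
kernel's):

#include <stdio.h>

#define PG_dcache_cpu_shift	24UL
#define PG_dcache_cpu_mask	(256UL - 1UL)

/* Store a cpu number in the 8-bit field at bits 24..31 of flags. */
static unsigned long set_dcache_cpu(unsigned long flags, unsigned long cpu)
{
	flags &= ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	flags |= (cpu & PG_dcache_cpu_mask) << PG_dcache_cpu_shift;
	return flags;
}

/* Recover the cpu number from the same field. */
static unsigned long get_dcache_cpu(unsigned long flags)
{
	return (flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask;
}

int main(void)
{
	unsigned long flags = set_dcache_cpu(0, 37);

	printf("cpu = %lu\n", get_dcache_cpu(flags)); /* prints 37 */
	return 0;
}

An 8-bit field holds at most 256 cpu numbers, which is exactly the limit the
#error above enforces.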
#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n",
		    min_low_pfn, bootmap_pfn, max_low_pfn);
#endif
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn,
-					 (phys_base >> PAGE_SHIFT),
-					 end_pfn);
+					 min_low_pfn, end_pfn);
	/* Now register the available physical memory with the
	 * allocator.
	 */
	pages_avail = 0;
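The hunk above stops biasing the node's start pfn by phys_base >> PAGE_SHIFT
and passes min_low_pfn instead, so the node is described in the kernel's
plain, unbiased pfn space. For reference, a sketch of the call being changed,
assuming the 2.6-era bootmem interface from mm/bootmem.c (pg_data_t is
stubbed so the sketch stands alone):

/* pg_data_t stub; only a pointer to it is needed here. */
typedef struct pglist_data pg_data_t;

/* Returns the size in bytes of the bootmem bitmap it installs. */
unsigned long init_bootmem_node(pg_data_t *pgdat,	/* node being set up */
				unsigned long freepfn,	/* pfn where the bitmap lives */
				unsigned long startpfn,	/* first pfn the node manages */
				unsigned long endpfn);	/* one past the last managed pfn */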
	last_valid_pfn = end_pfn = bootmem_init(&pages_avail, phys_base);
-	max_mapnr = last_valid_pfn - (phys_base >> PAGE_SHIFT);
+	max_mapnr = last_valid_pfn;
	kernel_physical_mapping_init();
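Same idea for max_mapnr: it now counts pages from pfn 0 rather than from
phys_base. A worked example with made-up values (the phys_base and pfn below
are hypothetical; sparc64 of this era used 8K pages, hence a page shift
of 13):

#include <stdio.h>

int main(void)
{
	unsigned long phys_base = 0x40000000UL;	  /* example: RAM starts at 1GB */
	unsigned long page_shift = 13;		  /* sparc64 8K pages */
	unsigned long last_valid_pfn = 0x4ffffUL; /* example highest pfn + 1 */

	unsigned long old_mapnr = last_valid_pfn - (phys_base >> page_shift);
	unsigned long new_mapnr = last_valid_pfn;

	/* The old value counted pages starting at phys_base; the new one
	 * counts from pfn 0, matching the unbiased pfn space used above. */
	printf("old = %#lx, new = %#lx\n", old_mapnr, new_mapnr);
	return 0;
}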
	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
-		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

-		npages = end_pfn - (phys_base >> PAGE_SHIFT);
-		zones_size[ZONE_DMA] = npages;
-		zholes_size[ZONE_DMA] = npages - pages_avail;
+		zones_size[ZONE_DMA] = end_pfn;
+		zholes_size[ZONE_DMA] = end_pfn - pages_avail;

		free_area_init_node(0, &contig_page_data, zones_size,
-				    phys_base >> PAGE_SHIFT, zholes_size);
+				    __pa(PAGE_OFFSET) >> PAGE_SHIFT,
+				    zholes_size);
	}

	device_scan();
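With the phys_base bias gone, the zone accounting in the hunk above becomes
direct: the node start passed to free_area_init_node() is
__pa(PAGE_OFFSET) >> PAGE_SHIFT, which is pfn 0 on sparc64 since __pa()
subtracts PAGE_OFFSET, so ZONE_DMA simply spans end_pfn pages and every page
not registered as available is accounted as a hole. A small sketch of that
arithmetic with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned long end_pfn = 0x20000UL;	/* one past the last valid pfn */
	unsigned long pages_avail = 0x1f000UL;	/* pages handed to the allocator */

	/* Zone span and holes, exactly as computed in the hunk above. */
	unsigned long zone_pages = end_pfn;
	unsigned long zone_holes = end_pfn - pages_avail;

	printf("ZONE_DMA: %lu pages, %lu holes\n", zone_pages, zone_holes);
	return 0;
}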