sysc_nr_ok:
	mvc	SP_ARGS(4,%r15),SP_R7(%r15)
sysc_do_restart:
+	l	%r8,BASED(.Lsysc_table)
	tm	__TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
-	l	%r8,sys_call_table-system_call(%r7,%r13)	# get system call addr.
+	l	%r8,0(%r7,%r8)		# get system call addr.
	bnz	BASED(sysc_tracesys)
	basr	%r14,%r8		# call sys_xxxx
	st	%r2,SP_R2(%r15)		# store return value (change R2 on stack)
	basr	%r14,%r1
	clc	SP_R2(4,%r15),BASED(.Lnr_syscalls)
	bnl	BASED(sysc_tracenogo)
+	l	%r8,BASED(.Lsysc_table)
	l	%r7,SP_R2(%r15)		# strace might have changed the
	sll	%r7,2			# system call
-	l	%r8,sys_call_table-system_call(%r7,%r13)
+	l	%r8,0(%r7,%r8)
sysc_tracego:
	lm	%r3,%r6,SP_R3(%r15)
	l	%r2,SP_ORIG_R2(%r15)
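The point of the two-step lookup: once sys_call_table moves into .rodata (see below), it is no longer within the 12-bit displacement that the base-relative form sys_call_table-system_call(%r7,%r13) can encode, so the table's absolute address is first fetched from the literal pool and only then indexed. A rough C analogue of the new sequence, with hypothetical names and prototypes:

/* Sketch only: the real dispatch is the assembly above; the index has
 * already been scaled by 4 in %r7 via the sll. */
typedef long (*syscall_fn)(long, long, long, long, long);
extern const syscall_fn sys_call_table[];		/* lives in .rodata */

static long dispatch(unsigned int nr, long a1, long a2, long a3,
		     long a4, long a5)
{
	const syscall_fn *table = sys_call_table;	/* l %r8,BASED(.Lsysc_table) */
	syscall_fn fn = table[nr];			/* l %r8,0(%r7,%r8) */
	return fn(a1, a2, a3, a4, a5);			/* basr %r14,%r8 */
}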
.Ltrace:	.long	syscall_trace
.Lvfork:	.long	sys_vfork
.Lschedtail:	.long	schedule_tail
+.Lsysc_table:	.long	sys_call_table
.Lcritical_start:
	.long	__critical_start + 0x80000000
.Lcleanup_critical:
	.long	cleanup_critical
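For context, the literal pool entries are reached through the BASED() helper: %r13 is loaded with the address of system_call, so any label in the pool is a short displacement away. A sketch of the macro, assuming the usual entry.S definition:

/* Shape of the addressing helper in entry.S: turns a nearby label into
 * a base-plus-displacement operand off %r13. */
#define BASED(name)	name-system_call(%r13)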
+	.section .rodata, "a"
#define SYSCALL(esa,esame,emu)	.long esa
sys_call_table:
#include "syscalls.S"
#undef SYSCALL
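Moving the table into an allocatable ("a") read-only section is what lets paging_init() below cover it with the __start_rodata..__end_rodata range and map it _PAGE_RO. The C-side equivalent is simply const-qualifying file-scope data; the symbol here is made up for illustration:

/* Illustrative only: const file-scope data is emitted into .rodata by
 * the compiler, so it ends up inside the write-protected range. */
static const long example_table[4] = { 1, 2, 3, 4 };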
-
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
+#include <linux/pfn.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
+#include <asm/sections.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
	printk("%d pages swap cached\n",cached);
}
-/* References to section boundaries */
-
-extern unsigned long _text;
-extern unsigned long _etext;
-extern unsigned long _edata;
-extern unsigned long __bss_start;
-extern unsigned long _end;
-
-extern unsigned long __init_begin;
-extern unsigned long __init_end;
-
extern unsigned long __initdata zholes_size[];
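The hand-rolled extern declarations are dropped in favour of <asm/sections.h>, which pulls in the generic declarations of the linker-script boundary symbols, typed as char arrays so that taking their addresses behaves naturally in arithmetic. A sketch of the relevant part, assuming the asm-generic layout of the time:

/* Shape of include/asm-generic/sections.h (not verbatim): */
extern char _text[], _etext[], _edata[];
extern char __bss_start[], _end[];
extern char __init_begin[], __init_end[];
extern char __start_rodata[], __end_rodata[];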
/*
* paging_init() sets up the page tables
	unsigned long pfn = 0;
	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
	static const int ssm_mask = 0x04000000L;
+	unsigned long ro_start_pfn, ro_end_pfn;
+
+	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
+	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
	/* unmap whole virtual address space */
		pg_dir++;
		for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
-			pte = pfn_pte(pfn, PAGE_KERNEL);
+			if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
+				pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
+			else
+				pte = pfn_pte(pfn, PAGE_KERNEL);
			if (pfn >= max_low_pfn)
				pte_clear(&init_mm, 0, &pte);
			set_pte(pg_table, pte);
		}
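PFN_DOWN() and PFN_UP() come from the newly included <linux/pfn.h>: they convert a byte address to a page frame number, rounding down and up respectively, so every page that overlaps the rodata section falls inside [ro_start_pfn, ro_end_pfn). For reference, the stock definitions:

/* From include/linux/pfn.h: */
#define PFN_ALIGN(x)	(((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)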
#else /* CONFIG_64BIT */
+
void __init paging_init(void)
{
	pgd_t * pg_dir;
	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
		_KERN_REGION_TABLE;
	static const int ssm_mask = 0x04000000L;
-
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long dma_pfn, high_pfn;
+	unsigned long ro_start_pfn, ro_end_pfn;
	memset(zones_size, 0, sizeof(zones_size));
	dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
	high_pfn = max_low_pfn;
+	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
+	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
	if (dma_pfn > high_pfn)
		zones_size[ZONE_DMA] = high_pfn;
		pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
-			pte = pfn_pte(pfn, PAGE_KERNEL);
+			if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
+				pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
+			else
+				pte = pfn_pte(pfn, PAGE_KERNEL);
			if (pfn >= max_low_pfn) {
				pte_clear(&init_mm, 0, &pte);
				continue;
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >>10,
	       initsize >> 10);
+	printk("Write protected kernel read-only data: %#lx - %#lx\n",
+	       (unsigned long)&__start_rodata,
+	       PFN_ALIGN((unsigned long)&__end_rodata) - 1);
}
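The printed end address is PFN_ALIGN(&__end_rodata) - 1, i.e. the last byte of the last protected page rather than the first unprotected byte. A standalone sketch of that arithmetic, with made-up addresses and 4K pages:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PFN_ALIGN(x)	(((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)

int main(void)
{
	/* Hypothetical section bounds, for illustration only. */
	unsigned long start = 0x10d000UL, end = 0x134567UL;

	/* Prints: Write protected kernel read-only data: 0x10d000 - 0x134fff */
	printf("Write protected kernel read-only data: %#lx - %#lx\n",
	       start, PFN_ALIGN(end) - 1);
	return 0;
}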
void free_initmem(void)