offset("#define TI_PRE_COUNT ", struct thread_info, preempt_count);
offset("#define TI_ADDR_LIMIT ", struct thread_info, addr_limit);
offset("#define TI_RESTART_BLOCK ", struct thread_info, restart_block);
- offset("#define TI_TP_VALUE ", struct thread_info, tp_value);
+ offset("#define TI_TP_VALUE ", struct thread_info, tp_value);
constant("#define _THREAD_SIZE_ORDER ", THREAD_SIZE_ORDER);
constant("#define _THREAD_SIZE ", THREAD_SIZE);
constant("#define _THREAD_MASK ", THREAD_MASK);
linefeed;
}
+#ifdef CONFIG_32BIT
void output_sc_defines(void)
{
text("/* Linux sigcontext offsets. */");
offset("#define SC_STATUS ", struct sigcontext, sc_status);
offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr);
offset("#define SC_FPC_EIR ", struct sigcontext, sc_fpc_eir);
- offset("#define SC_CAUSE ", struct sigcontext, sc_cause);
- offset("#define SC_BADVADDR ", struct sigcontext, sc_badvaddr);
+ offset("#define SC_HI1 ", struct sigcontext, sc_hi1);
+ offset("#define SC_LO1 ", struct sigcontext, sc_lo1);
+ offset("#define SC_HI2 ", struct sigcontext, sc_hi2);
+ offset("#define SC_LO2 ", struct sigcontext, sc_lo2);
+ offset("#define SC_HI3 ", struct sigcontext, sc_hi3);
+ offset("#define SC_LO3 ", struct sigcontext, sc_lo3);
linefeed;
}
+#endif
+
+#ifdef CONFIG_64BIT
+void output_sc_defines(void)
+{
+ text("/* Linux sigcontext offsets. */");
+ offset("#define SC_REGS ", struct sigcontext, sc_regs);
+ offset("#define SC_FPREGS ", struct sigcontext, sc_fpregs);
+ offset("#define SC_MDHI ", struct sigcontext, sc_hi);
+ offset("#define SC_MDLO ", struct sigcontext, sc_lo);
+ offset("#define SC_PC ", struct sigcontext, sc_pc);
+ offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr);
+ linefeed;
+}
+#endif
#ifdef CONFIG_MIPS32_COMPAT
void output_sc32_defines(void)
*/
int __compute_return_epc(struct pt_regs *regs)
{
- unsigned int *addr, bit, fcr31;
+ unsigned int *addr, bit, fcr31, dspcontrol;
long epc;
union mips_instruction insn;
epc += 8;
regs->cp0_epc = epc;
break;
+ case bposge32_op:
+ if (!cpu_has_dsp)
+ goto sigill;
+
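+ /* bposge32 is taken when the pos field of the DSPControl register is >= 32 */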
+ dspcontrol = rddsp(0x01);
+
+ if (dspcontrol >= 32) {
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
}
break;
printk("%s: unaligned epc - sending SIGBUS.\n", current->comm);
force_sig(SIGBUS, current);
return -EFAULT;
+
+sigill:
+ printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
+ force_sig(SIGBUS, current);
+ return -EFAULT;
}
if (config3 & MIPS_CONF3_SM)
c->ases |= MIPS_ASE_SMARTMIPS;
+ if (config3 & MIPS_CONF3_DSP)
+ c->ases |= MIPS_ASE_DSP;
return config3 & MIPS_CONF_M;
}
c->cputype = CPU_20KC;
break;
case PRID_IMP_24K:
+ case PRID_IMP_24KE:
c->cputype = CPU_24K;
break;
case PRID_IMP_25KF:
BUILD_HANDLER mdmx mdmx sti silent /* #22 */
BUILD_HANDLER watch watch sti verbose /* #23 */
BUILD_HANDLER mcheck mcheck cli verbose /* #24 */
+ BUILD_HANDLER dsp dsp sti silent /* #26 */
BUILD_HANDLER reserved reserved sti verbose /* others */
#ifdef CONFIG_64BIT
#include <linux/init.h>
#include <linux/completion.h>
+#include <asm/abi.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
+#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/pgtable.h>
#include <asm/system.h>
}
}
+extern int do_signal(sigset_t *oldset, struct pt_regs *regs);
+extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);
+
+/*
+ * Native o32 and N64 ABI without DSP ASE
+ */
+extern void setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
+ int signr, sigset_t *set);
+extern void setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
+ int signr, sigset_t *set, siginfo_t *info);
+
+struct mips_abi mips_abi = {
+ .do_signal = do_signal,
+#ifdef CONFIG_TRAD_SIGNALS
+ .setup_frame = setup_frame,
+#endif
+ .setup_rt_frame = setup_rt_frame
+};
+
+#ifdef CONFIG_MIPS32_O32
+/*
+ * o32 compatibility on 64-bit kernels, without DSP ASE
+ */
+extern void setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
+ int signr, sigset_t *set);
+extern void setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
+ int signr, sigset_t *set, siginfo_t *info);
+
+struct mips_abi mips_abi_32 = {
+ .do_signal = do_signal32,
+ .setup_frame = setup_frame_32,
+ .setup_rt_frame = setup_rt_frame_32
+};
+#endif /* CONFIG_MIPS32_O32 */
+
+#ifdef CONFIG_MIPS32_N32
+/*
+ * N32 on 64-bit kernels, without DSP ASE
+ */
+extern void setup_rt_frame_n32(struct k_sigaction * ka, struct pt_regs *regs,
+ int signr, sigset_t *set, siginfo_t *info);
+
+struct mips_abi mips_abi_n32 = {
+ .do_signal = do_signal,
+ .setup_rt_frame = setup_rt_frame_n32
+};
+#endif /* CONFIG_MIPS32_N32 */
+
asmlinkage void ret_from_fork(void);
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
regs->cp0_status = status;
clear_used_math();
lose_fpu();
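+ /* Reset the DSP accumulators and DSPControl for the new program image */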
+ if (cpu_has_dsp)
+ __init_dsp();
regs->cp0_epc = pc;
regs->regs[29] = sp;
current_thread_info()->addr_limit = USER_DS;
preempt_disable();
- if (is_fpu_owner()) {
+ if (is_fpu_owner())
save_fp(p);
- }
+
+ if (cpu_has_dsp)
+ save_dsp(p);
preempt_enable();
#include <asm/byteorder.h>
#include <asm/cpu.h>
+#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/pgtable.h>
write_c0_status(flags);
break;
}
+ case DSP_BASE ... DSP_BASE + 5:
+ if (!cpu_has_dsp) {
+ tmp = 0;
+ ret = -EIO;
+ goto out_tsk;
+ }
+ if (child->thread.dsp.used_dsp) {
+ dspreg_t *dregs = __get_dsp_regs(child);
+ tmp = (unsigned long) (dregs[addr - DSP_BASE]);
+ } else {
+ tmp = -1; /* DSP registers not yet used */
+ }
+ break;
+ case DSP_CONTROL:
+ if (!cpu_has_dsp) {
+ tmp = 0;
+ ret = -EIO;
+ goto out_tsk;
+ }
+ tmp = child->thread.dsp.dspcontrol;
+ break;
default:
tmp = 0;
ret = -EIO;
else
child->thread.fpu.soft.fcr31 = data;
break;
+ case DSP_BASE ... DSP_BASE + 5: {
+ dspreg_t *dregs;
+
+ if (!cpu_has_dsp) {
+ ret = -EIO;
+ break;
+ }
+ dregs = __get_dsp_regs(child);
+ dregs[addr - DSP_BASE] = data;
+ break;
+ }
+ case DSP_CONTROL:
+ if (!cpu_has_dsp) {
+ ret = -EIO;
+ break;
+ }
+ child->thread.dsp.dspcontrol = data;
+ break;
default:
/* The rest are not allowed. */
ret = -EIO;
#include <linux/security.h>
#include <asm/cpu.h>
+#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/pgtable.h>
write_c0_status(flags);
break;
}
+ case DSP_BASE ... DSP_BASE + 5:
+ if (!cpu_has_dsp) {
+ tmp = 0;
+ ret = -EIO;
+ goto out_tsk;
+ }
+ if (child->thread.dsp.used_dsp) {
+ dspreg_t *dregs = __get_dsp_regs(child);
+ tmp = (unsigned long) (dregs[addr - DSP_BASE]);
+ } else {
+ tmp = -1; /* DSP registers not yet used */
+ }
+ break;
+ case DSP_CONTROL:
+ if (!cpu_has_dsp) {
+ tmp = 0;
+ ret = -EIO;
+ goto out_tsk;
+ }
+ tmp = child->thread.dsp.dspcontrol;
+ break;
default:
tmp = 0;
ret = -EIO;
else
child->thread.fpu.soft.fcr31 = data;
break;
+ case DSP_BASE ... DSP_BASE + 5: {
+ dspreg_t *dregs;
+
+ if (!cpu_has_dsp) {
+ ret = -EIO;
+ break;
+ }
+ dregs = __get_dsp_regs(child);
+ dregs[addr - DSP_BASE] = data;
+ break;
+ }
+ case DSP_CONTROL:
+ if (!cpu_has_dsp) {
+ ret = -EIO;
+ break;
+ }
+ child->thread.dsp.dspcontrol = data;
+ break;
default:
/* The rest are not allowed. */
ret = -EIO;
.set noreorder
.set mips3
- /* Save floating point context */
+
LEAF(_save_fp_context)
cfc1 t1, fcr31
EX sdc1 $f28, SC_FPREGS+224(a0)
EX sdc1 $f30, SC_FPREGS+240(a0)
EX sw t1, SC_FPC_CSR(a0)
- cfc1 t0, $0 # implementation/version
- EX sw t0, SC_FPC_EIR(a0)
-
jr ra
li v0, 0 # success
END(_save_fp_context)
sys sys_ni_syscall 0 /* sys_vserver */
sys sys_waitid 5
sys sys_ni_syscall 0 /* available, was setaltroot */
- sys sys_add_key 5
+ sys sys_add_key 5 /* 4280 */
sys sys_request_key 4
sys sys_keyctl 5
sys sys_set_thread_area 1
}
__setup("nofpu", fpu_disable);
+
+int __init dsp_disable(char *s)
+{
+ cpu_data[0].ases &= ~MIPS_ASE_DSP;
+
+ return 1;
+}
+
+__setup("nodsp", dsp_disable);
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
+#include <linux/config.h>
+
static inline int
setup_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
{
int err = 0;
err |= __put_user(regs->cp0_epc, &sc->sc_pc);
- err |= __put_user(regs->cp0_status, &sc->sc_status);
#define save_gp_reg(i) do { \
err |= __put_user(regs->regs[i], &sc->sc_regs[i]); \
save_gp_reg(31);
#undef save_gp_reg
+#ifdef CONFIG_32BIT
err |= __put_user(regs->hi, &sc->sc_mdhi);
err |= __put_user(regs->lo, &sc->sc_mdlo);
- err |= __put_user(regs->cp0_cause, &sc->sc_cause);
- err |= __put_user(regs->cp0_badvaddr, &sc->sc_badvaddr);
+ if (cpu_has_dsp) {
+ err |= __put_user(mfhi1(), &sc->sc_hi1);
+ err |= __put_user(mflo1(), &sc->sc_lo1);
+ err |= __put_user(mfhi2(), &sc->sc_hi2);
+ err |= __put_user(mflo2(), &sc->sc_lo2);
+ err |= __put_user(mfhi3(), &sc->sc_hi3);
+ err |= __put_user(mflo3(), &sc->sc_lo3);
+ err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
+ }
+#endif
+#ifdef CONFIG_64BIT
+ err |= __put_user(regs->hi, &sc->sc_hi[0]);
+ err |= __put_user(regs->lo, &sc->sc_lo[0]);
+ if (cpu_has_dsp) {
+ err |= __put_user(mfhi1(), &sc->sc_hi[1]);
+ err |= __put_user(mflo1(), &sc->sc_lo[1]);
+ err |= __put_user(mfhi2(), &sc->sc_hi[2]);
+ err |= __put_user(mflo2(), &sc->sc_lo[2]);
+ err |= __put_user(mfhi3(), &sc->sc_hi[3]);
+ err |= __put_user(mflo3(), &sc->sc_lo[3]);
+ err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
+ }
+#endif
err |= __put_user(!!used_math(), &sc->sc_used_math);
static inline int
restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
{
- int err = 0;
unsigned int used_math;
+ unsigned long treg;
+ int err = 0;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
err |= __get_user(regs->cp0_epc, &sc->sc_pc);
+#ifdef CONFIG_32BIT
err |= __get_user(regs->hi, &sc->sc_mdhi);
err |= __get_user(regs->lo, &sc->sc_mdlo);
+ if (cpu_has_dsp) {
+ err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
+ err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
+ err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
+ err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
+ err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
+ err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
+ err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
+ }
+#endif
+#ifdef CONFIG_64BIT
+ err |= __get_user(regs->hi, &sc->sc_hi[0]);
+ err |= __get_user(regs->lo, &sc->sc_lo[0]);
+ if (cpu_has_dsp) {
+ err |= __get_user(treg, &sc->sc_hi[1]); mthi1(treg);
+ err |= __get_user(treg, &sc->sc_lo[1]); mtlo1(treg);
+ err |= __get_user(treg, &sc->sc_hi[2]); mthi2(treg);
+ err |= __get_user(treg, &sc->sc_lo[2]); mtlo2(treg);
+ err |= __get_user(treg, &sc->sc_hi[3]); mthi3(treg);
+ err |= __get_user(treg, &sc->sc_lo[3]); mtlo3(treg);
+ err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
+ }
+#endif
#define restore_gp_reg(i) do { \
err |= __get_user(regs->regs[i], &sc->sc_regs[i]); \
#include <linux/unistd.h>
#include <linux/compiler.h>
+#include <asm/abi.h>
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-static int do_signal(sigset_t *oldset, struct pt_regs *regs);
+int do_signal(sigset_t *oldset, struct pt_regs *regs);
/*
* Atomically swap in the new signal mask, and wait for a signal.
badframe:
force_sig(SIGSEGV, current);
}
-#endif
+#endif /* CONFIG_TRAD_SIGNALS */
save_static_function(sys_rt_sigreturn);
__attribute_used__ noinline static void
}
#ifdef CONFIG_TRAD_SIGNALS
-static void inline setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
+void setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
int signr, sigset_t *set)
{
struct sigframe *frame;
}
#endif
-static void inline setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
+void setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
int signr, sigset_t *set, siginfo_t *info)
{
struct rt_sigframe *frame;
regs->regs[0] = 0; /* Don't deal with this again. */
-#ifdef CONFIG_TRAD_SIGNALS
- if (ka->sa.sa_flags & SA_SIGINFO) {
-#else
- if (1) {
-#endif
-#ifdef CONFIG_MIPS32_N32
- if ((current->thread.mflags & MF_ABI_MASK) == MF_N32)
- setup_rt_frame_n32 (ka, regs, sig, oldset, info);
- else
-#endif
- setup_rt_frame(ka, regs, sig, oldset, info);
- }
-#ifdef CONFIG_TRAD_SIGNALS
+ if (sig_uses_siginfo(ka))
+ current->thread.abi->setup_rt_frame(ka, regs, sig, oldset, info);
else
- setup_frame(ka, regs, sig, oldset);
-#endif
+ current->thread.abi->setup_frame(ka, regs, sig, oldset);
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
spin_unlock_irq(&current->sighand->siglock);
}
-extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);
-extern int do_irix_signal(sigset_t *oldset, struct pt_regs *regs);
-
-static int do_signal(sigset_t *oldset, struct pt_regs *regs)
+int do_signal(sigset_t *oldset, struct pt_regs *regs)
{
struct k_sigaction ka;
siginfo_t info;
int signr;
-#ifdef CONFIG_BINFMT_ELF32
- if ((current->thread.mflags & MF_ABI_MASK) == MF_O32) {
- return do_signal32(oldset, regs);
- }
-#endif
-
/*
* We want the common case to go fast, which is why we may in certain
* cases get here from kernel mode. Just return without doing anything
{
/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING) {
-#ifdef CONFIG_BINFMT_ELF32
- if (likely((current->thread.mflags & MF_ABI_MASK) == MF_O32)) {
- do_signal32(oldset, regs);
- return;
- }
-#endif
-#ifdef CONFIG_BINFMT_IRIX
- if (unlikely(current->personality != PER_LINUX)) {
- do_irix_signal(oldset, regs);
- return;
- }
-#endif
- do_signal(oldset, regs);
+ current->thread.abi->do_signal(oldset, regs);
}
}
#include <linux/suspend.h>
#include <linux/compiler.h>
+#include <asm/abi.h>
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
static int restore_sigcontext32(struct pt_regs *regs, struct sigcontext32 *sc)
{
+ u32 used_math;
int err = 0;
- __u32 used_math;
+ s32 treg;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
err |= __get_user(regs->cp0_epc, &sc->sc_pc);
err |= __get_user(regs->hi, &sc->sc_mdhi);
err |= __get_user(regs->lo, &sc->sc_mdlo);
+ if (cpu_has_dsp) {
+ err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
+ err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
+ err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
+ err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
+ err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
+ err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
+ err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
+ }
#define restore_gp_reg(i) do { \
err |= __get_user(regs->regs[i], &sc->sc_regs[i]); \
err |= __put_user(regs->hi, &sc->sc_mdhi);
err |= __put_user(regs->lo, &sc->sc_mdlo);
- err |= __put_user(regs->cp0_cause, &sc->sc_cause);
- err |= __put_user(regs->cp0_badvaddr, &sc->sc_badvaddr);
+ if (cpu_has_dsp) {
+ err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
+ err |= __put_user(mfhi1(), &sc->sc_hi1);
+ err |= __put_user(mflo1(), &sc->sc_lo1);
+ err |= __put_user(mfhi2(), &sc->sc_hi2);
+ err |= __put_user(mflo2(), &sc->sc_lo2);
+ err |= __put_user(mfhi3(), &sc->sc_hi3);
+ err |= __put_user(mflo3(), &sc->sc_lo3);
+ }
err |= __put_user(!!used_math(), &sc->sc_used_math);
return (void *)((sp - frame_size) & ALMASK);
}
-static inline void setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
+void setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
int signr, sigset_t *set)
{
struct sigframe *frame;
force_sigsegv(signr, current);
}
-static inline void setup_rt_frame(struct k_sigaction * ka,
- struct pt_regs *regs, int signr,
- sigset_t *set, siginfo_t *info)
+void setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
+ int signr, sigset_t *set, siginfo_t *info)
{
struct rt_sigframe32 *frame;
int err = 0;
regs->regs[0] = 0; /* Don't deal with this again. */
if (ka->sa.sa_flags & SA_SIGINFO)
- setup_rt_frame(ka, regs, sig, oldset, info);
+ current->thread.abi->setup_rt_frame(ka, regs, sig, oldset, info);
else
- setup_frame(ka, regs, sig, oldset);
+ current->thread.abi->setup_frame(ka, regs, sig, oldset);
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu.h>
+#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/module.h>
#include <asm/pgtable.h>
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
+extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
(regs->cp0_status & ST0_TS) ? "" : "not ");
}
+asmlinkage void do_dsp(struct pt_regs *regs)
+{
+ if (cpu_has_dsp)
+ panic("Unexpected DSP exception\n");
+
+ force_sig(SIGILL, current);
+}
+
asmlinkage void do_reserved(struct pt_regs *regs)
{
/*
#endif
if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
status_set |= ST0_XX;
- change_c0_status(ST0_CU|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
+ change_c0_status(ST0_CU|ST0_MX|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
status_set);
+ if (cpu_has_dsp)
+ set_c0_status(ST0_MX);
+
/*
* Some MIPS CPUs have a dedicated interrupt vector which reduces the
* interrupt processing overhead. Use it where available.
set_except_vector(11, handle_cpu);
set_except_vector(12, handle_ov);
set_except_vector(13, handle_tr);
- set_except_vector(22, handle_mdmx);
-
- if (cpu_has_fpu && !cpu_has_nofpuex)
- set_except_vector(15, handle_fpe);
-
- if (cpu_has_mcheck)
- set_except_vector(24, handle_mcheck);
-
- if (cpu_has_vce)
- /* Special exception: R4[04]00 uses also the divec space. */
- memcpy((void *)(CAC_BASE + 0x180), &except_vec3_r4000, 0x100);
- else if (cpu_has_4kex)
- memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);
- else
- memcpy((void *)(CAC_BASE + 0x080), &except_vec3_generic, 0x80);
if (current_cpu_data.cputype == CPU_R6000 ||
current_cpu_data.cputype == CPU_R6000A) {
//set_except_vector(15, handle_ndc);
}
+ if (cpu_has_fpu && !cpu_has_nofpuex)
+ set_except_vector(15, handle_fpe);
+
+ set_except_vector(22, handle_mdmx);
+
+ if (cpu_has_mcheck)
+ set_except_vector(24, handle_mcheck);
+
+ if (cpu_has_dsp)
+ set_except_vector(26, handle_dsp);
+
+ if (cpu_has_vce)
+ /* Special exception: R4[04]00 uses also the divec space. */
+ memcpy((void *)(CAC_BASE + 0x180), &except_vec3_r4000, 0x100);
+ else if (cpu_has_4kex)
+ memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);
+ else
+ memcpy((void *)(CAC_BASE + 0x080), &except_vec3_generic, 0x80);
+
signal_init();
#ifdef CONFIG_MIPS32_COMPAT
signal32_init();
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 by Ralf Baechle
+ * Copyright (C) 2005 MIPS Technologies, Inc.
+ */
+#ifndef _ASM_ABI_H
+#define _ASM_ABI_H
+
+#include <asm/signal.h>
+#include <asm/siginfo.h>
+
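+/*
+ * Per-ABI signal delivery hooks; exec points current->thread.abi at the
+ * variant matching the binary (native, o32 or N32).
+ */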
+struct mips_abi {
+ int (* const do_signal)(sigset_t *oldset, struct pt_regs *regs);
+ void (* const setup_frame)(struct k_sigaction * ka,
+ struct pt_regs *regs, int signr,
+ sigset_t *set);
+ void (* const setup_rt_frame)(struct k_sigaction * ka,
+ struct pt_regs *regs, int signr,
+ sigset_t *set, siginfo_t *info);
+};
+
+#endif /* _ASM_ABI_H */
#endif
#endif
+#ifndef cpu_has_dsp
+#define cpu_has_dsp (cpu_data[0].ases & MIPS_ASE_DSP)
+#endif
+
/*
* Certain CPUs may throw bizarre exceptions if not the whole cacheline
* contains valid instructions. For these we ensure proper alignment of
#define PRID_IMP_4KEMPR2 0x9100
#define PRID_IMP_4KSD 0x9200
#define PRID_IMP_24K 0x9300
+#define PRID_IMP_24KE 0x9600
#define PRID_IMP_UNKNOWN 0xff00
#define MIPS_ASE_MDMX 0x00000002 /* MIPS digital media extension */
#define MIPS_ASE_MIPS3D 0x00000004 /* MIPS-3D */
#define MIPS_ASE_SMARTMIPS 0x00000008 /* SmartMIPS */
+#define MIPS_ASE_DSP 0x00000010 /* Signal Processing ASE */
#endif /* _ASM_CPU_H */
--- /dev/null
+/*
+ * Copyright (C) 2005 Mips Technologies
+ * Author: Chris Dearman, chris@mips.com derived from fpu.h
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef _ASM_DSP_H
+#define _ASM_DSP_H
+
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/hazards.h>
+#include <asm/mipsregs.h>
+
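+/* DSPControl reset value and the rddsp()/wrdsp() mask used to save/restore it */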
+#define DSP_DEFAULT 0x00000000
+#define DSP_MASK 0x1f
+
+#define __enable_dsp_hazard() \
+do { \
+ asm("_ehb"); \
+} while (0)
+
+static inline void __init_dsp(void)
+{
+ mthi1(0);
+ mtlo1(0);
+ mthi2(0);
+ mtlo2(0);
+ mthi3(0);
+ mtlo3(0);
+ wrdsp(DSP_DEFAULT, DSP_MASK);
+}
+
+static inline void init_dsp(void)
+{
+ if (cpu_has_dsp)
+ __init_dsp();
+}
+
+#define __save_dsp(tsk) \
+do { \
+ tsk->thread.dsp.dspr[0] = mfhi1(); \
+ tsk->thread.dsp.dspr[1] = mflo1(); \
+ tsk->thread.dsp.dspr[2] = mfhi2(); \
+ tsk->thread.dsp.dspr[3] = mflo2(); \
+ tsk->thread.dsp.dspr[4] = mfhi3(); \
+ tsk->thread.dsp.dspr[5] = mflo3(); \
+} while (0)
+
+#define save_dsp(tsk) \
+do { \
+ if (cpu_has_dsp) \
+ __save_dsp(tsk); \
+} while (0)
+
+#define __restore_dsp(tsk) \
+do { \
+ mthi1(tsk->thread.dsp.dspr[0]); \
+ mtlo1(tsk->thread.dsp.dspr[1]); \
+ mthi2(tsk->thread.dsp.dspr[2]); \
+ mtlo2(tsk->thread.dsp.dspr[3]); \
+ mthi3(tsk->thread.dsp.dspr[4]); \
+ mtlo3(tsk->thread.dsp.dspr[5]); \
+} while (0)
+
+#define restore_dsp(tsk) \
+do { \
+ if (cpu_has_dsp) \
+ __restore_dsp(tsk); \
+} while (0)
+
+#define __get_dsp_regs(tsk) \
+({ \
+ if (tsk == current) \
+ __save_dsp(current); \
+ \
+ tsk->thread.dsp.dspr; \
+})
+
+#endif /* _ASM_DSP_H */
#ifdef __KERNEL__
+struct mips_abi;
+
+extern struct mips_abi mips_abi;
+extern struct mips_abi mips_abi_32;
+extern struct mips_abi mips_abi_n32;
+
#ifdef CONFIG_32BIT
-#define SET_PERSONALITY(ex, ibcs2) \
-do { \
- if (ibcs2) \
- set_personality(PER_SVR4); \
- set_personality(PER_LINUX); \
+#define SET_PERSONALITY(ex, ibcs2) \
+do { \
+ if (ibcs2) \
+ set_personality(PER_SVR4); \
+ set_personality(PER_LINUX); \
+ \
+ current->thread.abi = &mips_abi; \
} while (0)
#endif /* CONFIG_32BIT */
#ifdef CONFIG_64BIT
-#define SET_PERSONALITY(ex, ibcs2) \
-do { current->thread.mflags &= ~MF_ABI_MASK; \
- if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \
- if ((((ex).e_flags & EF_MIPS_ABI2) != 0) && \
- ((ex).e_flags & EF_MIPS_ABI) == 0) \
- current->thread.mflags |= MF_N32; \
- else \
- current->thread.mflags |= MF_O32; \
- } else \
- current->thread.mflags |= MF_N64; \
- if (ibcs2) \
- set_personality(PER_SVR4); \
- else if (current->personality != PER_LINUX32) \
- set_personality(PER_LINUX); \
+#ifdef CONFIG_MIPS32_N32
+#define __SET_PERSONALITY32_N32() \
+ do { \
+ current->thread.mflags |= MF_N32; \
+ current->thread.abi = &mips_abi_n32; \
+ } while (0)
+#else
+#define __SET_PERSONALITY32_N32() \
+ do { } while (0)
+#endif
+
+#ifdef CONFIG_MIPS32_O32
+#define __SET_PERSONALITY32_O32() \
+ do { \
+ current->thread.mflags |= MF_O32; \
+ current->thread.abi = &mips_abi_32; \
+ } while (0)
+#else
+#define __SET_PERSONALITY32_O32() \
+ do { } while (0)
+#endif
+
+#ifdef CONFIG_MIPS32_COMPAT
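+/*
+ * An ELFCLASS32 binary with EF_MIPS_ABI2 set and EF_MIPS_ABI clear uses the
+ * N32 ABI; any other 32-bit binary is treated as o32.
+ */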
+#define __SET_PERSONALITY32(ex) \
+do { \
+ if ((((ex).e_flags & EF_MIPS_ABI2) != 0) && \
+ ((ex).e_flags & EF_MIPS_ABI) == 0) \
+ __SET_PERSONALITY32_N32(); \
+ else \
+ __SET_PERSONALITY32_O32(); \
+} while (0)
+#else
+#define __SET_PERSONALITY32(ex) do { } while (0)
+#endif
+
+#define SET_PERSONALITY(ex, ibcs2) \
+do { \
+ current->thread.mflags &= ~MF_ABI_MASK; \
+ if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
+ __SET_PERSONALITY32(ex); \
+ else { \
+ current->thread.mflags |= MF_N64; \
+ current->thread.abi = &mips_abi; \
+ } \
+ \
+ if (ibcs2) \
+ set_personality(PER_SVR4); \
+ else if (current->personality != PER_LINUX32) \
+ set_personality(PER_LINUX); \
} while (0)
#endif /* CONFIG_64BIT */
spimi_op, unused_rt_op_0x05, unused_rt_op_0x06, unused_rt_op_0x07,
tgei_op, tgeiu_op, tlti_op, tltiu_op,
teqi_op, unused_0x0d_rt_op, tnei_op, unused_0x0f_rt_op,
- bltzal_op, bgezal_op, bltzall_op, bgezall_op
- /*
- * The others (0x14 - 0x1f) are unused.
- */
+ bltzal_op, bgezal_op, bltzall_op, bgezall_op,
+ rt_op_0x14, rt_op_0x15, rt_op_0x16, rt_op_0x17,
+ rt_op_0x18, rt_op_0x19, rt_op_0x1a, rt_op_0x1b,
+ bposge32_op, rt_op_0x1d, rt_op_0x1e, rt_op_0x1f
};
/*
#define cpu_has_dc_aliases (PAGE_SIZE < 0x4000)
#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_dsp 0
+
#define cpu_has_nofpuex 0
#define cpu_has_64bits 1
#define cpu_has_vtag_icache 0
#define cpu_has_dc_aliases 0
#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_dsp 0
#define cpu_icache_snoops_remote_store 1
#define cpu_has_nofpuex 0
#define cpu_has_ejtag 0
#define cpu_has_vtag_icache 0
#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_dsp 0
#endif /* __ASM_MACH_IP32_CPU_FEATURE_OVERRIDES_H */
#define cpu_has_vtag_icache 0
#define cpu_has_dc_aliases 0
#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_dsp 0
#define cpu_icache_snoops_remote_store 0
#define cpu_has_nofpuex 0
#define cpu_has_vtag_icache 0
#define cpu_has_dc_aliases 0
#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_dsp 0
#define cpu_icache_snoops_remote_store 0
#define cpu_has_nofpuex 0
#define cpu_has_vtag_icache 0
#define cpu_has_dc_aliases (PAGE_SIZE < 0x4000)
#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_dsp 0
#define cpu_has_nofpuex 0
#define cpu_has_64bits 1
#define cpu_has_vtag_icache 1
#define cpu_has_dc_aliases 0
#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_dsp 0
#define cpu_icache_snoops_remote_store 0
#define cpu_has_nofpuex 0
#define cpu_has_vtag_icache 0
#define cpu_has_dc_aliases 0
#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_dsp 0
#define cpu_icache_snoops_remote_store 0
#define cpu_has_nofpuex 0
#define ST0_IL (_ULCAST_(1) << 23)
#define ST0_DL (_ULCAST_(1) << 24)
+/*
+ * Enable the MIPS DSP ASE
+ */
+#define ST0_MX 0x01000000
+
/*
* Bitfields in the TX39 family CP0 Configuration Register 3
*/
#define MIPS_CONF3_VINT (_ULCAST_(1) << 5)
#define MIPS_CONF3_VEIC (_ULCAST_(1) << 6)
#define MIPS_CONF3_LPA (_ULCAST_(1) << 7)
+#define MIPS_CONF3_DSP (_ULCAST_(1) << 10)
/*
* Bits in the MIPS32/64 coprocessor 1 (FPU) revision register.
: "=r" (__res)); \
__res;})
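+/*
+ * rddsp/wrdsp are emitted as raw .word encodings so that assemblers without
+ * DSP ASE support can still build this file.  rddsp carries the mask
+ * immediate in bits 25:16 and the destination register in bits 15:11;
+ * wrdsp carries the source register in bits 25:21 and the mask immediate
+ * in bits 20:11.
+ */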
+#define rddsp(mask) \
+({ \
+ unsigned int __res; \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " # rddsp $1, %x1 \n" \
+ " .word 0x7c000cb8 | (%x1 << 16) \n" \
+ " move %0, $1 \n" \
+ " .set pop \n" \
+ : "=r" (__res) \
+ : "i" (mask)); \
+ __res; \
+})
+
+#define wrdsp(val, mask) \
+do { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " move $1, %0 \n" \
+ " # wrdsp $1, %x1 \n" \
+ " .word 0x7c2004f8 | (%x1 << 15) \n" \
+ " .set pop \n" \
+ : \
+ : "r" (val), "i" (mask)); \
+} while (0)
+
+#if 0 /* Need DSP ASE capable assembler ... */
+#define mflo0() ({ long mflo0; __asm__("mflo %0, $ac0" : "=r" (mflo0)); mflo0;})
+#define mflo1() ({ long mflo1; __asm__("mflo %0, $ac1" : "=r" (mflo1)); mflo1;})
+#define mflo2() ({ long mflo2; __asm__("mflo %0, $ac2" : "=r" (mflo2)); mflo2;})
+#define mflo3() ({ long mflo3; __asm__("mflo %0, $ac3" : "=r" (mflo3)); mflo3;})
+
+#define mfhi0() ({ long mfhi0; __asm__("mfhi %0, $ac0" : "=r" (mfhi0)); mfhi0;})
+#define mfhi1() ({ long mfhi1; __asm__("mfhi %0, $ac1" : "=r" (mfhi1)); mfhi1;})
+#define mfhi2() ({ long mfhi2; __asm__("mfhi %0, $ac2" : "=r" (mfhi2)); mfhi2;})
+#define mfhi3() ({ long mfhi3; __asm__("mfhi %0, $ac3" : "=r" (mfhi3)); mfhi3;})
+
+#define mtlo0(x) __asm__("mtlo %0, $ac0" ::"r" (x))
+#define mtlo1(x) __asm__("mtlo %0, $ac1" ::"r" (x))
+#define mtlo2(x) __asm__("mtlo %0, $ac2" ::"r" (x))
+#define mtlo3(x) __asm__("mtlo %0, $ac3" ::"r" (x))
+
+#define mthi0(x) __asm__("mthi %0, $ac0" ::"r" (x))
+#define mthi1(x) __asm__("mthi %0, $ac1" ::"r" (x))
+#define mthi2(x) __asm__("mthi %0, $ac2" ::"r" (x))
+#define mthi3(x) __asm__("mthi %0, $ac3" ::"r" (x))
+
+#else
+
+#define mfhi0() \
+({ \
+ unsigned long __treg; \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " # mfhi %0, $ac0 \n" \
+ " .word 0x00000810 \n" \
+ " move %0, $1 \n" \
+ " .set pop \n" \
+ : "=r" (__treg)); \
+ __treg; \
+})
+
+#define mfhi1() \
+({ \
+ unsigned long __treg; \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " # mfhi %0, $ac1 \n" \
+ " .word 0x00200810 \n" \
+ " move %0, $1 \n" \
+ " .set pop \n" \
+ : "=r" (__treg)); \
+ __treg; \
+})
+
+#define mfhi2() \
+({ \
+ unsigned long __treg; \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " # mfhi %0, $ac2 \n" \
+ " .word 0x00400810 \n" \
+ " move %0, $1 \n" \
+ " .set pop \n" \
+ : "=r" (__treg)); \
+ __treg; \
+})
+
+#define mfhi3() \
+({ \
+ unsigned long __treg; \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " # mfhi %0, $ac3 \n" \
+ " .word 0x00600810 \n" \
+ " move %0, $1 \n" \
+ " .set pop \n" \
+ : "=r" (__treg)); \
+ __treg; \
+})
+
+#define mflo0() \
+({ \
+ unsigned long __treg; \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " # mflo %0, $ac0 \n" \
+ " .word 0x00000812 \n" \
+ " move %0, $1 \n" \
+ " .set pop \n" \
+ : "=r" (__treg)); \
+ __treg; \
+})
+
+#define mflo1() \
+({ \
+ unsigned long __treg; \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " # mflo %0, $ac1 \n" \
+ " .word 0x00200812 \n" \
+ " move %0, $1 \n" \
+ " .set pop \n" \
+ : "=r" (__treg)); \
+ __treg; \
+})
+
+#define mflo2() \
+({ \
+ unsigned long __treg; \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " # mflo %0, $ac2 \n" \
+ " .word 0x00400812 \n" \
+ " move %0, $1 \n" \
+ " .set pop \n" \
+ : "=r" (__treg)); \
+ __treg; \
+})
+
+#define mflo3() \
+({ \
+ unsigned long __treg; \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " # mflo %0, $ac3 \n" \
+ " .word 0x00600812 \n" \
+ " move %0, $1 \n" \
+ " .set pop \n" \
+ : "=r" (__treg)); \
+ __treg; \
+})
+
+#define mthi0(x) \
+do { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " move $1, %0 \n" \
+ " # mthi $1, $ac0 \n" \
+ " .word 0x00200011 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+} while (0)
+
+#define mthi1(x) \
+do { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " move $1, %0 \n" \
+ " # mthi $1, $ac1 \n" \
+ " .word 0x00200811 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+} while (0)
+
+#define mthi2(x) \
+do { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " move $1, %0 \n" \
+ " # mthi $1, $ac2 \n" \
+ " .word 0x00201011 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+} while (0)
+
+#define mthi3(x) \
+do { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " move $1, %0 \n" \
+ " # mthi $1, $ac3 \n" \
+ " .word 0x00201811 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+} while (0)
+
+#define mtlo0(x) \
+do { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " move $1, %0 \n" \
+ " # mtlo $1, $ac0 \n" \
+ " .word 0x00200013 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+} while (0)
+
+#define mtlo1(x) \
+do { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " move $1, %0 \n" \
+ " # mtlo $1, $ac1 \n" \
+ " .word 0x00200813 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+} while (0)
+
+#define mtlo2(x) \
+do { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " move $1, %0 \n" \
+ " # mtlo $1, $ac2 \n" \
+ " .word 0x00201013 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+} while (0)
+
+#define mtlo3(x) \
+do { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " move $1, %0 \n" \
+ " # mtlo $1, $ac3 \n" \
+ " .word 0x00201813 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+} while (0)
+
+#endif
+
/*
* TLB operations.
*
{{0,},} \
}
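+/* The DSP ASE adds three accumulator pairs (ac1..ac3), i.e. six extra hi/lo halves */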
+#define NUM_DSP_REGS 6
+
+typedef __u32 dspreg_t;
+
+struct mips_dsp_state {
+ dspreg_t dspr[NUM_DSP_REGS];
+ unsigned int dspcontrol;
+ unsigned short used_dsp;
+};
+
+#define INIT_DSP {{0,},}
+
typedef struct {
unsigned long seg;
} mm_segment_t;
#define ARCH_MIN_TASKALIGN 8
+struct mips_abi;
+
/*
* If you change thread_struct remember to change the #defines below too!
*/
/* Saved fpu/fpu emulator stuff. */
union mips_fpu_union fpu;
+ /* Saved state of the DSP ASE, if available. */
+ struct mips_dsp_state dsp;
+
/* Other stuff associated with the thread. */
unsigned long cp0_badvaddr; /* Last user fault */
unsigned long cp0_baduaddr; /* Last kernel fault accessing USEG */
unsigned long mflags;
unsigned long irix_trampoline; /* Wheee... */
unsigned long irix_oldctx;
+ struct mips_abi *abi;
};
#define MF_ABI_MASK (MF_32BIT_REGS | MF_32BIT_ADDR)
* saved fpu/fpu emulator stuff \
*/ \
INIT_FPU, \
+ /* \
+ * saved dsp/dsp emulator stuff \
+ */ \
+ INIT_DSP, \
/* \
* Other stuff associated with the process \
*/ \
#define MMLO 68
#define FPC_CSR 69
#define FPC_EIR 70
+#define DSP_BASE 71 /* 3 more hi / lo register pairs */
+#define DSP_CONTROL 77
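For illustration only (not part of this patch): a minimal user-space sketch of how a debugger might read the new DSP state through these offsets. It assumes DSP_BASE and DSP_CONTROL are visible via <asm/ptrace.h>, that the target is already stopped under ptrace, and that the helper name dump_dsp is hypothetical; error handling is omitted.

	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <stdio.h>
	#include <asm/ptrace.h>		/* DSP_BASE, DSP_CONTROL */

	static void dump_dsp(pid_t pid)
	{
		long val;
		int i;

		/* The six extra accumulator halves hi1/lo1 .. hi3/lo3 */
		for (i = 0; i < 6; i++) {
			val = ptrace(PTRACE_PEEKUSER, pid,
				     (void *)(long)(DSP_BASE + i), 0);
			printf("dspr[%d] = %#lx\n", i, (unsigned long)val);
		}
		/* DSPControl as last saved for the traced thread */
		val = ptrace(PTRACE_PEEKUSER, pid, (void *)(long)DSP_CONTROL, 0);
		printf("dspcontrol = %#lx\n", (unsigned long)val);
	}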
/*
* This struct defines the way the registers are stored on the stack during a
/* Saved special registers. */
unsigned long cp0_status;
- unsigned long lo;
unsigned long hi;
+ unsigned long lo;
unsigned long cp0_badvaddr;
unsigned long cp0_cause;
unsigned long cp0_epc;
unsigned int sc_fpc_csr;
unsigned int sc_fpc_eir; /* Unused */
unsigned int sc_used_math;
- unsigned int sc_ssflags; /* Unused */
+ unsigned int sc_dsp; /* dsp status, was sc_ssflags */
unsigned long long sc_mdhi;
unsigned long long sc_mdlo;
-
- unsigned int sc_cause; /* Unused */
- unsigned int sc_badvaddr; /* Unused */
-
- unsigned long sc_sigset[4]; /* kernel's sigset_t */
+ unsigned long sc_hi1; /* Was sc_cause */
+ unsigned long sc_lo1; /* Was sc_badvaddr */
+ unsigned long sc_hi2; /* Was sc_sigset[4] */
+ unsigned long sc_lo2;
+ unsigned long sc_hi3;
+ unsigned long sc_lo3;
};
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
* Warning: this structure illdefined with sc_badvaddr being just an unsigned
* int so it was changed to unsigned long in 2.6.0-test1. This may break
* binary compatibility - no prisoners.
+ * The DSP ASE support added in 2.6.12-rc4 turns sc_mdhi and sc_mdlo into
+ * arrays of four entries each and adds sc_dsp and sc_reserved for padding.
+ * No prisoners.
*/
struct sigcontext {
unsigned long sc_regs[32];
unsigned long sc_fpregs[32];
- unsigned long sc_mdhi;
- unsigned long sc_mdlo;
+ unsigned long sc_hi[4];
+ unsigned long sc_lo[4];
unsigned long sc_pc;
- unsigned long sc_badvaddr;
- unsigned int sc_status;
unsigned int sc_fpc_csr;
- unsigned int sc_fpc_eir;
unsigned int sc_used_math;
- unsigned int sc_cause;
+ unsigned int sc_dsp;
+ unsigned int sc_reserved;
};
#ifdef __KERNEL__
#include <linux/posix_types.h>
struct sigcontext32 {
- __u32 sc_regmask; /* Unused */
- __u32 sc_status;
- __u64 sc_pc;
- __u64 sc_regs[32];
- __u64 sc_fpregs[32];
- __u32 sc_ownedfp; /* Unused */
- __u32 sc_fpc_csr;
- __u32 sc_fpc_eir; /* Unused */
- __u32 sc_used_math;
- __u32 sc_ssflags; /* Unused */
- __u64 sc_mdhi;
- __u64 sc_mdlo;
-
- __u32 sc_cause; /* Unused */
- __u32 sc_badvaddr; /* Unused */
-
- __u32 sc_sigset[4]; /* kernel's sigset_t */
+ __u32 sc_regmask; /* Unused */
+ __u32 sc_status;
+ __u64 sc_pc;
+ __u64 sc_regs[32];
+ __u64 sc_fpregs[32];
+ __u32 sc_ownedfp; /* Unused */
+ __u32 sc_fpc_csr;
+ __u32 sc_fpc_eir; /* Unused */
+ __u32 sc_used_math;
+ __u32 sc_dsp; /* dsp status, was sc_ssflags */
+ __u64 sc_mdhi;
+ __u64 sc_mdlo;
+ __u32 sc_hi1; /* Was sc_cause */
+ __u32 sc_lo1; /* Was sc_badvaddr */
+ __u32 sc_hi2; /* Was sc_sigset[4] */
+ __u32 sc_lo2;
+ __u32 sc_hi3;
+ __u32 sc_lo3;
};
#endif /* __KERNEL__ */
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
+#ifdef __KERNEL__
+
+/*
+ * These values of sa_flags are used only by the kernel as part of the
+ * irq handling routines.
+ *
+ * SA_INTERRUPT is also used by the irq handling routines.
+ * SA_SHIRQ flag is for shared interrupt support on PCI and EISA.
+ */
+#define SA_SAMPLE_RANDOM SA_RESTART
+
+#ifdef CONFIG_TRAD_SIGNALS
+#define sig_uses_siginfo(ka) ((ka)->sa.sa_flags & SA_SIGINFO)
+#else
+#define sig_uses_siginfo(ka) (1)
+#endif
+
+#endif /* __KERNEL__ */
+
#define SIG_BLOCK 1 /* for blocking signals */
#define SIG_UNBLOCK 2 /* for unblocking signals */
#define SIG_SETMASK 3 /* for setting the signal mask */
#define SIG_SETMASK32 256 /* Goodie from SGI for BSD compatibility:
set only the low 32 bit of the sigset. */
-#include <asm-generic/signal.h>
+
+/* Type of a signal handler. */
+typedef void __signalfn_t(int);
+typedef __signalfn_t __user *__sighandler_t;
+
+/* Fake signal functions */
+#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
+#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
+#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */
struct sigaction {
unsigned int sa_flags;
#include <asm/addrspace.h>
#include <asm/cpu-features.h>
+#include <asm/dsp.h>
#include <asm/ptrace.h>
#include <asm/war.h>
#include <asm/interrupt.h>
struct task_struct;
-#define switch_to(prev,next,last) \
-do { \
- (last) = resume(prev, next, next->thread_info); \
+#define switch_to(prev,next,last) \
+do { \
+ if (cpu_has_dsp) \
+ __save_dsp(prev); \
+ (last) = resume(prev, next, next->thread_info); \
+ if (cpu_has_dsp) \
+ __restore_dsp(current); \
} while(0)
#define ROT_IN_PIECES \