Commit 41c594ab authored by Ralf Baechle

[MIPS] MT: Improved multithreading support.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 2600990e
......@@ -1447,6 +1447,10 @@ choice
prompt "MIPS MT options"
depends on MIPS_MT
config MIPS_MT_SMTC
bool "SMTC: Use all TCs on all VPEs for SMP"
select SMP
config MIPS_MT_SMP
bool "Use 1 TC on each available VPE for SMP"
select SMP
......@@ -1613,7 +1617,7 @@ source "mm/Kconfig"
config SMP
bool "Multi-Processing support"
depends on CPU_RM9000 || ((SIBYTE_BCM1x80 || SIBYTE_BCM1x55 || SIBYTE_SB1250 || QEMU) && !SIBYTE_STANDALONE) || SGI_IP27 || MIPS_MT_SMP
depends on CPU_RM9000 || ((SIBYTE_BCM1x80 || SIBYTE_BCM1x55 || SIBYTE_SB1250 || QEMU) && !SIBYTE_STANDALONE) || SGI_IP27 || MIPS_MT_SMP || MIPS_MT_SMTC
---help---
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
......
......@@ -34,7 +34,9 @@ obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_MIPS_MT_SMP) += smp_mt.o
obj-$(CONFIG_MIPS_MT) += mips-mt.o
obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o
obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
obj-$(CONFIG_MIPS_APSP_KSPD) += kspd.o
obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
......
......@@ -69,6 +69,9 @@ void output_ptreg_defines(void)
offset("#define PT_BVADDR ", struct pt_regs, cp0_badvaddr);
offset("#define PT_STATUS ", struct pt_regs, cp0_status);
offset("#define PT_CAUSE ", struct pt_regs, cp0_cause);
#ifdef CONFIG_MIPS_MT_SMTC
offset("#define PT_TCSTATUS ", struct pt_regs, cp0_tcstatus);
#endif /* CONFIG_MIPS_MT_SMTC */
size("#define PT_SIZE ", struct pt_regs);
linefeed;
}
......
......@@ -17,6 +17,9 @@
#include <asm/isadep.h>
#include <asm/thread_info.h>
#include <asm/war.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif
#ifdef CONFIG_PREEMPT
.macro preempt_stop
......@@ -75,6 +78,37 @@ FEXPORT(syscall_exit)
bnez t0, syscall_exit_work
FEXPORT(restore_all) # restore full frame
#ifdef CONFIG_MIPS_MT_SMTC
/* Detect and execute deferred IPI "interrupts" */
move a0,sp
jal deferred_smtc_ipi
/* Re-arm any temporarily masked interrupts not explicitly "acked" */
mfc0 v0, CP0_TCSTATUS
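	/* Block interrupts for this TC by setting IXMT; the pre-existing IXMT bit stays in v0 and is merged back below */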
ori v1, v0, TCSTATUS_IXMT
mtc0 v1, CP0_TCSTATUS
andi v0, TCSTATUS_IXMT
ehb
mfc0 t0, CP0_TCCONTEXT
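	/* t0 holds IM bits parked in TCContext by except_vec_vi_handler; DMT makes the Status update below atomic on this VPE */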
DMT 9 # dmt t1
jal mips_ihb
mfc0 t2, CP0_STATUS
andi t3, t0, 0xff00
or t2, t2, t3
mtc0 t2, CP0_STATUS
ehb
andi t1, t1, VPECONTROL_TE
beqz t1, 1f
EMT
1:
mfc0 v1, CP0_TCSTATUS
/* We set IXMT above, XOR should clear it here */
xori v1, v1, TCSTATUS_IXMT
or v1, v0, v1
mtc0 v1, CP0_TCSTATUS
ehb
xor t0, t0, t3
mtc0 t0, CP0_TCCONTEXT
#endif /* CONFIG_MIPS_MT_SMTC */
.set noat
RESTORE_TEMP
RESTORE_AT
......
......@@ -283,11 +283,33 @@
*/
3:
#ifdef CONFIG_MIPS_MT_SMTC
/* Read-modify-write of Status must be atomic */
mfc0 t2, CP0_TCSTATUS
ori t1, t2, TCSTATUS_IXMT
mtc0 t1, CP0_TCSTATUS
andi t2, t2, TCSTATUS_IXMT
ehb
DMT 9 # dmt t1
jal mips_ihb
nop
#endif /* CONFIG_MIPS_MT_SMTC */
mfc0 t0, CP0_STATUS
ori t0, 0x1f
xori t0, 0x1f
mtc0 t0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
andi t1, t1, VPECONTROL_TE
beqz t1, 9f
nop
EMT # emt
9:
mfc0 t1, CP0_TCSTATUS
xori t1, t1, TCSTATUS_IXMT
or t1, t1, t2
mtc0 t1, CP0_TCSTATUS
ehb
#endif /* CONFIG_MIPS_MT_SMTC */
LONG_L v0, GDB_FR_STATUS(sp)
LONG_L v1, GDB_FR_EPC(sp)
mtc0 v0, CP0_STATUS
......
......@@ -140,6 +140,7 @@
#include <asm/system.h>
#include <asm/gdb-stub.h>
#include <asm/inst.h>
#include <asm/smp.h>
/*
* external low-level support routines
......@@ -669,6 +670,64 @@ static void kgdb_wait(void *arg)
local_irq_restore(flags);
}
/*
 * GDB stub needs to call kgdb_wait on all processors with interrupts
 * disabled, so it uses its own special variant.
 */
static int kgdb_smp_call_kgdb_wait(void)
{
#ifdef CONFIG_SMP
struct call_data_struct data;
int i, cpus = num_online_cpus() - 1;
int cpu = smp_processor_id();
/*
* Can die spectacularly if this CPU isn't yet marked online
*/
BUG_ON(!cpu_online(cpu));
if (!cpus)
return 0;
if (spin_is_locked(&smp_call_lock)) {
/*
* Some other processor is trying to make us do something
* but we're not going to respond... give up
*/
return -1;
}
/*
* We will continue here, accepting the fact that
* the kernel may deadlock if another CPU attempts
* to call smp_call_function now...
*/
data.func = kgdb_wait;
data.info = NULL;
atomic_set(&data.started, 0);
data.wait = 0;
spin_lock(&smp_call_lock);
call_data = &data;
mb();
/* Send a message to all other CPUs and wait for them to respond */
for (i = 0; i < NR_CPUS; i++)
if (cpu_online(i) && i != cpu)
core_send_ipi(i, SMP_CALL_FUNCTION);
/* Wait for response */
/* FIXME: lock-up detection, backtrace on lock-up */
while (atomic_read(&data.started) != cpus)
barrier();
call_data = NULL;
spin_unlock(&smp_call_lock);
#endif
return 0;
}
/*
* This function does all command processing for interfacing to gdb. It
......@@ -718,7 +777,7 @@ void handle_exception (struct gdb_regs *regs)
/*
* force other cpus to enter kgdb
*/
smp_call_function(kgdb_wait, NULL, 0, 0);
kgdb_smp_call_kgdb_wait();
/*
* If we're in breakpoint() increment the PC
......
......@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
......@@ -171,6 +172,15 @@ NESTED(except_vec_vi, 0, sp)
SAVE_AT
.set push
.set noreorder
#ifdef CONFIG_MIPS_MT_SMTC
/*
* To keep from blindly blocking *all* interrupts
* during service by SMTC kernel, we also want to
* pass the IM value to be cleared.
*/
EXPORT(except_vec_vi_mori)
ori a0, $0, 0
#endif /* CONFIG_MIPS_MT_SMTC */
EXPORT(except_vec_vi_lui)
lui v0, 0 /* Patched */
j except_vec_vi_handler
......@@ -187,6 +197,25 @@ EXPORT(except_vec_vi_end)
NESTED(except_vec_vi_handler, 0, sp)
SAVE_TEMP
SAVE_STATIC
#ifdef CONFIG_MIPS_MT_SMTC
/*
 * SMTC has an interesting problem: interrupts are level-triggered,
 * and the CLI macro will clear EXL, potentially causing a duplicate
 * interrupt service invocation. So we need to clear the associated
 * IM bit of Status prior to doing CLI, and restore it after the
 * service routine has been invoked - we must assume that the
 * service routine will have cleared the state, and any active
 * level represents a new or otherwise unserviced event...
 */
mfc0 t1, CP0_STATUS
and t0, a0, t1
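	/* t0 = this vector's IM bit (patched into a0 at except_vec_vi_mori), if it is currently enabled in Status */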
mfc0 t2, CP0_TCCONTEXT
or t0, t0, t2
mtc0 t0, CP0_TCCONTEXT
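	/* Park the masked bit in TCContext; restore_all re-arms any IM bits still recorded there */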
xor t1, t1, t0
mtc0 t1, CP0_STATUS
ehb
#endif /* CONFIG_MIPS_MT_SMTC */
CLI
move a0, sp
jalr v0
......
......@@ -18,6 +18,7 @@
#include <linux/threads.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>
#include <asm/page.h>
#include <asm/mipsregs.h>
......@@ -82,12 +83,33 @@
*/
.macro setup_c0_status set clr
.set push
#ifdef CONFIG_MIPS_MT_SMTC
/*
* For SMTC, we need to set privilege and disable interrupts only for
* the current TC, using the TCStatus register.
*/
mfc0 t0, CP0_TCSTATUS
/* Fortunately CU 0 is in the same place in both registers */
/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
li t1, ST0_CU0 | 0x08001c00
or t0, t1
/* Clear TKSU, leave IXMT */
xori t0, 0x00001800
mtc0 t0, CP0_TCSTATUS
ehb
/* We need to leave the global IE bit set, but clear EXL...*/
mfc0 t0, CP0_STATUS
or t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr
xor t0, ST0_EXL | ST0_ERL | \clr
mtc0 t0, CP0_STATUS
#else
mfc0 t0, CP0_STATUS
or t0, ST0_CU0|\set|0x1f|\clr
xor t0, 0x1f|\clr
mtc0 t0, CP0_STATUS
.set noreorder
sll zero,3 # ehb
#endif
.set pop
.endm
......@@ -134,6 +156,24 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
ARC64_TWIDDLE_PC
#ifdef CONFIG_MIPS_MT_SMTC
/*
* In SMTC kernel, "CLI" is thread-specific, in TCStatus.
* We still need to enable interrupts globally in Status,
* and clear EXL/ERL.
*
* TCContext is used to track interrupt levels under
* service in SMTC kernel. Clear for boot TC before
* allowing any interrupts.
*/
mtc0 zero, CP0_TCCONTEXT
mfc0 t0, CP0_STATUS
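	/* The ori/xori pair sets IM[7:0] and IE while clearing KSU, ERL and EXL */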
ori t0, t0, 0xff1f
xori t0, t0, 0x001e
mtc0 t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
PTR_LA t0, __bss_start # clear .bss
LONG_S zero, (t0)
PTR_LA t1, __bss_stop - LONGSIZE
......@@ -166,8 +206,25 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
* function after setting up the stack and gp registers.
*/
NESTED(smp_bootstrap, 16, sp)
#ifdef CONFIG_MIPS_MT_SMTC
/*
* Read-modify-writes of Status must be atomic, and this
* is one case where CLI is invoked without EXL being
* necessarily set. The CLI and setup_c0_status will
* in fact be redundant for all but the first TC of
* each VPE being booted.
*/
DMT 10 # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */
jal mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
setup_c0_status_sec
smp_slave_setup
#ifdef CONFIG_MIPS_MT_SMTC
andi t2, t2, VPECONTROL_TE
beqz t2, 2f
EMT # emt
2:
#endif /* CONFIG_MIPS_MT_SMTC */
j start_secondary
END(smp_bootstrap)
#endif /* CONFIG_SMP */
......
......@@ -187,6 +187,10 @@ void mask_and_ack_8259A(unsigned int irq)
outb(cached_21,0x21);
outb(0x60+irq,0x20); /* 'Specific EOI' to master */
}
#ifdef CONFIG_MIPS_MT_SMTC
if (irq_hwmask[irq] & ST0_IM)
set_c0_status(irq_hwmask[irq] & ST0_IM);
#endif /* CONFIG_MIPS_MT_SMTC */
spin_unlock_irqrestore(&i8259A_lock, flags);
return;
......
......@@ -76,6 +76,11 @@ static void level_mask_and_ack_msc_irq(unsigned int irq)
mask_msc_irq(irq);
if (!cpu_has_veic)
MSCIC_WRITE(MSC01_IC_EOI, 0);
#ifdef CONFIG_MIPS_MT_SMTC
/* This actually needs to be a call into platform code */
if (irq_hwmask[irq] & ST0_IM)
set_c0_status(irq_hwmask[irq] & ST0_IM);
#endif /* CONFIG_MIPS_MT_SMTC */
}
/*
......@@ -92,6 +97,10 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq)
MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
}
#ifdef CONFIG_MIPS_MT_SMTC
if (irq_hwmask[irq] & ST0_IM)
set_c0_status(irq_hwmask[irq] & ST0_IM);
#endif /* CONFIG_MIPS_MT_SMTC */
}
/*
......
......@@ -38,6 +38,15 @@ void ack_bad_irq(unsigned int irq)
atomic_t irq_err_count;
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC Kernel needs to manipulate low-level CPU interrupt mask
* in do_IRQ. These are passed in setup_irq_smtc() and stored
* in this table.
*/
unsigned long irq_hwmask[NR_IRQS];
#endif /* CONFIG_MIPS_MT_SMTC */
#undef do_IRQ
/*
......@@ -49,6 +58,7 @@ asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs)
{
irq_enter();
__DO_IRQ_SMTC_HOOK();
__do_IRQ(irq, regs);
irq_exit();
......@@ -129,6 +139,9 @@ void __init init_IRQ(void)
irq_desc[i].depth = 1;
irq_desc[i].handler = &no_irq_type;
spin_lock_init(&irq_desc[i].lock);
#ifdef CONFIG_MIPS_MT_SMTC
irq_hwmask[i] = 0;
#endif /* CONFIG_MIPS_MT_SMTC */
}
arch_init_irq();
......
/*
* General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
* Copyright (C) 2005 Mips Technologies, Inc
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/mipsmtregs.h>
#include <asm/r4kcache.h>
#include <asm/cacheflush.h>
/*
* CPU mask used to set process affinity for MT VPEs/TCs with FPUs
*/
cpumask_t mt_fpu_cpumask;
#ifdef CONFIG_MIPS_MT_FPAFF
#include <linux/cpu.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
unsigned long mt_fpemul_threshold = 0;
/*
* Replacement functions for the sys_sched_setaffinity() and
* sys_sched_getaffinity() system calls, so that we can integrate
* FPU affinity with the user's requested processor affinity.
* This code is 98% identical with the sys_sched_setaffinity()
* and sys_sched_getaffinity() system calls, and should be
* updated when kernel/sched.c changes.
*/
/*
* find_process_by_pid - find a process with a matching PID value.
* used in sys_sched_set/getaffinity() in kernel/sched.c, so
* cloned here.
*/
static inline task_t *find_process_by_pid(pid_t pid)
{
return pid ? find_task_by_pid(pid) : current;
}
/*
* mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
*/
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
unsigned long __user *user_mask_ptr)
{
cpumask_t new_mask;
cpumask_t effective_mask;
int retval;
task_t *p;
if (len < sizeof(new_mask))
return -EINVAL;
if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
return -EFAULT;
lock_cpu_hotplug();
read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
if (!p) {
read_unlock(&tasklist_lock);
unlock_cpu_hotplug();
return -ESRCH;
}
/*
* It is not safe to call set_cpus_allowed with the
* tasklist_lock held. We will bump the task_struct's
* usage count and drop tasklist_lock before invoking
* set_cpus_allowed.
*/
get_task_struct(p);
retval = -EPERM;
if ((current->euid != p->euid) && (current->euid != p->uid) &&
!capable(CAP_SYS_NICE)) {
read_unlock(&tasklist_lock);
goto out_unlock;
}
/* Record new user-specified CPU set for future reference */
p->thread.user_cpus_allowed = new_mask;
/* Unlock the task list */
read_unlock(&tasklist_lock);
/* Compute new global allowed CPU set if necessary */
if( (p->thread.mflags & MF_FPUBOUND)
&& cpus_intersects(new_mask, mt_fpu_cpumask)) {
cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
retval = set_cpus_allowed(p, effective_mask);
} else {
p->thread.mflags &= ~MF_FPUBOUND;
retval = set_cpus_allowed(p, new_mask);
}
out_unlock:
put_task_struct(p);
unlock_cpu_hotplug();
return retval;
}
/*
* mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
*/
asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
unsigned long __user *user_mask_ptr)
{
unsigned int real_len;
cpumask_t mask;
int retval;
task_t *p;
real_len = sizeof(mask);
if (len < real_len)
return -EINVAL;
lock_cpu_hotplug();
read_lock(&tasklist_lock);
retval = -ESRCH;
p = find_process_by_pid(pid);
if (!p)
goto out_unlock;
retval = 0;
cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
out_unlock:
read_unlock(&tasklist_lock);
unlock_cpu_hotplug();
if (retval)
return retval;
if (copy_to_user(user_mask_ptr, &mask, real_len))
return -EFAULT;
return real_len;
}
#endif /* CONFIG_MIPS_MT_FPAFF */
/*
 * Dump new MIPS MT state for the core. Does not leave TCs halted.
 * Takes an argument which is taken to be a pre-call MVPControl value.
 */
void mips_mt_regdump(unsigned long mvpctl)
{
unsigned long flags;
unsigned long vpflags;
unsigned long mvpconf0;
int nvpe;
int ntc;
int i;
int tc;
unsigned long haltval;
unsigned long tcstatval;
#ifdef CONFIG_MIPS_MT_SMTC
void smtc_soft_dump(void);
#endif /* CONFIG_MIPS_MT_SMTC */
local_irq_save(flags);
vpflags = dvpe();
printk("=== MIPS MT State Dump ===\n");
printk("-- Global State --\n");
printk(" MVPControl Passed: %08lx\n", mvpctl);
printk(" MVPControl Read: %08lx\n", vpflags);
printk(" MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0()));
nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
printk("-- per-VPE State --\n");
for(i = 0; i < nvpe; i++) {
for(tc = 0; tc < ntc; tc++) {
settc(tc);
if((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
printk(" VPE %d\n", i);
printk(" VPEControl : %08lx\n", read_vpe_c0_vpecontrol());
printk(" VPEConf0 : %08lx\n", read_vpe_c0_vpeconf0());
printk(" VPE%d.Status : %08lx\n",
i, read_vpe_c0_status());
printk(" VPE%d.EPC : %08lx\n", i, read_vpe_c0_epc());
printk(" VPE%d.Cause : %08lx\n", i, read_vpe_c0_cause());
printk(" VPE%d.Config7 : %08lx\n",
i, read_vpe_c0_config7());
break; /* Next VPE */
}
}
}
printk("-- per-TC State --\n");
for(tc = 0; tc < ntc; tc++) {
settc(tc);
if(read_tc_c0_tcbind() == read_c0_tcbind()) {
/* Are we dumping ourself? */
haltval = 0; /* Then we're not halted, and mustn't be */
tcstatval = flags; /* And pre-dump TCStatus is flags */
printk(" TC %d (current TC with VPE EPC above)\n", tc);
} else {
haltval = read_tc_c0_tchalt();
write_tc_c0_tchalt(1);
tcstatval = read_tc_c0_tcstatus();
printk(" TC %d\n", tc);
}
printk(" TCStatus : %08lx\n", tcstatval);
printk(" TCBind : %08lx\n", read_tc_c0_tcbind());
printk(" TCRestart : %08lx\n", read_tc_c0_tcrestart());
printk(" TCHalt : %08lx\n", haltval);
printk(" TCContext : %08lx\n", read_tc_c0_tccontext());
if (!haltval)
write_tc_c0_tchalt(0);
}
#ifdef CONFIG_MIPS_MT_SMTC
smtc_soft_dump();
#endif /* CONFIG_MIPS_MT_SMTC */
printk("===========================\n");
evpe(vpflags);
local_irq_restore(flags);
}
static int mt_opt_norps = 0;
static int mt_opt_rpsctl = -1;
static int mt_opt_nblsu = -1;
static int mt_opt_forceconfig7 = 0;
static int mt_opt_config7 = -1;
static int __init rps_disable(char *s)
{
mt_opt_norps = 1;
return 1;
}
__setup("norps", rps_disable);
static int __init rpsctl_set(char *str)
{
get_option(&str, &mt_opt_rpsctl);
return 1;
}
__setup("rpsctl=", rpsctl_set);
static int __init nblsu_set(char *str)
{
get_option(&str, &mt_opt_nblsu);
return 1;
}
__setup("nblsu=", nblsu_set);
static int __init config7_set(char *str)
{
get_option(&str, &mt_opt_config7);
mt_opt_forceconfig7 = 1;
return 1;
}
__setup("config7=", config7_set);
/* Experimental cache flush control parameters that should go away some day */
int mt_protiflush = 0;
int mt_protdflush = 0;
int mt_n_iflushes = 1;
int mt_n_dflushes = 1;
static int __init set_protiflush(char *s)
{
mt_protiflush = 1;
return 1;
}
__setup("protiflush", set_protiflush);
static int __init set_protdflush(char *s)
{
mt_protdflush = 1;
return 1;
}
__setup("protdflush", set_protdflush);
static int __init niflush(char *s)
{
get_option(&s, &mt_n_iflushes);
return 1;
}
__setup("niflush=", niflush);
static int __init ndflush(char *s)
{
get_option(&s, &mt_n_dflushes);
return 1;
}
__setup("ndflush=", ndflush);
#ifdef CONFIG_MIPS_MT_FPAFF
static int fpaff_threshold = -1;
static int __init fpaff_thresh(char *str)
{
get_option(&str, &fpaff_threshold);
return 1;
}
__setup("fpaff=", fpaff_thresh);
#endif /* CONFIG_MIPS_MT_FPAFF */
static unsigned int itc_base = 0;
static int __init set_itc_base(char *str)
{
get_option(&str, &itc_base);
return 1;
}
__setup("itcbase=", set_itc_base);
void mips_mt_set_cpuoptions(void)
{
unsigned int oconfig7 = read_c0_config7();
unsigned int nconfig7 = oconfig7;
if (mt_opt_norps) {
printk("\"norps\" option deprectated: use \"rpsctl=\"\n");
}
if (mt_opt_rpsctl >= 0) {
printk("34K return prediction stack override set to %d.\n",
mt_opt_rpsctl);
if (mt_opt_rpsctl)
nconfig7 |= (1 << 2);
else
nconfig7 &= ~(1 << 2);
}
if (mt_opt_nblsu >= 0) {
printk("34K ALU/LSU sync override set to %d.\n", mt_opt_nblsu);
if (mt_opt_nblsu)
nconfig7 |= (1 << 5);
else
nconfig7 &= ~(1 << 5);
}
if (mt_opt_forceconfig7) {
printk("CP0.Config7 forced to 0x%08x.\n", mt_opt_config7);
nconfig7 = mt_opt_config7;
}
if (oconfig7 != nconfig7) {
__asm__ __volatile("sync");
write_c0_config7(nconfig7);
ehb ();
printk("Config7: 0x%08x\n", read_c0_config7());
}
/* Report Cache management debug options */
if (mt_protiflush)
printk("I-cache flushes single-threaded\n");
if (mt_protdflush)
printk("D-cache flushes single-threaded\n");
if (mt_n_iflushes != 1)
printk("I-Cache Flushes Repeated %d times\n", mt_n_iflushes);
if (mt_n_dflushes != 1)
printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);
#ifdef CONFIG_MIPS_MT_FPAFF
/* FPU Use Factor empirically derived from experiments on 34K */
#define FPUSEFACTOR 333
if (fpaff_threshold >= 0) {
mt_fpemul_threshold = fpaff_threshold;
} else {
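		/* loops_per_jiffy/(500000/HZ) is the BogoMIPS estimate, so the default threshold scales with measured core speed */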
mt_fpemul_threshold =
(FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
}
printk("FPU Affinity set after %ld emulations\n",
mt_fpemul_threshold);
#endif /* CONFIG_MIPS_MT_FPAFF */
if (itc_base != 0) {
/*
* Configure ITC mapping. This code is very
* specific to the 34K core family, which uses
* a special mode bit ("ITC") in the ErrCtl
* register to enable access to ITC control
* registers via cache "tag" operations.
*/
unsigned long ectlval;
unsigned long itcblkgrn;
/* ErrCtl register is known as "ecc" to Linux */
ectlval = read_c0_ecc();
write_c0_ecc(ectlval | (0x1 << 26));
ehb();
#define INDEX_0 (0x80000000)
#define INDEX_8 (0x80000008)
/* Read "cache tag" for Dcache pseudo-index 8 */
cache_op(Index_Load_Tag_D, INDEX_8);
ehb();
itcblkgrn = read_c0_dtaglo();
itcblkgrn &= 0xfffe0000;
/* Set for 128 byte pitch of ITC cells */
itcblkgrn |= 0x00000c00;
/* Stage in Tag register */
write_c0_dtaglo(itcblkgrn);
ehb();
/* Write out to ITU with CACHE op */
cache_op(Index_Store_Tag_D, INDEX_8);
/* Now set base address, and turn ITC on with 0x1 bit */
write_c0_dtaglo((itc_base & 0xfffffc00) | 0x1 );
ehb();
/* Write out to ITU with CACHE op */
cache_op(Index_Store_Tag_D, INDEX_0);
write_c0_ecc(ectlval);
ehb();
printk("Mapped %ld ITC cells starting at 0x%08x\n",
((itcblkgrn & 0x7fe00000) >> 20), itc_base);
}
}
/*
* Function to protect cache flushes from concurrent execution
* depends on MP software model chosen.
*/
void mt_cflush_lockdown(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
void smtc_cflush_lockdown(void);
smtc_cflush_lockdown();
#endif /* CONFIG_MIPS_MT_SMTC */
/* FILL IN VSMP and AP/SP VERSIONS HERE */
}
void mt_cflush_release(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
void smtc_cflush_release(void);
smtc_cflush_release();
#endif /* CONFIG_MIPS_MT_SMTC */
/* FILL IN VSMP and AP/SP VERSIONS HERE */
}
......@@ -41,6 +41,10 @@
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
extern void smtc_idle_loop_hook(void);
#endif /* CONFIG_MIPS_MT_SMTC */
/*
* The idle thread. There's no useful work to be done, so just try to conserve
......@@ -51,9 +55,13 @@ ATTRIB_NORET void cpu_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
while (!need_resched())
while (!need_resched()) {
#ifdef CONFIG_MIPS_MT_SMTC
smtc_idle_loop_hook();
#endif /* CONFIG_MIPS_MT_SMTC */
if (cpu_wait)
(*cpu_wait)();
}
preempt_enable_no_resched();
schedule();
preempt_disable();
......
......@@ -248,10 +248,20 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case FPC_EIR: { /* implementation / version register */
unsigned int flags;
#ifdef CONFIG_MIPS_MT_SMTC
unsigned int irqflags;
unsigned int mtflags;
#endif /* CONFIG_MIPS_MT_SMTC */
if (!cpu_has_fpu)
break;
#ifdef CONFIG_MIPS_MT_SMTC
/* Read-modify-write of Status must be atomic */
local_irq_save(irqflags);
mtflags = dmt();
#endif /* CONFIG_MIPS_MT_SMTC */
preempt_disable();
if (cpu_has_mipsmt) {
unsigned int vpflags = dvpe();
......@@ -266,6 +276,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
write_c0_status(flags);
}
#ifdef CONFIG_MIPS_MT_SMTC
emt(mtflags);
local_irq_restore(irqflags);
#endif /* CONFIG_MIPS_MT_SMTC */
preempt_enable();
break;
}
......
......@@ -173,12 +173,22 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
break;
case FPC_EIR: { /* implementation / version register */
unsigned int flags;
#ifdef CONFIG_MIPS_MT_SMTC
unsigned int irqflags;
unsigned int mtflags;
#endif /* CONFIG_MIPS_MT_SMTC */
if (!cpu_has_fpu) {
tmp = 0;
break;
}
#ifdef CONFIG_MIPS_MT_SMTC
/* Read-modify-write of Status must be atomic */
local_irq_save(irqflags);
mtflags = dmt();
#endif /* CONFIG_MIPS_MT_SMTC */
preempt_disable();
if (cpu_has_mipsmt) {
unsigned int vpflags = dvpe();
......@@ -193,6 +203,10 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
write_c0_status(flags);
}
#ifdef CONFIG_MIPS_MT_SMTC
emt(mtflags);
local_irq_restore(irqflags);
#endif /* CONFIG_MIPS_MT_SMTC */
preempt_enable();
break;
}
......
......@@ -88,7 +88,18 @@
PTR_ADDIU t0, $28, _THREAD_SIZE - 32
set_saved_sp t0, t1, t2
#ifdef CONFIG_MIPS_MT_SMTC
/* Read-modify-writes of Status must be atomic on a VPE */
mfc0 t2, CP0_TCSTATUS
ori t1, t2, TCSTATUS_IXMT
mtc0 t1, CP0_TCSTATUS
andi t2, t2, TCSTATUS_IXMT
ehb
DMT 8 # dmt t0
move t1,ra
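	# jal clobbers ra, so it is staged in t1 around the call to mips_ihb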
jal mips_ihb
move ra,t1
#endif /* CONFIG_MIPS_MT_SMTC */
mfc0 t1, CP0_STATUS /* Do we really need this? */
li a3, 0xff01
and t1, a3
......@@ -97,6 +108,18 @@
and a2, a3
or a2, t1
mtc0 a2, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
ehb
andi t0, t0, VPECONTROL_TE
beqz t0, 1f
emt
1:
mfc0 t1, CP0_TCSTATUS
xori t1, t1, TCSTATUS_IXMT
or t1, t1, t2
mtc0 t1, CP0_TCSTATUS
ehb
#endif /* CONFIG_MIPS_MT_SMTC */
move v0, a0
jr ra
END(resume)
......@@ -131,10 +154,19 @@ LEAF(_restore_fp)
#define FPU_DEFAULT 0x00000000
LEAF(_init_fpu)
#ifdef CONFIG_MIPS_MT_SMTC
/* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */
mfc0 t0, CP0_TCSTATUS
/* Bit position is the same for Status, TCStatus */
li t1, ST0_CU1
or t0, t1
mtc0 t0, CP0_TCSTATUS
#else /* Normal MIPS CU1 enable */
mfc0 t0, CP0_STATUS
li t1, ST0_CU1
or t0, t1
mtc0 t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
fpu_enable_hazard
li t1, FPU_DEFAULT
......
/*
* Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
*
* Elizabeth Clarke (beth@mips.com)
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
......@@ -16,6 +12,10 @@
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Copyright (C) 2004, 05, 06 MIPS Technologies, Inc.
* Elizabeth Clarke (beth@mips.com)
* Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
*/
#include <linux/kernel.h>
#include <linux/sched.h>
......@@ -24,6 +24,7 @@
#include <linux/compiler.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
......@@ -33,8 +34,8 @@
#include <asm/time.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/cacheflush.h>
#include <asm/mips-boards/maltaint.h>
#include <asm/mips_mt.h>
#include <asm/mips-boards/maltaint.h> /* This is f*cking wrong */
#define MIPS_CPU_IPI_RESCHED_IRQ 0
#define MIPS_CPU_IPI_CALL_IRQ 1
......@@ -66,6 +67,7 @@ void __init sanitize_tlb_entries(void)
if (!cpu_has_mipsmt)
return;
/* Enable VPC */
set_c0_mvpcontrol(MVPCONTROL_VPC);
back_to_back_c0_hazard();
......@@ -106,12 +108,12 @@ void __init sanitize_tlb_entries(void)
static void ipi_resched_dispatch (struct pt_regs *regs)
{
do_IRQ(MIPS_CPU_IPI_RESCHED_IRQ, regs);
do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ, regs);
}
static void ipi_call_dispatch (struct pt_regs *regs)
{
do_IRQ(MIPS_CPU_IPI_CALL_IRQ, regs);
do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ, regs);
}
irqreturn_t ipi_resched_interrupt(int irq, void *dev_id, struct pt_regs *regs)
......@@ -155,6 +157,8 @@ void plat_smp_setup(void)
dvpe();
dmt();
mips_mt_set_cpuoptions();
/* Put MVPE's into 'configuration state' */
set_c0_mvpcontrol(MVPCONTROL_VPC);
......@@ -189,11 +193,13 @@ void plat_smp_setup(void)
if (i != 0) {
write_vpe_c0_status((read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
write_vpe_c0_cause(read_vpe_c0_cause() & ~CAUSEF_IP);
/* set config to be the same as vpe0, particularly kseg0 coherency alg */
write_vpe_c0_config( read_c0_config());
/* make sure there are no software interrupts pending */
write_vpe_c0_cause(read_vpe_c0_cause() & ~(C_SW1|C_SW0));
/* Propagate Config7 */
write_vpe_c0_config7(read_c0_config7());
}
......@@ -233,16 +239,16 @@ void plat_smp_setup(void)
/* We'll wait until starting the secondaries before starting MVPE */
printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
}
void __init plat_prepare_cpus(unsigned int max_cpus)
{
/* set up ipi interrupts */
if (cpu_has_vint) {
set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
set_vi_handler (MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
}
}
void __init plat_prepare_cpus(unsigned int max_cpus)
{
cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ;
......@@ -287,7 +293,8 @@ void prom_boot_secondary(int cpu, struct task_struct *idle)
/* global pointer */
write_tc_gpr_gp((unsigned long)gp);
flush_icache_range((unsigned long)gp, (unsigned long)(gp + 1));
flush_icache_range((unsigned long)gp,
(unsigned long)(gp + sizeof(struct thread_info)));
/* finally out of configuration and into chaos */
clear_c0_mvpcontrol(MVPCONTROL_VPC);
......
......@@ -38,6 +38,10 @@
#include <asm/mmu_context.h>
#include <asm/smp.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */
cpumask_t phys_cpu_present_map; /* Bitmask of available CPUs */
volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
cpumask_t cpu_online_map; /* Bitmask of currently online CPUs */
......@@ -85,6 +89,10 @@ asmlinkage void start_secondary(void)
{
unsigned int cpu;
#ifdef CONFIG_MIPS_MT_SMTC
/* Only do cpu_probe for first TC of CPU */
if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
#endif /* CONFIG_MIPS_MT_SMTC */
cpu_probe();
cpu_report();
per_cpu_trap_init();
......@@ -179,11 +187,13 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
if (wait)
while (atomic_read(&data.finished) != cpus)
barrier();
call_data = NULL;
spin_unlock(&smp_call_lock);
return 0;
}
void smp_call_function_interrupt(void)
{
void (*func) (void *info) = call_data->func;
......
/*
* Assembly Language Functions for MIPS MT SMTC support
*/
/*
* This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. */
#include <asm/regdef.h>
#include <asm/asmmacro.h>
#include <asm/stackframe.h>
#include <asm/stackframe.h>
/*
 * "Software Interrupt" linkage.
 *
 * This is invoked when an "Interrupt" is sent from one TC to another,
 * where the TC to be interrupted is halted, has its Restart address
 * and Status values saved by the "remote control" thread, then modified
 * to cause execution to begin here, in kernel mode. This code then
 * disguises the TC state as that of an exception and transfers
 * control to the general exception or vectored interrupt handler.
 */
.set noreorder
/*
The __smtc_ipi_vector would use k0 and k1 as temporaries and
1) Set EXL (this is per-VPE, so this can't be done by proxy!)
2) Restore the K/CU and IXMT bits to the pre "exception" state
(EXL means no interrupts and access to the kernel map).
3) Set EPC to be the saved value of TCRestart.
4) Jump to the exception handler entry point passed by the sender.
CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED??
*/
/*
 * Reviled and slandered vision: Set EXL and restore K/CU/IXMT
 * state of pre-halt thread, then save everything and call
 * through some function pointer to imaginary_exception, which
 * will parse a register value or memory message queue to
 * deliver things like interprocessor interrupts. On return
 * from that function, jump to the global ret_from_irq code
 * to invoke the scheduler and return as appropriate.
 */
#define PT_PADSLOT4 (PT_R0-8)
#define PT_PADSLOT5 (PT_R0-4)
.text
.align 5
FEXPORT(__smtc_ipi_vector)
.set noat
/* Disable thread scheduling to make Status update atomic */
DMT 27 # dmt k1
ehb
/* Set EXL */
mfc0 k0,CP0_STATUS
ori k0,k0,ST0_EXL
mtc0 k0,CP0_STATUS
ehb
/* Thread scheduling now inhibited by EXL. Restore TE state. */
andi k1,k1,VPECONTROL_TE
beqz k1,1f
emt
1:
/*
* The IPI sender has put some information on the anticipated
* kernel stack frame. If we were in user mode, this will be
* built above the saved kernel SP. If we were already in the
* kernel, it will be built above the current CPU SP.
*
* Were we in kernel mode, as indicated by CU0?
*/
sll k1,k0,3
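	/* ST0_CU0 is bit 28; shifted left 3 it lands in the sign bit, so bltz below means CU0 was already set (kernel mode) */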
.set noreorder
bltz k1,2f
move k1,sp
.set reorder
/*
* If previously in user mode, set CU0 and use kernel stack.
*/
li k1,ST0_CU0
or k1,k1,k0
mtc0 k1,CP0_STATUS
ehb
get_saved_sp
/* Interrupting TC will have pre-set values in slots in the new frame */
2: subu k1,k1,PT_SIZE
/* Load TCStatus Value */
lw k0,PT_TCSTATUS(k1)
/* Write it to TCStatus to restore CU/KSU/IXMT state */
mtc0 k0,$2,1
ehb
lw k0,PT_EPC(k1)
mtc0 k0,CP0_EPC
/* SAVE_ALL will redundantly recompute the SP, but use it for now */
SAVE_ALL
CLI
move a0,sp
/* Function to be invoked passed stack pad slot 5 */
lw t0,PT_PADSLOT5(sp)
/* Argument from sender passed in stack pad slot 4 */
lw a1,PT_PADSLOT4(sp)
jalr t0
nop
j ret_from_irq
nop
/*
* Called from idle loop to provoke processing of queued IPIs
* First IPI message in queue passed as argument.
*/
LEAF(self_ipi)
/* Before anything else, block interrupts */
mfc0 t0,CP0_TCSTATUS
ori t1,t0,TCSTATUS_IXMT
mtc0 t1,CP0_TCSTATUS
ehb
/* We know we're in kernel mode, so prepare stack frame */
subu t1,sp,PT_SIZE
sw ra,PT_EPC(t1)
sw a0,PT_PADSLOT4(t1)
la t2,ipi_decode
sw t2,PT_PADSLOT5(t1)
/* Save pre-disable value of TCStatus */
sw t0,PT_TCSTATUS(t1)
j __smtc_ipi_vector
nop
END(self_ipi)
/*
* /proc hooks for SMTC kernel
* Copyright (C) 2005 Mips Technologies, Inc
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <linux/proc_fs.h>
#include <asm/smtc_proc.h>
/*
* /proc diagnostic and statistics hooks
*/
/*
* Statistics gathered
*/
unsigned long selfipis[NR_CPUS];
struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
static struct proc_dir_entry *smtc_stats;
atomic_t smtc_fpu_recoveries;
static int proc_read_smtc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
int totalen = 0;
int len;
int i;
extern unsigned long ebase;
len = sprintf(page, "SMTC Status Word: 0x%08x\n", smtc_status);
totalen += len;
page += len;
len = sprintf(page, "Config7: 0x%08x\n", read_c0_config7());
totalen += len;
page += len;
len = sprintf(page, "EBASE: 0x%08lx\n", ebase);
totalen += len;
page += len;
len = sprintf(page, "Counter Interrupts taken per CPU (TC)\n");
totalen += len;
page += len;
for (i=0; i < NR_CPUS; i++) {
len = sprintf(page, "%d: %ld\n", i, smtc_cpu_stats[i].timerints);
totalen += len;
page += len;
}
len = sprintf(page, "Self-IPIs by CPU:\n");
totalen += len;
page += len;
for(i = 0; i < NR_CPUS; i++) {
len = sprintf(page, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
totalen += len;
page += len;
}
len = sprintf(page, "%d Recoveries of \"stolen\" FPU\n",
atomic_read(&smtc_fpu_recoveries));
totalen += len;
page += len;
return totalen;
}
void init_smtc_stats(void)
{
int i;
for (i=0; i<NR_CPUS; i++) {
smtc_cpu_stats[i].timerints = 0;
smtc_cpu_stats[i].selfipis = 0;
}
atomic_set(&smtc_fpu_recoveries, 0);
smtc_stats = create_proc_read_entry("smtc", 0444, NULL,
proc_read_smtc, NULL);
}