Commit d9089c29 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (25 commits)
  powerpc: Disable 64K hugetlb support when doing 64K SPU mappings
  powerpc/powermac: Fixup default serial port device for pmac_zilog
  powerpc/powermac: Use sane default baudrate for SCC debugging
  powerpc/mm: Implement _PAGE_SPECIAL & pte_special() for 64-bit
  powerpc: Show processor cache information in sysfs
  powerpc: Make core id information available to userspace
  powerpc: Make core sibling information available to userspace
  powerpc/vio: More fallout from dma_mapping_error API change
  ibmveth: Fix multiple errors with dma_mapping_error conversion
  powerpc/pseries: Fix CMO sysdev attribute API change fallout
  powerpc: Enable tracehook for the architecture
  powerpc: Add TIF_NOTIFY_RESUME support for tracehook
  powerpc: Add asm/syscall.h with the tracehook entry points
  powerpc: Make syscall tracing use tracehook.h helpers
  powerpc: Call tracehook_signal_handler() when setting up signal frames
  powerpc: Update cpu_sibling_maps dynamically
  powerpc: register_cpu_online should be __cpuinit
  powerpc: kill useless SMT code in prom_hold_cpus
  powerpc: Fix 8xx build failure
  powerpc: Fix vio build warnings
  ...
parents bda426f5 00df438e
......@@ -117,6 +117,7 @@ config PPC
select HAVE_KPROBES
select HAVE_ARCH_KGDB
select HAVE_KRETPROBES
select HAVE_ARCH_TRACEHOOK
select HAVE_LMB
select HAVE_DMA_ATTRS if PPC64
select USE_GENERIC_SMP_HELPERS if SMP
......
......@@ -148,7 +148,7 @@ transfer_to_handler:
/* Check to see if the dbcr0 register is set up to debug. Use the
internal debug mode bit to do this. */
lwz r12,THREAD_DBCR0(r12)
andis. r12,r12,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h
andis. r12,r12,DBCR0_IDM@h
beq+ 3f
/* From user and task is ptraced - load up global dbcr0 */
li r12,-1 /* clear all pending debug events */
......@@ -292,7 +292,7 @@ syscall_exit_cont:
/* If the process has its own DBCR0 value, load it up. The internal
debug mode bit tells us that dbcr0 should be loaded. */
lwz r0,THREAD+THREAD_DBCR0(r2)
andis. r10,r0,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h
andis. r10,r0,DBCR0_IDM@h
bnel- load_dbcr0
#endif
#ifdef CONFIG_44x
......@@ -343,7 +343,12 @@ syscall_dotrace:
stw r0,_TRAP(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_syscall_trace_enter
lwz r0,GPR0(r1) /* Restore original registers */
/*
* Restore argument registers possibly just changed.
* We use the return value of do_syscall_trace_enter
* for call number to look up in the table (r0).
*/
mr r0,r3
lwz r3,GPR3(r1)
lwz r4,GPR4(r1)
lwz r5,GPR5(r1)
......@@ -720,7 +725,7 @@ restore_user:
/* Check whether this process has its own DBCR0 value. The internal
debug mode bit tells us that dbcr0 should be loaded. */
lwz r0,THREAD+THREAD_DBCR0(r2)
andis. r10,r0,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h
andis. r10,r0,DBCR0_IDM@h
bnel- load_dbcr0
#endif
......@@ -1055,8 +1060,8 @@ do_user_signal: /* r10 contains MSR_KERNEL here */
SAVE_NVGPRS(r1)
rlwinm r3,r3,0,0,30
stw r3,_TRAP(r1)
2: li r3,0
addi r4,r1,STACK_FRAME_OVERHEAD
2: addi r3,r1,STACK_FRAME_OVERHEAD
mr r4,r9
bl do_signal
REST_NVGPRS(r1)
b recheck
......
......@@ -214,7 +214,12 @@ syscall_dotrace:
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_syscall_trace_enter
ld r0,GPR0(r1) /* Restore original registers */
/*
* Restore argument registers possibly just changed.
* We use the return value of do_syscall_trace_enter
* for the call number to look up in the table (r0).
*/
mr r0,r3
ld r3,GPR3(r1)
ld r4,GPR4(r1)
ld r5,GPR5(r1)
......@@ -638,8 +643,7 @@ user_work:
b .ret_from_except_lite
1: bl .save_nvgprs
li r3,0
addi r4,r1,STACK_FRAME_OVERHEAD
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_signal
b .ret_from_except
......
......@@ -493,18 +493,18 @@ static int __init serial_dev_init(void)
device_initcall(serial_dev_init);
#ifdef CONFIG_SERIAL_8250_CONSOLE
/*
* This is called very early, as part of console_init() (typically just after
time_init()). This function is responsible for trying to find a good
* default console on serial ports. It tries to match the open firmware
* default output with one of the available serial console drivers, either
* one of the platform serial ports that have been probed earlier by
* find_legacy_serial_ports() or some more platform specific ones.
* default output with one of the available serial console drivers that have
* been probed earlier by find_legacy_serial_ports()
*/
static int __init check_legacy_serial_console(void)
{
struct device_node *prom_stdout = NULL;
int speed = 0, offset = 0;
int i, speed = 0, offset = 0;
const char *name;
const u32 *spd;
......@@ -548,31 +548,20 @@ static int __init check_legacy_serial_console(void)
if (spd)
speed = *spd;
if (0)
;
#ifdef CONFIG_SERIAL_8250_CONSOLE
else if (strcmp(name, "serial") == 0) {
int i;
/* Look for it in probed array */
for (i = 0; i < legacy_serial_count; i++) {
if (prom_stdout != legacy_serial_infos[i].np)
continue;
offset = i;
speed = legacy_serial_infos[i].speed;
break;
}
if (i >= legacy_serial_count)
goto not_found;
if (strcmp(name, "serial") != 0)
goto not_found;
/* Look for it in probed array */
for (i = 0; i < legacy_serial_count; i++) {
if (prom_stdout != legacy_serial_infos[i].np)
continue;
offset = i;
speed = legacy_serial_infos[i].speed;
break;
}
#endif /* CONFIG_SERIAL_8250_CONSOLE */
#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
else if (strcmp(name, "ch-a") == 0)
offset = 0;
else if (strcmp(name, "ch-b") == 0)
offset = 1;
#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
else
if (i >= legacy_serial_count)
goto not_found;
of_node_put(prom_stdout);
DBG("Found serial console at ttyS%d\n", offset);
......@@ -591,3 +580,4 @@ static int __init check_legacy_serial_console(void)
}
console_initcall(check_legacy_serial_console);
#endif /* CONFIG_SERIAL_8250_CONSOLE */
......@@ -254,7 +254,7 @@ void do_dabr(struct pt_regs *regs, unsigned long address,
return;
/* Clear the DAC and struct entries. One shot trigger */
#if (defined(CONFIG_44x) || defined(CONFIG_BOOKE))
#if defined(CONFIG_BOOKE)
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | DBSR_DAC1W
| DBCR0_IDM));
#endif
......@@ -286,7 +286,7 @@ int set_dabr(unsigned long dabr)
mtspr(SPRN_DABR, dabr);
#endif
#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
#if defined(CONFIG_BOOKE)
mtspr(SPRN_DAC1, dabr);
#endif
......@@ -373,7 +373,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
set_dabr(new->thread.dabr);
#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
#if defined(CONFIG_BOOKE)
/* If new thread DAC (HW breakpoint) is the same then leave it */
if (new->thread.dabr)
set_dabr(new->thread.dabr);
......@@ -568,7 +568,7 @@ void flush_thread(void)
current->thread.dabr = 0;
set_dabr(0);
#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
#if defined(CONFIG_BOOKE)
current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W);
#endif
}
......
......@@ -205,8 +205,6 @@ static int __initdata mem_reserve_cnt;
static cell_t __initdata regbuf[1024];
#define MAX_CPU_THREADS 2
/*
* Error results ... some OF calls will return "-1" on error, some
* will return 0, some will return either. To simplify, here are
......@@ -1339,10 +1337,6 @@ static void __init prom_hold_cpus(void)
unsigned int reg;
phandle node;
char type[64];
int cpuid = 0;
unsigned int interrupt_server[MAX_CPU_THREADS];
unsigned int cpu_threads, hw_cpu_num;
int propsize;
struct prom_t *_prom = &RELOC(prom);
unsigned long *spinloop
= (void *) LOW_ADDR(__secondary_hold_spinloop);
......@@ -1386,7 +1380,6 @@ static void __init prom_hold_cpus(void)
reg = -1;
prom_getprop(node, "reg", &reg, sizeof(reg));
prom_debug("\ncpuid = 0x%x\n", cpuid);
prom_debug("cpu hw idx = 0x%x\n", reg);
/* Init the acknowledge var which will be reset by
......@@ -1395,28 +1388,9 @@ static void __init prom_hold_cpus(void)
*/
*acknowledge = (unsigned long)-1;
propsize = prom_getprop(node, "ibm,ppc-interrupt-server#s",
&interrupt_server,
sizeof(interrupt_server));
if (propsize < 0) {
/* no property. old hardware has no SMT */
cpu_threads = 1;
interrupt_server[0] = reg; /* fake it with phys id */
} else {
/* We have a threaded processor */
cpu_threads = propsize / sizeof(u32);
if (cpu_threads > MAX_CPU_THREADS) {
prom_printf("SMT: too many threads!\n"
"SMT: found %x, max is %x\n",
cpu_threads, MAX_CPU_THREADS);
cpu_threads = 1; /* ToDo: panic? */
}
}
hw_cpu_num = interrupt_server[0];
if (hw_cpu_num != _prom->cpu) {
if (reg != _prom->cpu) {
/* Primary Thread of non-boot cpu */
prom_printf("%x : starting cpu hw idx %x... ", cpuid, reg);
prom_printf("starting cpu hw idx %x... ", reg);
call_prom("start-cpu", 3, 0, node,
secondary_hold, reg);
......@@ -1431,17 +1405,10 @@ static void __init prom_hold_cpus(void)
}
#ifdef CONFIG_SMP
else
prom_printf("%x : boot cpu %x\n", cpuid, reg);
prom_printf("boot cpu hw idx %x\n", reg);
#endif /* CONFIG_SMP */
/* Reserve cpu #s for secondary threads. They start later. */
cpuid += cpu_threads;
}
if (cpuid > NR_CPUS)
prom_printf("WARNING: maximum CPUs (" __stringify(NR_CPUS)
") exceeded: ignoring extras\n");
prom_debug("prom_hold_cpus: end...\n");
}
......
......@@ -22,6 +22,7 @@
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/user.h>
#include <linux/security.h>
......@@ -717,7 +718,7 @@ void user_disable_single_step(struct task_struct *task)
struct pt_regs *regs = task->thread.regs;
#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
#if defined(CONFIG_BOOKE)
/* If DAC then do not single step, skip */
if (task->thread.dabr)
return;
......@@ -744,10 +745,11 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
if (addr > 0)
return -EINVAL;
/* The bottom 3 bits in dabr are flags */
if ((data & ~0x7UL) >= TASK_SIZE)
return -EIO;
#ifdef CONFIG_PPC64
#ifndef CONFIG_BOOKE
/* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
* It was assumed, on previous implementations, that 3 bits were
......@@ -769,7 +771,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
task->thread.dabr = data;
#endif
#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
#if defined(CONFIG_BOOKE)
/* As described above, it was assumed 3 bits were passed with the data
* address, but we will assume only the mode bits will be passed
......@@ -1013,31 +1015,24 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return ret;
}
static void do_syscall_trace(void)
/*
* We must return the syscall number to actually look up in the table.
* This can be -1L to skip running any syscall at all.
*/
long do_syscall_trace_enter(struct pt_regs *regs)
{
/* the 0x80 provides a way for the tracing parent to distinguish
between a syscall stop and SIGTRAP delivery */
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
? 0x80 : 0));
/*
* this isn't the same as continuing with a signal, but it will do
* for normal use. strace only continues with a signal if the
* stopping signal is not SIGTRAP. -brl
*/
if (current->exit_code) {
send_sig(current->exit_code, current, 1);
current->exit_code = 0;
}
}
long ret = 0;
void do_syscall_trace_enter(struct pt_regs *regs)
{
secure_computing(regs->gpr[0]);
if (test_thread_flag(TIF_SYSCALL_TRACE)
&& (current->ptrace & PT_PTRACED))
do_syscall_trace();
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
/*
* Tracing decided this syscall should not happen.
* We'll return a bogus call number to get an ENOSYS
* error, but leave the original number in regs->gpr[0].
*/
ret = -1L;
if (unlikely(current->audit_context)) {
#ifdef CONFIG_PPC64
......@@ -1055,16 +1050,19 @@ void do_syscall_trace_enter(struct pt_regs *regs)
regs->gpr[5] & 0xffffffff,
regs->gpr[6] & 0xffffffff);
}
return ret ?: regs->gpr[0];
}
void do_syscall_trace_leave(struct pt_regs *regs)
{
int step;
if (unlikely(current->audit_context))
audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
regs->result);
if ((test_thread_flag(TIF_SYSCALL_TRACE)
|| test_thread_flag(TIF_SINGLESTEP))
&& (current->ptrace & PT_PTRACED))
do_syscall_trace();
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, step);
}
......@@ -367,7 +367,6 @@ static void __init cpu_init_thread_core_maps(int tpc)
* setup_cpu_maps - initialize the following cpu maps:
* cpu_possible_map
* cpu_present_map
* cpu_sibling_map
*
* Having the possible map set up early allows us to restrict allocations
* of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
......@@ -475,29 +474,6 @@ void __init smp_setup_cpu_maps(void)
*/
cpu_init_thread_core_maps(nthreads);
}
/*
* Being that cpu_sibling_map is now a per_cpu array, then it cannot
* be initialized until the per_cpu areas have been created. This
* function is now called from setup_per_cpu_areas().
*/
void __init smp_setup_cpu_sibling_map(void)
{
#ifdef CONFIG_PPC64
int i, cpu, base;
for_each_possible_cpu(cpu) {
DBG("Sibling map for CPU %d:", cpu);
base = cpu_first_thread_in_core(cpu);
for (i = 0; i < threads_per_core; i++) {
cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
DBG(" %d", base + i);
}
DBG("\n");
}
#endif /* CONFIG_PPC64 */
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_PCSPKR_PLATFORM
......
......@@ -611,9 +611,6 @@ void __init setup_per_cpu_areas(void)
paca[i].data_offset = ptr - __per_cpu_start;
memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
}
/* Now that per_cpu is setup, initialize cpu_sibling_map */
smp_setup_cpu_sibling_map();
}
#endif
......
......@@ -9,7 +9,7 @@
* this archive for more details.
*/
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/signal.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
......@@ -112,7 +112,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
}
}
int do_signal(sigset_t *oldset, struct pt_regs *regs)
static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
{
siginfo_t info;
int signr;
......@@ -147,7 +147,7 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
*/
if (current->thread.dabr) {
set_dabr(current->thread.dabr);
#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
#if defined(CONFIG_BOOKE)
mtspr(SPRN_DBCR0, current->thread.dbcr0);
#endif
}
......@@ -177,11 +177,28 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
* its frame, and we can clear the TLF_RESTORE_SIGMASK flag.
*/
current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK;
/*
* Let tracing know that we've done the handler setup.
*/
tracehook_signal_handler(signr, &info, &ka, regs,
test_thread_flag(TIF_SINGLESTEP));
}
return ret;
}
void do_signal(struct pt_regs *regs, unsigned long thread_info_flags)
{
if (thread_info_flags & _TIF_SIGPENDING)
do_signal_pending(NULL, regs);
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
}
}
long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
unsigned long r5, unsigned long r6, unsigned long r7,
unsigned long r8, struct pt_regs *regs)
......
......@@ -41,6 +41,7 @@
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/mpic.h>
......@@ -62,10 +63,12 @@ struct thread_info *secondary_ti;
cpumask_t cpu_possible_map = CPU_MASK_NONE;
cpumask_t cpu_online_map = CPU_MASK_NONE;
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
/* SMP operations for this machine */
struct smp_ops_t *smp_ops;
......@@ -228,6 +231,8 @@ void __devinit smp_prepare_boot_cpu(void)
BUG_ON(smp_processor_id() != boot_cpuid);
cpu_set(boot_cpuid, cpu_online_map);
cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
#ifdef CONFIG_PPC64
paca[boot_cpuid].__current = current;
#endif
......@@ -375,11 +380,60 @@ int __cpuinit __cpu_up(unsigned int cpu)
return 0;
}
/* Return the value of the reg property corresponding to the given
* logical cpu.
*/
/*
 * Map a logical CPU number to its hardware core id, i.e. the value of
 * the "reg" property of the CPU's device-tree node.
 * Returns -1 if the node or the property cannot be found.
 */
int cpu_to_core_id(int cpu)
{
struct device_node *np;
const int *reg;
int id = -1;
/* Locate the device-tree node describing this logical cpu */
np = of_get_cpu_node(cpu, NULL);
if (!np)
goto out;
reg = of_get_property(np, "reg", NULL);
if (!reg)
goto out;
id = *reg;
out:
/* NOTE(review): reached with np == NULL on the first goto; assumes
 * of_node_put() tolerates NULL -- confirm against kernel version */
of_node_put(np);
return id;
}
/* Return the device-tree node of the L2 cache shared by the given
 * logical cpu, with a reference held, or NULL if it cannot be found.
 * The caller is responsible for of_node_put() on a non-NULL result.
 *
 * Must be called when no change can occur to cpu_present_map,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
struct device_node *np;
const phandle *php;
phandle ph;

if (!cpu_present(cpu))
return NULL;

np = of_get_cpu_node(cpu, NULL);
if (np == NULL)
return NULL;

php = of_get_property(np, "l2-cache", NULL);
if (php == NULL) {
/* Fix: drop the cpu node reference on this early-exit path too;
 * the original leaked the reference taken by of_get_cpu_node(). */
of_node_put(np);
return NULL;
}
ph = *php;
of_node_put(np);

return of_find_node_by_phandle(ph);
}
/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
unsigned int cpu = smp_processor_id();
struct device_node *l2_cache;
int i, base;
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
......@@ -400,6 +454,33 @@ int __devinit start_secondary(void *unused)
ipi_call_lock();
cpu_set(cpu, cpu_online_map);
/* Update sibling maps */
base = cpu_first_thread_in_core(cpu);
for (i = 0; i < threads_per_core; i++) {
if (cpu_is_offline(base + i))
continue;
cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
/* cpu_core_map should be a superset of
* cpu_sibling_map even if we don't have cache
* information, so update the former here, too.
*/
cpu_set(cpu, per_cpu(cpu_core_map, base +i));
cpu_set(base + i, per_cpu(cpu_core_map, cpu));
}
l2_cache = cpu_to_l2cache(cpu);
for_each_online_cpu(i) {
struct device_node *np = cpu_to_l2cache(i);
if (!np)
continue;
if (np == l2_cache) {
cpu_set(cpu, per_cpu(cpu_core_map, i));
cpu_set(i, per_cpu(cpu_core_map, cpu));
}
of_node_put(np);
}
of_node_put(l2_cache);
ipi_call_unlock();
local_irq_enable();
......@@ -437,10 +518,42 @@ void __init smp_cpus_done(unsigned int max_cpus)
#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
if (smp_ops->cpu_disable)
return smp_ops->cpu_disable();
struct device_node *l2_cache;
int cpu = smp_processor_id();
int base, i;
int err;
return -ENOSYS;
if (!smp_ops->cpu_disable)
return -ENOSYS;
err = smp_ops->cpu_disable();
if (err)
return err;
/* Update sibling maps */
base = cpu_first_thread_in_core(cpu);
for (i = 0; i < threads_per_core; i++) {
cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i));
cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu));
cpu_clear(cpu, per_cpu(cpu_core_map, base +i));
cpu_clear(base + i, per_cpu(cpu_core_map, cpu));
}
l2_cache = cpu_to_l2cache(cpu);
for_each_present_cpu(i) {
struct device_node *np = cpu_to_l2cache(i);
if (!np)
continue;
if (np == l2_cache) {
cpu_clear(cpu, per_cpu(cpu_core_map, i));
cpu_clear(i, per_cpu(cpu_core_map, cpu));
}
of_node_put(np);
}
of_node_put(l2_cache);
return 0;
}
void __cpu_die(unsigned int cpu)
......
......@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/module.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
......
......@@ -22,6 +22,8 @@
static DEFINE_PER_CPU(struct cpu, cpu_devices);
static DEFINE_PER_CPU(struct kobject *, cache_toplevel);
/* SMT stuff */
#ifdef CONFIG_PPC_MULTIPLATFORM
......@@ -297,8 +299,289 @@ static struct sysdev_attribute pa6t_attrs[] = {
#endif /* CONFIG_DEBUG_KERNEL */
};
struct cache_desc {
struct kobject kobj;
struct cache_desc *next;
const char *type; /* Instruction, Data, or Unified */
u32 size; /* total cache size in KB */
u32 line_size; /* in bytes */
u32 nr_sets; /* number of sets */
u32 level; /* e.g. 1, 2, 3... */
u32 associativity; /* e.g. 8-way... 0 is fully associative */
};
DEFINE_PER_CPU(struct cache_desc *, cache_desc);
/* Recover the cache_desc that embeds the given sysfs kobject. */
static struct cache_desc *kobj_to_cache_desc(struct kobject *k)
{
return container_of(k, struct cache_desc, kobj);
}
/* kobject release callback: frees a cache_desc once its refcount
 * drops to zero, first dropping the reference on the next level's
 * descriptor so the chain is torn down link by link. */
static void cache_desc_release(struct kobject *k)
{
struct cache_desc *desc = kobj_to_cache_desc(k);

pr_debug("%s: releasing %s\n", __func__, kobject_name(k));

if (desc->next)
kobject_put(&desc->next->kobj);

/* Fix: use the already-computed desc instead of re-deriving it */
kfree(desc);
}
/* Generic sysfs show dispatcher: forwards to the kobj_attribute's
 * own show method for the individual cache files below. */
static ssize_t cache_desc_show(struct kobject *k, struct attribute *attr, char *buf)
{
struct kobj_attribute *kobj_attr;
kobj_attr = container_of(attr, struct kobj_attribute, attr);
return kobj_attr->show(k, kobj_attr, buf);
}
static struct sysfs_ops cache_desc_sysfs_ops = {
.show = cache_desc_show,
};
static struct kobj_type cache_desc_type = {
.release = cache_desc_release,
.sysfs_ops = &cache_desc_sysfs_ops,
};
/* sysfs "size" file: total cache size, printed in KB with a K suffix. */
static ssize_t cache_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
struct cache_desc *cache = kobj_to_cache_desc(k);
return sprintf(buf, "%uK\n", cache->size);
}
static struct kobj_attribute cache_size_attr =
__ATTR(size, 0444, cache_size_show, NULL);
/* sysfs "coherency_line_size" file: cache line size in bytes. */
static ssize_t cache_line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
struct cache_desc *cache = kobj_to_cache_desc(k);
return sprintf(buf, "%u\n", cache->line_size);
}
static struct kobj_attribute cache_line_size_attr =
__ATTR(coherency_line_size, 0444, cache_line_size_show, NULL);
/* sysfs "number_of_sets" file. */
static ssize_t cache_nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
struct cache_desc *cache = kobj_to_cache_desc(k);
return sprintf(buf, "%u\n", cache->nr_sets);
}
static struct kobj_attribute cache_nr_sets_attr =
__ATTR(number_of_sets, 0444, cache_nr_sets_show, NULL);
/* sysfs "type" file: "Instruction", "Data", or "Unified". */
static ssize_t cache_type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
struct cache_desc *cache = kobj_to_cache_desc(k);
return sprintf(buf, "%s\n", cache->type);
}
static struct kobj_attribute cache_type_attr =
__ATTR(type, 0444, cache_type_show, NULL);
/* sysfs "level" file: cache level (1, 2, 3, ...). */
static ssize_t cache_level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
struct cache_desc *cache = kobj_to_cache_desc(k);
return sprintf(buf, "%u\n", cache->level);
}
static struct kobj_attribute cache_level_attr =
__ATTR(level, 0444, cache_level_show, NULL);
/* sysfs "ways_of_associativity" file: 0 means fully associative. */
static ssize_t cache_assoc_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
struct cache_desc *cache = kobj_to_cache_desc(k);
return sprintf(buf, "%u\n", cache->associativity);
}
static struct kobj_attribute cache_assoc_attr =
__ATTR(ways_of_associativity, 0444, cache_assoc_show, NULL);
struct cache_desc_info {
const char *type;
const char *size_prop;
const char *line_size_prop;
const char *nr_sets_prop;
};
/* PowerPC Processor binding says the [di]-cache-* must be equal on
* unified caches, so just use d-cache properties. */
static struct cache_desc_info ucache_info = {
.type = "Unified",
.size_prop = "d-cache-size",
.line_size_prop = "d-cache-line-size",
.nr_sets_prop = "d-cache-sets",
};
static void register_cpu_online(unsigned int cpu)
static struct cache_desc_info dcache_info = {
.type = "Data",
.size_prop = "d-cache-size",
.line_size_prop = "d-cache-line-size",
.nr_sets_prop = "d-cache-sets",
};
static struct cache_desc_info icache_info = {
.type = "Instruction",
.size_prop = "i-cache-size",
.line_size_prop = "i-cache-line-size",
.nr_sets_prop = "i-cache-sets",
};
/*
 * Allocate a cache_desc for one cache level, register it as an
 * "index%d" kobject under @parent, and populate sysfs files for
 * whichever device-tree properties (named by @info) are present.
 * Returns the new descriptor, or NULL on allocation/registration
 * failure.  Missing optional properties simply omit their file.
 */
static struct cache_desc * __cpuinit create_cache_desc(struct device_node *np, struct kobject *parent, int index, int level, struct cache_desc_info *info)
{
const u32 *cache_line_size;
struct cache_desc *new;
const u32 *cache_size;
const u32 *nr_sets;
int rc;
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return NULL;
rc = kobject_init_and_add(&new->kobj, &cache_desc_type, parent,
"index%d", index);
if (rc)
goto err;
/* type */
new->type = info->type;
rc = sysfs_create_file(&new->kobj, &cache_type_attr.attr);
WARN_ON(rc);
/* level */
new->level = level;
rc = sysfs_create_file(&new->kobj, &cache_level_attr.attr);
WARN_ON(rc);
/* size: device tree reports bytes, sysfs reports KB */
cache_size = of_get_property(np, info->size_prop, NULL);
if (cache_size) {
new->size = *cache_size / 1024;
rc = sysfs_create_file(&new->kobj,
&cache_size_attr.attr);
WARN_ON(rc);
}
/* coherency_line_size */
cache_line_size = of_get_property(np, info->line_size_prop, NULL);
if (cache_line_size) {
new->line_size = *cache_line_size;
rc = sysfs_create_file(&new->kobj,
&cache_line_size_attr.attr);
WARN_ON(rc);
}
/* number_of_sets */
nr_sets = of_get_property(np, info->nr_sets_prop, NULL);
if (nr_sets) {
new->nr_sets = *nr_sets;
rc = sysfs_create_file(&new->kobj,
&cache_nr_sets_attr.attr);
WARN_ON(rc);
}
/* ways_of_associativity */
if (new->nr_sets == 1) {
/* fully associative: encoded as 0; the goto below jumps into
 * the next if-body so the sysfs file is still created */
new->associativity = 0;
goto create_assoc;
}
if (new->nr_sets && new->size && new->line_size) {
/* If we have values for all of these we can derive
 * the associativity. */
new->associativity =
((new->size * 1024) / new->nr_sets) / new->line_size;
create_assoc:
rc = sysfs_create_file(&new->kobj,
&cache_assoc_attr.attr);
WARN_ON(rc);
}
return new;
err:
kfree(new);
return NULL;
}
/* A cache is unified iff its node carries the (valueless)
 * "cache-unified" property; the non-NULL pointer converts to true. */
static bool cache_is_unified(struct device_node *np)
{
return of_get_property(np, "cache-unified", NULL);
}
/*
 * Build the chain of cache_desc objects for the cache hierarchy
 * rooted at @np: one descriptor for a unified cache, or two (data
 * then instruction) for a split cache, then recurse down the
 * "l2-cache" phandle for the next level.  All descriptors appear as
 * index0, index1, ... directly under @parent.  Returns the head of
 * the chain, or NULL if the first descriptor could not be created.
 */
static struct cache_desc * __cpuinit create_cache_index_info(struct device_node *np, struct kobject *parent, int index, int level)
{
const phandle *next_cache_phandle;
struct device_node *next_cache;
struct cache_desc *new, **end;
pr_debug("%s(node = %s, index = %d)\n", __func__, np->full_name, index);
if (cache_is_unified(np)) {
new = create_cache_desc(np, parent, index, level,
&ucache_info);
} else {
/* Split cache: data descriptor first, then instruction */
new = create_cache_desc(np, parent, index, level,
&dcache_info);
if (new) {
index++;
new->next = create_cache_desc(np, parent, index, level,
&icache_info);
}
}
if (!new)
return NULL;
/* Find the tail of the (1- or 2-entry) chain built above */
end = &new->next;
while (*end)
end = &(*end)->next;
next_cache_phandle = of_get_property(np, "l2-cache", NULL);
if (!next_cache_phandle)
goto out;
next_cache = of_find_node_by_phandle(*next_cache_phandle);
if (!next_cache)
goto out;
/* Recurse for the next cache level and append it to the chain */
*end = create_cache_index_info(next_cache, parent, ++index, ++level);
of_node_put(next_cache);
out:
return new;
}
/*
 * Create the per-cpu "cache" sysfs directory under the cpu's sysdev
 * and populate it from the cpu's device-tree node.  The kobject and
 * descriptor chain are stashed in per-cpu variables so
 * remove_cache_info() can tear them down on hotplug.
 */
static void __cpuinit create_cache_info(struct sys_device *sysdev)
{
struct kobject *cache_toplevel;
struct device_node *np = NULL;
int cpu = sysdev->id;
cache_toplevel = kobject_create_and_add("cache", &sysdev->kobj);
if (!cache_toplevel)
return;
per_cpu(cache_toplevel, cpu) = cache_toplevel;
np = of_get_cpu_node(cpu, NULL);
if (np != NULL) {
per_cpu(cache_desc, cpu) =
create_cache_index_info(np, cache_toplevel, 0, 1);
of_node_put(np);
}
return;
}
static void __cpuinit register_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct sys_device *s = &c->sysdev;
......@@ -346,9 +629,33 @@ static void register_cpu_online(unsigned int cpu)
if (cpu_has_feature(CPU_FTR_DSCR))
sysdev_create_file(s, &attr_dscr);
create_cache_info(s);
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Hotplug teardown counterpart of create_cache_info(): remove the
 * head descriptor's attribute files, then drop the kobject
 * references; cache_desc_release() frees each descriptor and
 * propagates the put down the chain.
 * NOTE(review): attribute files are removed only for the first
 * descriptor in the chain, not for ->next levels -- presumably
 * kobject_put's release path handles the rest; confirm.
 */
static void remove_cache_info(struct sys_device *sysdev)
{
struct kobject *cache_toplevel;
struct cache_desc *cache_desc;
int cpu = sysdev->id;
cache_desc = per_cpu(cache_desc, cpu);
if (cache_desc != NULL) {
sysfs_remove_file(&cache_desc->kobj, &cache_size_attr.attr);
sysfs_remove_file(&cache_desc->kobj, &cache_line_size_attr.attr);
sysfs_remove_file(&cache_desc->kobj, &cache_type_attr.attr);
sysfs_remove_file(&cache_desc->kobj, &cache_level_attr.attr);
sysfs_remove_file(&cache_desc->kobj, &cache_nr_sets_attr.attr);
sysfs_remove_file(&cache_desc->kobj, &cache_assoc_attr.attr);
kobject_put(&cache_desc->kobj);
}
cache_toplevel = per_cpu(cache_toplevel, cpu);
if (cache_toplevel != NULL)
kobject_put(cache_toplevel);
}
static void unregister_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
......@@ -399,6 +706,8 @@ static void unregister_cpu_online(unsigned int cpu)
if (cpu_has_feature(CPU_FTR_DSCR))
sysdev_remove_file(s, &attr_dscr);
remove_cache_info(s);
}
#endif /* CONFIG_HOTPLUG_CPU */
......
......@@ -530,7 +530,7 @@ static dma_addr_t vio_dma_iommu_map_single(struct device *dev, void *vaddr,
}
ret = dma_iommu_ops.map_single(dev, vaddr, size, direction, attrs);
if (unlikely(dma_mapping_error(ret))) {
if (unlikely(dma_mapping_error(dev, ret))) {
vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
atomic_inc(&viodev->cmo.allocs_failed);
}
......@@ -1031,8 +1031,8 @@ void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
static void vio_cmo_bus_init() {}
static void vio_cmo_sysfs_init() { }
static void vio_cmo_bus_init(void) {}
static void vio_cmo_sysfs_init(void) { }
#endif /* CONFIG_PPC_SMLPAR */
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);
......
......@@ -736,14 +736,21 @@ static int __init hugetlbpage_init(void)
if (!cpu_has_feature(CPU_FTR_16M_PAGE))
return -ENODEV;
/* Add supported huge page sizes. Need to change HUGE_MAX_HSTATE
* and adjust PTE_NONCACHE_NUM if the number of supported huge page
* sizes changes.
*/
set_huge_psize(MMU_PAGE_16M);
set_huge_psize(MMU_PAGE_64K);
set_huge_psize(MMU_PAGE_16G);
/* Temporarily disable support for 64K huge pages when 64K SPU local
* store support is enabled as the current implementation conflicts.
*/
#ifndef CONFIG_SPU_FS_64K_LS
set_huge_psize(MMU_PAGE_64K);
#endif
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
if (mmu_huge_psizes[psize]) {
huge_pgtable_cache(psize) = kmem_cache_create(
......
......@@ -541,6 +541,78 @@ static int __init pmac_declare_of_platform_devices(void)
}
machine_device_initcall(powermac, pmac_declare_of_platform_devices);
#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
/*
* This is called very early, as part of console_init() (typically just after
* time_init()). This function is responsible for trying to find a good
* default console on serial ports. It tries to match the open firmware
* default output with one of the available serial console drivers.
*/
/*
 * Early console-init hook: if the user did not pass console= on the
 * command line, match Open Firmware's stdout against the pmac_zilog
 * SCC channels ("ch-a"/"ch-b") and register the corresponding tty as
 * the preferred console.  Returns 0/positive from
 * add_preferred_console() on success, -EBUSY if a console was already
 * requested, -ENODEV if OF stdout cannot be resolved or matched.
 */
static int __init check_pmac_serial_console(void)
{
struct device_node *prom_stdout = NULL;
int offset = 0;
const char *name;
/* Device name depends on whether pmac_zilog claims the ttyS namespace */
#ifdef CONFIG_SERIAL_PMACZILOG_TTYS
char *devname = "ttyS";
#else
char *devname = "ttyPZ";
#endif
pr_debug(" -> check_pmac_serial_console()\n");
/* The user has requested a console so this is already set up. */
if (strstr(boot_command_line, "console=")) {
pr_debug(" console was specified !\n");
return -EBUSY;
}
if (!of_chosen) {
pr_debug(" of_chosen is NULL !\n");
return -ENODEV;
}
/* We are getting a weird phandle from OF ... */
/* ... So use the full path instead */
name = of_get_property(of_chosen, "linux,stdout-path", NULL);
if (name == NULL) {
pr_debug(" no linux,stdout-path !\n");
return -ENODEV;
}
prom_stdout = of_find_node_by_path(name);
if (!prom_stdout) {
pr_debug(" can't find stdout package %s !\n", name);
return -ENODEV;
}
pr_debug("stdout is %s\n", prom_stdout->full_name);
name = of_get_property(prom_stdout, "name", NULL);
if (!name) {
pr_debug(" stdout package has no name !\n");
goto not_found;
}
/* SCC channel A maps to port 0, channel B to port 1 */
if (strcmp(name, "ch-a") == 0)
offset = 0;
else if (strcmp(name, "ch-b") == 0)
offset = 1;
else
goto not_found;
of_node_put(prom_stdout);
pr_debug("Found serial console at %s%d\n", devname, offset);
return add_preferred_console(devname, offset, NULL);
not_found:
pr_debug("No preferred console found !\n");
of_node_put(prom_stdout);
return -ENODEV;
}
console_initcall(check_pmac_serial_console);
#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
/*
* Called very early, MMU is off, device-tree isn't unflattened
*/
......
......@@ -125,13 +125,23 @@ void udbg_scc_init(int force_scc)
out_8(sccc, 0xc0);
/* If SCC was the OF output port, read the BRG value, else
* Setup for 57600 8N1
* Setup for 38400 or 57600 8N1 depending on the machine
*/
if (ch_def != NULL) {
out_8(sccc, 13);
scc_inittab[1] = in_8(sccc);
out_8(sccc, 12);
scc_inittab[3] = in_8(sccc);
} else if (machine_is_compatible("RackMac1,1")
|| machine_is_compatible("RackMac1,2")
|| machine_is_compatible("MacRISC4")) {
/* Xserves and G5s default to 57600 */
scc_inittab[1] = 0;
scc_inittab[3] = 0;
} else {
/* Others default to 38400 */
scc_inittab[1] = 0;
scc_inittab[3] = 1;
}
for (i = 0; i < sizeof(scc_inittab); ++i)
......
......@@ -289,7 +289,9 @@ static int cmm_thread(void *dummy)
}
#define CMM_SHOW(name, format, args...) \
static ssize_t show_##name(struct sys_device *dev, char *buf) \
static ssize_t show_##name(struct sys_device *dev, \
struct sysdev_attribute *attr, \
char *buf) \
{ \
return sprintf(buf, format, ##args); \
} \
......@@ -298,12 +300,14 @@ static int cmm_thread(void *dummy)
CMM_SHOW(loaned_kb, "%lu\n", PAGES2KB(loaned_pages));
CMM_SHOW(loaned_target_kb, "%lu\n", PAGES2KB(loaned_pages_target));
static ssize_t show_oom_pages(struct sys_device *dev, char *buf)
static ssize_t show_oom_pages(struct sys_device *dev,
struct sysdev_attribute *attr, char *buf)
{
return sprintf(buf, "%lu\n", PAGES2KB(oom_freed_pages));
}
static ssize_t store_oom_pages(struct sys_device *dev,
struct sysdev_attribute *attr,
const char *buf, size_t count)
{
unsigned long val = simple_strtoul (buf, NULL, 10);
......
......@@ -260,7 +260,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
pool->buff_size, DMA_FROM_DEVICE);
if (dma_mapping_error((&adapter->vdev->dev, dma_addr))
if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
goto failure;
pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
......@@ -294,7 +294,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
pool->consumer_index = pool->size - 1;
else
pool->consumer_index--;
if (!dma_mapping_error((&adapter->vdev->dev, dma_addr))
if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
dma_unmap_single(&adapter->vdev->dev,
pool->dma_addr[index], pool->buff_size,
DMA_FROM_DEVICE);
......@@ -488,7 +488,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
&adapter->rx_buff_pool[i]);
if (adapter->bounce_buffer != NULL) {
if (!dma_mapping_error(adapter->bounce_buffer_dma)) {
if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
dma_unmap_single(&adapter->vdev->dev,
adapter->bounce_buffer_dma,
adapter->netdev->mtu + IBMVETH_BUFF_OH,
......@@ -924,7 +924,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
buf[1] = 0;
}
if (dma_mapping_error((&adapter->vdev->dev, data_dma_addr)) {
if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) {
if (!firmware_has_feature(FW_FEATURE_CMO))
ibmveth_error_printk("tx: unable to map xmit buffer\n");
skb_copy_from_linear_data(skb, adapter->bounce_buffer,
......
......@@ -13,3 +13,9 @@ config OF_I2C
depends on PPC_OF && I2C
help
OpenFirmware I2C accessors
config OF_SPI
def_tristate SPI
depends on OF && PPC_OF && SPI
help
OpenFirmware SPI accessors
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment