Commit d20ead9e authored by Linus Torvalds

Merge ssh://master.kernel.org/pub/scm/linux/kernel/git/tglx/linux-2.6-x86

* ssh://master.kernel.org/pub/scm/linux/kernel/git/tglx/linux-2.6-x86: (114 commits)
  x86: delete vsyscall files during make clean
  kbuild: fix typo SRCARCH in find_sources
  x86: fix kernel rebuild due to vsyscall fallout
  .gitignore update for x86 arch
  x86: unify include/asm/debugreg_32/64.h
  x86: unify include/asm/unwind_32/64.h
  x86: unify include/asm/types_32/64.h
  x86: unify include/asm/tlb_32/64.h
  x86: unify include/asm/siginfo_32/64.h
  x86: unify include/asm/bug_32/64.h
  x86: unify include/asm/mman_32/64.h
  x86: unify include/asm/agp_32/64.h
  x86: unify include/asm/kdebug_32/64.h
  x86: unify include/asm/ioctls_32/64.h
  x86: unify include/asm/floppy_32/64.h
  x86: apply missing DMA/OOM prevention to floppy_32.h
  x86: unify include/asm/cache_32/64.h
  x86: unify include/asm/cache_32/64.h
  x86: unify include/asm/dmi_32/64.h
  x86: unify include/asm/delay_32/64.h
  ...
parents c56ec763 88e4d250
...
@@ -112,7 +112,7 @@ struct stack_frame {
 static inline unsigned long print_context_stack(struct thread_info *tinfo,
                                 unsigned long *stack, unsigned long ebp,
-                                struct stacktrace_ops *ops, void *data)
+                                const struct stacktrace_ops *ops, void *data)
 {
 #ifdef CONFIG_FRAME_POINTER
         struct stack_frame *frame = (struct stack_frame *)ebp;
...
@@ -149,7 +149,7 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
 void dump_trace(struct task_struct *task, struct pt_regs *regs,
                 unsigned long *stack,
-                struct stacktrace_ops *ops, void *data)
+                const struct stacktrace_ops *ops, void *data)
 {
         unsigned long ebp = 0;
...
@@ -221,7 +221,7 @@ static void print_trace_address(void *data, unsigned long addr)
         touch_nmi_watchdog();
 }
 
-static struct stacktrace_ops print_trace_ops = {
+static const struct stacktrace_ops print_trace_ops = {
         .warning = print_trace_warning,
         .warning_symbol = print_trace_warning_symbol,
         .stack = print_trace_stack,
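
Note: these hunks constify the stack walker's ops argument so the ops table itself can be declared const and placed in .rodata, where it cannot be modified at run time; the 64-bit file gets the same treatment further down. The pattern, reduced to a minimal illustration with hypothetical names:

    struct ops {
            void (*warn)(const char *msg);
    };

    static void warn_printk(const char *msg) { /* ... */ }

    /* const table: ends up in .rodata, write attempts fault */
    static const struct ops demo_ops = {
            .warn = warn_printk,
    };

    void walk(const struct ops *ops);   /* callee promises not to write it */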
...
@@ -398,31 +398,24 @@ void die(const char * str, struct pt_regs * regs, long err)
         local_save_flags(flags);
 
         if (++die.lock_owner_depth < 3) {
-                int nl = 0;
                 unsigned long esp;
                 unsigned short ss;
 
                 report_bug(regs->eip, regs);
 
-                printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
+                printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff,
+                       ++die_counter);
 #ifdef CONFIG_PREEMPT
-                printk(KERN_EMERG "PREEMPT ");
-                nl = 1;
+                printk("PREEMPT ");
#endif
 #ifdef CONFIG_SMP
-                if (!nl)
-                        printk(KERN_EMERG);
                 printk("SMP ");
-                nl = 1;
 #endif
 #ifdef CONFIG_DEBUG_PAGEALLOC
-                if (!nl)
-                        printk(KERN_EMERG);
                 printk("DEBUG_PAGEALLOC");
-                nl = 1;
 #endif
-                if (nl)
-                        printk("\n");
+                printk("\n");
                 if (notify_die(DIE_OOPS, str, regs, err,
                                 current->thread.trap_no, SIGSEGV) !=
                                 NOTIFY_STOP) {
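
Note: the simplification leans on the printk() semantics of this era: a printk() whose format does not begin with a KERN_* level string continues the previous line, so the header, the optional PREEMPT/SMP/DEBUG_PAGEALLOC tags, and the final newline all land on one line without the nl bookkeeping. (Later kernels require an explicit KERN_CONT for this.) A self-contained sketch of the idiom:

    /* four printk() calls, one rendered log line */
    printk(KERN_EMERG "Oops: 0002 [#1] ");
    printk("PREEMPT ");     /* no KERN_* prefix: continues the line */
    printk("SMP ");
    printk("\n");           /* terminates the single header line */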
...
@@ -1112,20 +1105,6 @@ asmlinkage void math_emulate(long arg)
 
 #endif /* CONFIG_MATH_EMULATION */
 
-#ifdef CONFIG_X86_F00F_BUG
-void __init trap_init_f00f_bug(void)
-{
-        __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
-
-        /*
-         * Update the IDT descriptor and reload the IDT so that
-         * it uses the read-only mapped virtual address.
-         */
-        idt_descr.address = fix_to_virt(FIX_F00F_IDT);
-        load_idt(&idt_descr);
-}
-#endif
-
 /*
  * This needs to use 'idt_table' rather than 'idt', and
  * thus use the _nonmapped_ version of the IDT, as the
...
...
@@ -215,7 +215,7 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
 
 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
                 unsigned long *stack,
-                struct stacktrace_ops *ops, void *data)
+                const struct stacktrace_ops *ops, void *data)
 {
         const unsigned cpu = get_cpu();
         unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
...
@@ -336,7 +336,7 @@ static void print_trace_address(void *data, unsigned long addr)
         printk_address(addr);
 }
 
-static struct stacktrace_ops print_trace_ops = {
+static const struct stacktrace_ops print_trace_ops = {
         .warning = print_trace_warning,
         .warning_symbol = print_trace_warning_symbol,
         .stack = print_trace_stack,
...
...
@@ -349,10 +349,10 @@ __cpuinit int unsynchronized_tsc(void)
 static void __init check_geode_tsc_reliable(void)
 {
-        unsigned long val;
+        unsigned long res_low, res_high;
 
-        rdmsrl(MSR_GEODE_BUSCONT_CONF0, val);
-        if ((val & RTSC_SUSP))
+        rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
+        if (res_low & RTSC_SUSP)
                 clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
 }
 #else
...
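
Note: rdmsrl() on a model-specific register the CPU does not implement raises #GP and kills the boot; rdmsr_safe() routes that fault through the exception tables and reports it in its return value instead. Since this probe also runs on non-Geode CPUs, the safe variant is the right tool. A sketch of how the return value could additionally be checked (the hunk above ignores it):

    u32 lo, hi;

    /* returns 0 on success, non-zero if the rdmsr faulted */
    if (rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &lo, &hi) == 0 &&
        (lo & RTSC_SUSP))
            clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;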
...
@@ -78,7 +78,6 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
         vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
         vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
         vsyscall_gtod_data.sys_tz = sys_tz;
-        vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
         vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic;
         write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
 }
...
@@ -289,7 +288,7 @@ static void __cpuinit vsyscall_set_cpu(int cpu)
         unsigned long *d;
         unsigned long node = 0;
 #ifdef CONFIG_NUMA
-        node = cpu_to_node[cpu];
+        node = cpu_to_node(cpu);
 #endif
         if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP))
                 write_rdtscp_aux((node << 12) | cpu);
...
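
Note: write_rdtscp_aux() seeds the TSC_AUX MSR so userspace can recover both values from a single rdtscp: the low 12 bits carry the CPU number and the bits above it the node, exactly mirroring the (node << 12) | cpu encoding above. A hedged sketch of the decoding side:

    unsigned int aux, cpu, node;

    /* rdtscp reads the TSC into EDX:EAX and loads TSC_AUX into ECX */
    asm volatile("rdtscp" : "=c" (aux) : : "eax", "edx");
    cpu  = aux & 0xfff;     /* low 12 bits: CPU number */
    node = aux >> 12;       /* remaining bits: NUMA node */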
...
@@ -14,7 +14,7 @@ find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len)
 
         /* could test bitsliced, but it's hardly worth it */
         end = n+len;
-        if (end >= nbits)
+        if (end > nbits)
                 return -1;
         for (i = n+1; i < end; i++) {
                 if (test_bit(i, bitmap)) {
...
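
Note: this is a classic half-open-interval off-by-one. The candidate string occupies bits [n, end), i.e. n .. end-1, so end == nbits still fits inside the bitmap and only end > nbits overruns it:

    long n = 60, len = 4, nbits = 64;
    long end = n + len;     /* bits n .. end-1 are used                 */
    /* end == nbits (64): last bit touched is 63, still in range;
     * the old "end >= nbits" test wrongly rejected this string  */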
...
@@ -26,27 +26,18 @@ static void __rdmsr_safe_on_cpu(void *info)
 static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe)
 {
         int err = 0;
-        preempt_disable();
-        if (smp_processor_id() == cpu)
-                if (safe)
-                        err = rdmsr_safe(msr_no, l, h);
-                else
-                        rdmsr(msr_no, *l, *h);
-        else {
-                struct msr_info rv;
-
-                rv.msr_no = msr_no;
-                if (safe) {
-                        smp_call_function_single(cpu, __rdmsr_safe_on_cpu,
-                                                 &rv, 0, 1);
-                        err = rv.err;
-                } else {
-                        smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1);
-                }
-                *l = rv.l;
-                *h = rv.h;
-        }
-        preempt_enable();
+        struct msr_info rv;
+
+        rv.msr_no = msr_no;
+        if (safe) {
+                smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 0, 1);
+                err = rv.err;
+        } else {
+                smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1);
+        }
+        *l = rv.l;
+        *h = rv.h;
 
         return err;
 }
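
Note: the rewrite drops the "am I already on that CPU?" fast path and its preempt_disable()/preempt_enable() bracket, and always goes through smp_call_function_single(), relying on that function to handle the cpu == smp_processor_id() case itself. Callers use the exported wrappers; a hedged usage sketch:

    u32 lo, hi;

    /* read the microcode revision MSR on CPU 2, trapping #GP */
    if (rdmsr_safe_on_cpu(2, MSR_IA32_UCODE_REV, &lo, &hi) == 0)
            printk(KERN_INFO "cpu2 ucode rev: %08x\n", hi);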
...
@@ -67,27 +58,18 @@ static void __wrmsr_safe_on_cpu(void *info)
 static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe)
 {
         int err = 0;
-        preempt_disable();
-        if (smp_processor_id() == cpu)
-                if (safe)
-                        err = wrmsr_safe(msr_no, l, h);
-                else
-                        wrmsr(msr_no, l, h);
-        else {
-                struct msr_info rv;
-
-                rv.msr_no = msr_no;
-                rv.l = l;
-                rv.h = h;
-                if (safe) {
-                        smp_call_function_single(cpu, __wrmsr_safe_on_cpu,
-                                                 &rv, 0, 1);
-                        err = rv.err;
-                } else {
-                        smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1);
-                }
-        }
-        preempt_enable();
+        struct msr_info rv;
+
+        rv.msr_no = msr_no;
+        rv.l = l;
+        rv.h = h;
+        if (safe) {
+                smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 0, 1);
+                err = rv.err;
+        } else {
+                smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1);
+        }
 
         return err;
 }
...
...
@@ -2,7 +2,7 @@
 
 #include <linux/linkage.h>
 #include <asm/rwlock.h>
-#include <asm/alternative-asm.i>
+#include <asm/alternative-asm.h>
 #include <asm/dwarf2.h>
 
 /* rdi: pointer to rwlock_t */
...
...
@@ -15,8 +15,8 @@
 
 #include <linux/linkage.h>
 #include <asm/rwlock.h>
-#include <asm/alternative-asm.i>
-#include <asm/frame.i>
+#include <asm/alternative-asm.h>
+#include <asm/frame.h>
 #include <asm/dwarf2.h>
 
 /*
...
...
@@ -160,26 +160,6 @@ char *strchr(const char * s, int c)
 EXPORT_SYMBOL(strchr);
 #endif
 
-#ifdef __HAVE_ARCH_STRRCHR
-char *strrchr(const char * s, int c)
-{
-        int d0, d1;
-        char * res;
-        asm volatile( "movb %%al,%%ah\n"
-                "1:\tlodsb\n\t"
-                "cmpb %%ah,%%al\n\t"
-                "jne 2f\n\t"
-                "leal -1(%%esi),%0\n"
-                "2:\ttestb %%al,%%al\n\t"
-                "jne 1b"
-                :"=g" (res), "=&S" (d0), "=&a" (d1)
-                :"0" (0),"1" (s),"2" (c)
-                :"memory");
-        return res;
-}
-EXPORT_SYMBOL(strrchr);
-#endif
-
 #ifdef __HAVE_ARCH_STRLEN
 size_t strlen(const char * s)
 {
...
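
Note: with the __HAVE_ARCH_STRRCHR block gone, the generic C strrchr() from lib/string.c is linked in instead; the hand-written asm saved little and carried fragile constraints (an "=g" output may legally resolve to a memory operand, which lea cannot target). For reference, a minimal C equivalent (a sketch, not the lib/string.c text):

    char *strrchr(const char *s, int c)
    {
            const char *last = NULL;

            do {
                    if (*s == (char)c)
                            last = s;       /* remember the rightmost match */
            } while (*s++);                 /* the trailing NUL participates */

            return (char *)last;
    }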
...
@@ -35,7 +35,11 @@ void __init pre_intr_init_hook(void)
 /*
  * IRQ2 is cascade interrupt to second interrupt controller
  */
-static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL};
+static struct irqaction irq2 = {
+        .handler = no_action,
+        .mask = CPU_MASK_NONE,
+        .name = "cascade",
+};
 
 /**
  * intr_init_hook - post gate setup interrupt initialisation
...
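
Note: the positional initializer depended on the exact order of irqaction's fields and had to supply placeholder 0/NULL entries; C99 designated initializers name each field, zero whatever is unmentioned, and keep working when the struct gains or reorders members. A minimal illustration with a hypothetical struct:

    struct handler {
            void (*fn)(void);
            unsigned long flags;
            const char *name;
    };

    /* positional: silently wrong if fields are ever reordered */
    static struct handler a = { NULL, 0, "demo" };

    /* designated: self-documenting; omitted fields become zero */
    static struct handler b = {
            .fn   = NULL,
            .name = "demo",
    };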
...
@@ -46,11 +46,11 @@
  * ES7000 Globals
  */
 
-volatile unsigned long *psai = NULL;
-struct mip_reg *mip_reg;
-struct mip_reg *host_reg;
-int mip_port;
-unsigned long mip_addr, host_addr;
+static volatile unsigned long *psai = NULL;
+static struct mip_reg *mip_reg;
+static struct mip_reg *host_reg;
+static int mip_port;
+static unsigned long mip_addr, host_addr;
 
 /*
  * GSI override for ES7000 platforms.
@@ -288,28 +288,8 @@ es7000_start_cpu(int cpu, unsigned long eip)
 }
 
-int
-es7000_stop_cpu(int cpu)
-{
-        int startup;
-
-        if (psai == NULL)
-                return -1;
-
-        startup= (0x1000000 | cpu);
-
-        while ((*psai & 0xff00ffff) != startup)
-                ;
-
-        startup = (*psai & 0xff0000) >> 16;
-        *psai &= 0xffffff;
-
-        return 0;
-}
-
 void __init
-es7000_sw_apic()
+es7000_sw_apic(void)
 {
         if (es7000_plat) {
                 int mip_status;
...
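
Note: besides deleting the unused es7000_stop_cpu(), the hunk fixes a prototype nit: in C, an empty parameter list is an old-style declaration that disables argument checking at call sites, whereas (void) declares "takes no arguments":

    void f();       /* old-style: callers can pass anything, unchecked */
    void g(void);   /* prototype: zero arguments, calls are verified   */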
...
@@ -22,7 +22,7 @@ extern struct genapic apic_default;
 
 struct genapic *genapic = &apic_default;
 
-struct genapic *apic_probe[] __initdata = {
+static struct genapic *apic_probe[] __initdata = {
         &apic_summit,
         &apic_bigsmp,
         &apic_es7000,
...
...
@@ -18,7 +18,11 @@ void __init pre_intr_init_hook(void)
 /*
  * IRQ2 is cascade interrupt to second interrupt controller
  */
-static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL};
+static struct irqaction irq2 = {
+        .handler = no_action,
+        .mask = CPU_MASK_NONE,
+        .name = "cascade",
+};
 
 void __init intr_init_hook(void)
 {
...
...
@@ -442,8 +442,8 @@ static __u32 __init
 setup_trampoline(void)
 {
         /* these two are global symbols in trampoline.S */
-        extern __u8 trampoline_end[];
-        extern __u8 trampoline_data[];
+        extern const __u8 trampoline_end[];
+        extern const __u8 trampoline_data[];
 
         memcpy((__u8 *)trampoline_base, trampoline_data,
                trampoline_end - trampoline_data);
@@ -1037,6 +1037,7 @@ smp_call_function_interrupt(void)
          */
         irq_enter();
         (*func)(info);
+        __get_cpu_var(irq_stat).irq_call_count++;
         irq_exit();
         if (wait) {
                 mb();
...
...
@@ -103,14 +103,14 @@ extern unsigned long highend_pfn, highstart_pfn;
 
 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
 
-unsigned long node_remap_start_pfn[MAX_NUMNODES];
+static unsigned long node_remap_start_pfn[MAX_NUMNODES];
 unsigned long node_remap_size[MAX_NUMNODES];
-unsigned long node_remap_offset[MAX_NUMNODES];
-void *node_remap_start_vaddr[MAX_NUMNODES];
+static unsigned long node_remap_offset[MAX_NUMNODES];
+static void *node_remap_start_vaddr[MAX_NUMNODES];
 void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
 
-void *node_remap_end_vaddr[MAX_NUMNODES];
-void *node_remap_alloc_vaddr[MAX_NUMNODES];
+static void *node_remap_end_vaddr[MAX_NUMNODES];
+static void *node_remap_alloc_vaddr[MAX_NUMNODES];
 static unsigned long kva_start_pfn;
 static unsigned long kva_pages;
 /*
...
...
@@ -105,7 +105,7 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
            LDT and other horrors are only used in user space. */
         if (seg & (1<<2)) {
                 /* Must lock the LDT while reading it. */
-                down(&current->mm->context.sem);
+                mutex_lock(&current->mm->context.lock);
                 desc = current->mm->context.ldt;
                 desc = (void *)desc + (seg & ~7);
         } else {
@@ -118,7 +118,7 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
         base = get_desc_base((unsigned long *)desc);
 
         if (seg & (1<<2)) {
-                up(&current->mm->context.sem);
+                mutex_unlock(&current->mm->context.lock);
         } else
                 put_cpu();
@@ -539,23 +539,22 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
                 printk(KERN_ALERT "BUG: unable to handle kernel paging"
                                 " request");
         printk(" at virtual address %08lx\n",address);
-        printk(KERN_ALERT " printing eip:\n");
-        printk("%08lx\n", regs->eip);
+        printk(KERN_ALERT "printing eip: %08lx ", regs->eip);
         page = read_cr3();
         page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
 #ifdef CONFIG_X86_PAE
-        printk(KERN_ALERT "*pdpt = %016Lx\n", page);
+        printk("*pdpt = %016Lx ", page);
         if ((page >> PAGE_SHIFT) < max_low_pfn
             && page & _PAGE_PRESENT) {
                 page &= PAGE_MASK;
                 page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
                                                          & (PTRS_PER_PMD - 1)];
-                printk(KERN_ALERT "*pde = %016Lx\n", page);
+                printk(KERN_ALERT "*pde = %016Lx ", page);
                 page &= ~_PAGE_NX;
         }
 #else
-        printk(KERN_ALERT "*pde = %08lx\n", page);
+        printk("*pde = %08lx ", page);
 #endif
 
         /*
@@ -569,8 +568,10 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
                 page &= PAGE_MASK;
                 page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
                                                          & (PTRS_PER_PTE - 1)];
-                printk(KERN_ALERT "*pte = %0*Lx\n", sizeof(page)*2, (u64)page);
+                printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
         }
+        printk("\n");
 }
 
         tsk->thread.cr2 = address;
...
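
Note: mm->context moved from a semaphore (context.sem) to a real mutex (context.lock) in this series; semaphores used purely as binary locks are better expressed as mutexes, which are cheaper and are tracked by lockdep. The conversion pattern, sketched with hypothetical names:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(demo_lock);         /* was: DECLARE_MUTEX(demo_sem) */

    void touch_protected_state(void)
    {
            mutex_lock(&demo_lock);         /* was: down(&demo_sem) */
            /* ... read or modify the shared state ... */
            mutex_unlock(&demo_lock);       /* was: up(&demo_sem)   */
    }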
...
@@ -85,13 +85,20 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
 static pte_t * __init one_page_table_init(pmd_t *pmd)
 {
         if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
-                pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+                pte_t *page_table = NULL;
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+                page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
+#endif
+                if (!page_table)
+                        page_table =
+                                (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
 
                 paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
         }
 
         return pte_offset_kernel(pmd, 0);
 }
...
...
@@ -166,7 +166,7 @@ early_node_mem(int nodeid, unsigned long start, unsigned long end,
                 return __va(mem);
         ptr = __alloc_bootmem_nopanic(size,
                                 SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
-        if (ptr == 0) {
+        if (ptr == NULL) {
                 printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
                        size, nodeid);
                 return NULL;
@@ -261,7 +261,7 @@ void __init numa_init_array(void)
            We round robin the existing nodes. */
         rr = first_node(node_online_map);
         for (i = 0; i < NR_CPUS; i++) {
-                if (cpu_to_node[i] != NUMA_NO_NODE)
+                if (cpu_to_node(i) != NUMA_NO_NODE)
                         continue;
                 numa_set_node(i, rr);
                 rr = next_node(rr, node_online_map);
@@ -543,7 +543,7 @@ __cpuinit void numa_add_cpu(int cpu)
 void __cpuinit numa_set_node(int cpu, int node)
 {
         cpu_pda(cpu)->nodenumber = node;
-        cpu_to_node[cpu] = node;
+        cpu_to_node(cpu) = node;
 }
 
 unsigned long __init numa_free_all_bootmem(void)
...
...
@@ -70,10 +70,10 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
 
 static void cache_flush_page(struct page *p)
 {
-        unsigned long adr = (unsigned long)page_address(p);
+        void *adr = page_address(p);
         int i;
         for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-                asm volatile("clflush (%0)" :: "r" (adr + i));
+                clflush(adr+i);
 }
 
 static void flush_kernel_map(void *arg)
...
...
@@ -65,7 +65,7 @@ static void cache_flush_page(void *adr)
 {
         int i;
         for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-                asm volatile("clflush (%0)" :: "r" (adr + i));
+                clflush(adr+i);
 }
 
 static void flush_kernel_map(void *arg)
@@ -148,6 +148,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
                 split = split_large_page(address, prot, ref_prot2);
                 if (!split)
                         return -ENOMEM;
+                pgprot_val(ref_prot2) &= ~_PAGE_NX;
                 set_pte(kpte, mk_pte(split, ref_prot2));
                 kpte_page = split;
         }
...
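
Note: both pageattr files switch from open-coded clflush asm to a shared clflush() helper. A "+m" operand is the usual way to write such a wrapper, since it tells the compiler exactly which memory the instruction touches instead of hiding it behind a bare "r" pointer; roughly (a sketch, not the header's exact text):

    static inline void clflush(volatile void *p)
    {
            /* flush the cache line containing *p */
            asm volatile("clflush %0" : "+m" (*(volatile char *)p));
    }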