Commit bce6824c authored by Ingo Molnar

Merge branch 'x86/core' into x86/build, to avoid conflicts

Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 494b5168 c0554d2d
@@ -15,7 +15,8 @@ than x86. Check the v86d documentation for a list of currently supported
arches.
v86d source code can be downloaded from the following website:
http://dev.gentoo.org/~spock/projects/uvesafb
https://github.com/mjanusz/v86d
Please refer to the v86d documentation for detailed configuration and
installation instructions.
@@ -177,7 +178,7 @@ from the Video BIOS if you set pixclock to 0 in fb_var_screeninfo.
--
Michal Januszewski <spock@gentoo.org>
Last updated: 2009-03-30
Last updated: 2017-10-10
Documentation of the uvesafb options is loosely based on vesafb.txt.
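
For reference, the pixclock behaviour mentioned in the hunk above (timings are read back from the Video BIOS when pixclock is 0) can be driven from userspace with the standard fbdev ioctls. A minimal sketch, assuming /dev/fb0 is bound to uvesafb:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fb.h>

int main(void)
{
	struct fb_var_screeninfo var;
	int fd = open("/dev/fb0", O_RDWR);

	if (fd < 0 || ioctl(fd, FBIOGET_VSCREENINFO, &var) < 0)
		return 1;

	var.pixclock = 0;	/* ask the driver to query the Video BIOS */
	if (ioctl(fd, FBIOPUT_VSCREENINFO, &var) < 0)
		return 1;

	printf("driver-chosen pixclock: %u\n", var.pixclock);
	close(fd);
	return 0;
}
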
@@ -1251,7 +1251,7 @@ N: meson
ARM/Annapurna Labs ALPINE ARCHITECTURE
M: Tsahee Zidenberg <tsahee@annapurnalabs.com>
M: Antoine Tenart <antoine.tenart@free-electrons.com>
M: Antoine Tenart <antoine.tenart@bootlin.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-alpine/
@@ -15395,7 +15395,7 @@ S: Maintained
UVESAFB DRIVER
M: Michal Januszewski <spock@gentoo.org>
L: linux-fbdev@vger.kernel.org
W: http://dev.gentoo.org/~spock/projects/uvesafb/
W: https://github.com/mjanusz/v86d
S: Maintained
F: Documentation/fb/uvesafb.txt
F: drivers/video/fbdev/uvesafb.*
@@ -11,6 +11,7 @@
#include "sama5d2-pinfunc.h"
#include <dt-bindings/mfd/atmel-flexcom.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/pinctrl/at91.h>
/ {
model = "Atmel SAMA5D2 PTC EK";
@@ -299,6 +300,7 @@
<PIN_PA30__NWE_NANDWE>,
<PIN_PB2__NRD_NANDOE>;
bias-pull-up;
atmel,drive-strength = <ATMEL_PIO_DRVSTR_ME>;
};
ale_cle_rdy_cs {
@@ -106,21 +106,23 @@
global_timer: timer@1e200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0x1e200 0x20>;
interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
clocks = <&axi_clk>;
};
local_timer: local-timer@1e600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0x1e600 0x20>;
interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
IRQ_TYPE_EDGE_RISING)>;
clocks = <&axi_clk>;
};
twd_watchdog: watchdog@1e620 {
compatible = "arm,cortex-a9-twd-wdt";
reg = <0x1e620 0x20>;
interrupts = <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) |
IRQ_TYPE_LEVEL_HIGH)>;
};
armpll: armpll {
@@ -158,7 +160,7 @@
serial0: serial@600 {
compatible = "brcm,bcm6345-uart";
reg = <0x600 0x1b>;
interrupts = <GIC_SPI 32 0>;
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&periph_clk>;
clock-names = "periph";
status = "disabled";
@@ -167,7 +169,7 @@
serial1: serial@620 {
compatible = "brcm,bcm6345-uart";
reg = <0x620 0x1b>;
interrupts = <GIC_SPI 33 0>;
interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&periph_clk>;
clock-names = "periph";
status = "disabled";
@@ -180,7 +182,7 @@
reg = <0x2000 0x600>, <0xf0 0x10>;
reg-names = "nand", "nand-int-base";
status = "disabled";
interrupts = <GIC_SPI 38 0>;
interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "nand";
};
@@ -1078,8 +1078,8 @@
interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&rcc SPI6_K>;
resets = <&rcc SPI6_R>;
dmas = <&mdma1 34 0x0 0x40008 0x0 0x0 0>,
<&mdma1 35 0x0 0x40002 0x0 0x0 0>;
dmas = <&mdma1 34 0x0 0x40008 0x0 0x0>,
<&mdma1 35 0x0 0x40002 0x0 0x0>;
dma-names = "rx", "tx";
status = "disabled";
};
@@ -800,8 +800,7 @@
};
hdmi_phy: hdmi-phy@1ef0000 {
compatible = "allwinner,sun8i-r40-hdmi-phy",
"allwinner,sun50i-a64-hdmi-phy";
compatible = "allwinner,sun8i-r40-hdmi-phy";
reg = <0x01ef0000 0x10000>;
clocks = <&ccu CLK_BUS_HDMI1>, <&ccu CLK_HDMI_SLOW>,
<&ccu 7>, <&ccu 16>;
@@ -57,6 +57,45 @@ static u64 core_reg_offset_from_id(u64 id)
return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
static int validate_core_offset(const struct kvm_one_reg *reg)
{
u64 off = core_reg_offset_from_id(reg->id);
int size;
switch (off) {
case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
KVM_REG_ARM_CORE_REG(regs.regs[30]):
case KVM_REG_ARM_CORE_REG(regs.sp):
case KVM_REG_ARM_CORE_REG(regs.pc):
case KVM_REG_ARM_CORE_REG(regs.pstate):
case KVM_REG_ARM_CORE_REG(sp_el1):
case KVM_REG_ARM_CORE_REG(elr_el1):
case KVM_REG_ARM_CORE_REG(spsr[0]) ...
KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
size = sizeof(__u64);
break;
case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
size = sizeof(__uint128_t);
break;
case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
size = sizeof(__u32);
break;
default:
return -EINVAL;
}
if (KVM_REG_SIZE(reg->id) == size &&
IS_ALIGNED(off, size / sizeof(__u32)))
return 0;
return -EINVAL;
}
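
The size/alignment rule enforced above is visible from userspace through KVM_GET_ONE_REG / KVM_SET_ONE_REG. A hedged sketch (arm64 userspace; vcpu_fd and the function name are illustrative, the fd is assumed to come from KVM_CREATE_VCPU):

#include <stddef.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Ask for X0 (a 64-bit core register) but encode a 32-bit size in the
 * register ID. With validate_core_offset() in place the ioctl now
 * fails with EINVAL instead of being treated as a valid access. */
int get_x0_with_wrong_size(int vcpu_fd)
{
	__u64 val;
	struct kvm_one_reg reg = {
		.id	= KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE |
			  KVM_REG_ARM_CORE_REG(regs.regs[0]),
		.addr	= (__u64)(unsigned long)&val,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
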
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
/*
@@ -76,6 +115,9 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
if (validate_core_offset(reg))
return -EINVAL;
if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
return -EFAULT;
@@ -98,6 +140,9 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
if (validate_core_offset(reg))
return -EINVAL;
if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
return -EINVAL;
@@ -107,17 +152,25 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
}
if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK;
u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
switch (mode) {
case PSR_AA32_MODE_USR:
if (!system_supports_32bit_el0())
return -EINVAL;
break;
case PSR_AA32_MODE_FIQ:
case PSR_AA32_MODE_IRQ:
case PSR_AA32_MODE_SVC:
case PSR_AA32_MODE_ABT:
case PSR_AA32_MODE_UND:
if (!vcpu_el1_is_32bit(vcpu))
return -EINVAL;
break;
case PSR_MODE_EL0t:
case PSR_MODE_EL1t:
case PSR_MODE_EL1h:
if (vcpu_el1_is_32bit(vcpu))
return -EINVAL;
break;
default:
err = -EINVAL;
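
The mode switch above constrains what userspace may write into pstate: the mode must match the vcpu's EL1 register width. A valid AArch64 write, as a hedged sketch (same vcpu_fd assumption as above; the function name is illustrative):

#include <stddef.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <asm/ptrace.h>

/* Set PSTATE to EL1h, which the switch above accepts for a vcpu whose
 * EL1 is AArch64; an AArch32 mode such as SVC would now be rejected
 * with EINVAL on the same vcpu. */
int set_pstate_el1h(int vcpu_fd)
{
	__u64 pstate = PSR_MODE_EL1h;
	struct kvm_one_reg reg = {
		.id	= KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
			  KVM_REG_ARM_CORE_REG(regs.pstate),
		.addr	= (__u64)(unsigned long)&pstate,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}
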
@@ -117,11 +117,14 @@ static pte_t get_clear_flush(struct mm_struct *mm,
/*
* If HW_AFDBM is enabled, then the HW could turn on
* the dirty bit for any page in the set, so check
* them all. All hugetlb entries are already young.
* the dirty or accessed bit for any page in the set,
* so check them all.
*/
if (pte_dirty(pte))
orig_pte = pte_mkdirty(orig_pte);
if (pte_young(pte))
orig_pte = pte_mkyoung(orig_pte);
}
if (valid) {
@@ -320,11 +323,40 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
}
/*
* huge_ptep_set_access_flags will update access flags (dirty, accessed)
* and write permission.
*
* For a contiguous huge pte range, write permission only needs to be
* checked against the first pte in the set. Then, for every pte in the
* set, we need to check whether the dirty or young state differs from
* the pte being installed.
*/
static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
int i;
if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
return 1;
for (i = 0; i < ncontig; i++) {
pte_t orig_pte = huge_ptep_get(ptep + i);
if (pte_dirty(pte) != pte_dirty(orig_pte))
return 1;
if (pte_young(pte) != pte_young(orig_pte))
return 1;
}
return 0;
}
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
int ncontig, i, changed = 0;
int ncontig, i;
size_t pgsize = 0;
unsigned long pfn = pte_pfn(pte), dpfn;
pgprot_t hugeprot;
@@ -336,19 +368,23 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
dpfn = pgsize >> PAGE_SHIFT;
if (!__cont_access_flags_changed(ptep, pte, ncontig))
return 0;
orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
if (!pte_same(orig_pte, pte))
changed = 1;
/* Make sure we don't lose the dirty state */
/* Make sure we don't lose the dirty or young state */
if (pte_dirty(orig_pte))
pte = pte_mkdirty(pte);
if (pte_young(orig_pte))
pte = pte_mkyoung(pte);
hugeprot = pte_pgprot(pte);
for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));
return changed;
return 1;
}
void huge_ptep_set_wrprotect(struct mm_struct *mm,
@@ -130,6 +130,9 @@
# define _ASM_EXTABLE(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
# define _ASM_EXTABLE_UA(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess)
# define _ASM_EXTABLE_FAULT(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
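
Call sites use the new annotation exactly like _ASM_EXTABLE. A minimal, hypothetical kernel-context sketch of a byte-sized user load, modeled on the __get_user patterns further down (demo_get_user_byte is not a real kernel helper):

/* Hypothetical helper: the user-space load at 1: gets a uaccess-typed
 * fixup entry, so a fault lands in 3: and returns -EFAULT. (A real
 * helper would also zero *dst on the fault path, as some of the
 * patterns below do.) */
static inline int demo_get_user_byte(unsigned char *dst,
				     const unsigned char __user *src)
{
	int err = 0;

	asm volatile("1:	movb (%2), %b1\n"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:	mov %3, %0\n"
		     "	jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE_UA(1b, 3b)
		     : "=r" (err), "=q" (*dst)
		     : "r" (src), "i" (-EFAULT), "0" (err));
	return err;
}
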
@@ -165,8 +168,8 @@
jmp copy_user_handle_tail
.previous
_ASM_EXTABLE(100b,103b)
_ASM_EXTABLE(101b,103b)
_ASM_EXTABLE_UA(100b, 103b)
_ASM_EXTABLE_UA(101b, 103b)
.endm
#else
@@ -182,6 +185,9 @@
# define _ASM_EXTABLE(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
# define _ASM_EXTABLE_UA(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess)
# define _ASM_EXTABLE_FAULT(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
@@ -29,7 +29,8 @@ struct pt_regs;
(b)->handler = (tmp).handler - (delta); \
} while (0)
extern int fixup_exception(struct pt_regs *regs, int trapnr);
extern int fixup_exception(struct pt_regs *regs, int trapnr,
unsigned long error_code, unsigned long fault_addr);
extern int fixup_bug(struct pt_regs *regs, int trapnr);
extern bool ex_has_fault_handler(unsigned long ip);
extern void early_fixup_exception(struct pt_regs *regs, int trapnr);
@@ -226,7 +226,7 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
"3: movl $-2,%[err]\n\t" \
"jmp 2b\n\t" \
".popsection\n\t" \
_ASM_EXTABLE(1b, 3b) \
_ASM_EXTABLE_UA(1b, 3b) \
: [err] "=r" (err) \
: "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
: "memory")
@@ -20,7 +20,7 @@
"3:\tmov\t%3, %1\n" \
"\tjmp\t2b\n" \
"\t.previous\n" \
_ASM_EXTABLE(1b, 3b) \
_ASM_EXTABLE_UA(1b, 3b) \
: "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
: "i" (-EFAULT), "0" (oparg), "1" (0))
@@ -36,8 +36,8 @@
"4:\tmov\t%5, %1\n" \
"\tjmp\t3b\n" \
"\t.previous\n" \
_ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE(2b, 4b) \
_ASM_EXTABLE_UA(1b, 4b) \
_ASM_EXTABLE_UA(2b, 4b) \
: "=&a" (oldval), "=&r" (ret), \
"+m" (*uaddr), "=&r" (tem) \
: "r" (oparg), "i" (-EFAULT), "1" (0))
@@ -37,8 +37,10 @@ struct pt_regs {
unsigned short __esh;
unsigned short fs;
unsigned short __fsh;
/* On interrupt, gs and __gsh store the vector number. */
unsigned short gs;
unsigned short __gsh;
/* On interrupt, this is the error code. */
unsigned long orig_ax;
unsigned long ip;
unsigned short cs;
@@ -198,8 +198,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
"4: movl %3,%0\n" \
" jmp 3b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE(2b, 4b) \
_ASM_EXTABLE_UA(1b, 4b) \
_ASM_EXTABLE_UA(2b, 4b) \
: "=r" (err) \
: "A" (x), "r" (addr), "i" (errret), "0" (err))
@@ -340,8 +340,8 @@ do { \
" xorl %%edx,%%edx\n" \
" jmp 3b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE(2b, 4b) \
_ASM_EXTABLE_UA(1b, 4b) \
_ASM_EXTABLE_UA(2b, 4b) \
: "=r" (retval), "=&A"(x) \
: "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \
"i" (errret), "0" (retval)); \
@@ -386,7 +386,7 @@ do { \
" xor"itype" %"rtype"1,%"rtype"1\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
_ASM_EXTABLE_UA(1b, 3b) \
: "=r" (err), ltype(x) \
: "m" (__m(addr)), "i" (errret), "0" (err))
@@ -398,7 +398,7 @@ do { \
"3: mov %3,%0\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
_ASM_EXTABLE_UA(1b, 3b) \
: "=r" (err), ltype(x) \
: "m" (__m(addr)), "i" (errret), "0" (err))
@@ -474,7 +474,7 @@ struct __large_struct { unsigned long buf[100]; };
"3: mov %3,%0\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
_ASM_EXTABLE_UA(1b, 3b) \
: "=r"(err) \
: ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
@@ -602,7 +602,7 @@ extern void __cmpxchg_wrong_size(void)
"3:\tmov %3, %0\n" \
"\tjmp 2b\n" \
"\t.previous\n" \
_ASM_EXTABLE(1b, 3b) \
_ASM_EXTABLE_UA(1b, 3b) \
: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
: "i" (-EFAULT), "q" (__new), "1" (__old) \
: "memory" \
@@ -618,7 +618,7 @@ extern void __cmpxchg_wrong_size(void)
"3:\tmov %3, %0\n" \
"\tjmp 2b\n" \
"\t.previous\n" \
_ASM_EXTABLE(1b, 3b) \
_ASM_EXTABLE_UA(1b, 3b) \
: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
: "i" (-EFAULT), "r" (__new), "1" (__old) \
: "memory" \
@@ -634,7 +634,7 @@ extern void __cmpxchg_wrong_size(void)
"3:\tmov %3, %0\n" \
"\tjmp 2b\n" \
"\t.previous\n" \
_ASM_EXTABLE(1b, 3b) \
_ASM_EXTABLE_UA(1b, 3b) \
: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
: "i" (-EFAULT), "r" (__new), "1" (__old) \
: "memory" \
@@ -653,7 +653,7 @@ extern void __cmpxchg_wrong_size(void)
"3:\tmov %3, %0\n" \
"\tjmp 2b\n" \
"\t.previous\n" \
_ASM_EXTABLE(1b, 3b) \
_ASM_EXTABLE_UA(1b, 3b) \
: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
: "i" (-EFAULT), "r" (__new), "1" (__old) \
: "memory" \
@@ -1315,7 +1315,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
local_irq_disable();
ist_end_non_atomic();
} else {
if (!fixup_exception(regs, X86_TRAP_MC))
if (!fixup_exception(regs, X86_TRAP_MC, error_code, 0))
mce_panic("Failed kernel mode recovery", &m, NULL);
}
@@ -1020,50 +1020,12 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
*/
if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
/*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
if (fixup_exception(regs, trapnr))
return 1;
/*
* fixup routine could not handle it,
* Let do_page_fault() fix it.
*/
}
return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);
/*
* Wrapper routine for handling exceptions.
*/
int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
void *data)
{
struct die_args *args = data;
int ret = NOTIFY_DONE;
if (args->regs && user_mode(args->regs))
return ret;
if (val == DIE_GPF) {
/*
* To be potentially processing a kprobe fault and to
* trust the result from kprobe_running(), we have to
* be non-preemptible.
*/
if (!preemptible() && kprobe_running() &&
kprobe_fault_handler(args->regs, args->trapnr))
ret = NOTIFY_STOP;
}
return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);
bool arch_within_kprobe_blacklist(unsigned long addr)
{
bool is_in_entry_trampoline_section = false;
@@ -206,7 +206,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
}
if (!user_mode(regs)) {
if (fixup_exception(regs, trapnr))
if (fixup_exception(regs, trapnr, error_code, 0))
return 0;
tsk->thread.error_code = error_code;
@@ -551,11 +551,21 @@ do_general_protection(struct pt_regs *regs, long error_code)
tsk = current;
if (!user_mode(regs)) {
if (fixup_exception(regs, X86_TRAP_GP))
if (fixup_exception(regs, X86_TRAP_GP, error_code, 0))
return;
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = X86_TRAP_GP;
/*
* To be potentially processing a kprobe fault and to
* trust the result from kprobe_running(), we have to
* be non-preemptible.
*/
if (!preemptible() && kprobe_running() &&
kprobe_fault_handler(regs, X86_TRAP_GP))
return;
if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
die("general protection fault", regs, error_code);
@@ -838,7 +848,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
cond_local_irq_enable(regs);
if (!user_mode(regs)) {
if (fixup_exception(regs, trapnr))
if (fixup_exception(regs, trapnr, error_code, 0))
return;
task->thread.error_code = error_code;
@@ -273,11 +273,11 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
#define SRC(y...) \
9999: y; \
_ASM_EXTABLE(9999b, 6001f)
_ASM_EXTABLE_UA(9999b, 6001f)
#define DST(y...) \
9999: y; \
_ASM_EXTABLE(9999b, 6002f)
_ASM_EXTABLE_UA(9999b, 6002f)
#ifndef CONFIG_X86_USE_PPRO_CHECKSUM
@@ -92,26 +92,26 @@ ENTRY(copy_user_generic_unrolled)
60: jmp copy_user_handle_tail /* ecx is zerorest also */
.previous
_ASM_EXTABLE(1b,30b)
_ASM_EXTABLE(2b,30b)
_ASM_EXTABLE(3b,30b)
_ASM_EXTABLE(4b,30b)
_ASM_EXTABLE(5b,30b)
_ASM_EXTABLE(6b,30b)
_ASM_EXTABLE(7b,30b)
_ASM_EXTABLE(8b,30b)
_ASM_EXTABLE(9b,30b)
_ASM_EXTABLE(10b,30b)
_ASM_EXTABLE(11b,30b)
_ASM_EXTABLE(12b,30b)
_ASM_EXTABLE(13b,30b)
_ASM_EXTABLE(14b,30b)
_ASM_EXTABLE(15b,30b)
_ASM_EXTABLE(16b,30b)
_ASM_EXTABLE(18b,40b)
_ASM_EXTABLE(19b,40b)
_ASM_EXTABLE(21b,50b)
_ASM_EXTABLE(22b,50b)
_ASM_EXTABLE_UA(1b, 30b)
_ASM_EXTABLE_UA(2b, 30b)
_ASM_EXTABLE_UA(3b, 30b)
_ASM_EXTABLE_UA(4b, 30b)
_ASM_EXTABLE_UA(5b, 30b)
_ASM_EXTABLE_UA(6b, 30b)
_ASM_EXTABLE_UA(7b, 30b)
_ASM_EXTABLE_UA(8b, 30b)
_ASM_EXTABLE_UA(9b, 30b)
_ASM_EXTABLE_UA(10b, 30b)
_ASM_EXTABLE_UA(11b, 30b)
_ASM_EXTABLE_UA(12b, 30b)
_ASM_EXTABLE_UA(13b, 30b)
_ASM_EXTABLE_UA(14b, 30b)
_ASM_EXTABLE_UA(15b, 30b)
_ASM_EXTABLE_UA(16b, 30b)
_ASM_EXTABLE_UA(18b, 40b)
_ASM_EXTABLE_UA(19b, 40b)
_ASM_EXTABLE_UA(21b, 50b)
_ASM_EXTABLE_UA(22b, 50b)
ENDPROC(copy_user_generic_unrolled)
EXPORT_SYMBOL(copy_user_generic_unrolled)
@@ -156,8 +156,8 @@ ENTRY(copy_user_generic_string)
jmp copy_user_handle_tail
.previous
_ASM_EXTABLE(1b,11b)
_ASM_EXTABLE(3b,12b)
_ASM_EXTABLE_UA(1b, 11b)
_ASM_EXTABLE_UA(3b, 12b)
ENDPROC(copy_user_generic_string)
EXPORT_SYMBOL(copy_user_generic_string)
@@ -189,7 +189,7 @@ ENTRY(copy_user_enhanced_fast_string)
jmp copy_user_handle_tail
.previous
_ASM_EXTABLE(1b,12b)
_ASM_EXTABLE_UA(1b, 12b)
ENDPROC(copy_user_enhanced_fast_string)
EXPORT_SYMBOL(copy_user_enhanced_fast_string)
@@ -319,27 +319,27 @@ ENTRY(__copy_user_nocache)
jmp copy_user_handle_tail
.previous
_ASM_EXTABLE(1b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(2b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(3b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(4b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(5b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(6b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(7b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(8b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(9b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(10b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(11b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(12b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(13b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(14b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(15b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(16b,.L_fixup_4x8b_copy)
_ASM_EXTABLE(20b,.L_fixup_8b_copy)
_ASM_EXTABLE(21b,.L_fixup_8b_copy)
_ASM_EXTABLE(30b,.L_fixup_4b_copy)
_ASM_EXTABLE(31b,.L_fixup_4b_copy)
_ASM_EXTABLE(40b,.L_fixup_1b_copy)
_ASM_EXTABLE(41b,.L_fixup_1b_copy)
_ASM_EXTABLE_UA(1b, .L_fixup_4x8b_copy)
_ASM_EXTABLE_UA(2b, .L_fixup_4x8b_copy)
_ASM_EXTABLE_UA(3b, .L_fixup_4x8b_copy)
_ASM_EXTABLE_UA(4b, .L_fixup_4x8b_copy)
_ASM_EXTABLE_UA(5b, .L_fixup_4x8b_copy)
_ASM_EXTABLE_UA(6b, .L_fixup_4x8b_copy)
_ASM_EXTABLE_UA(7b, .L_fixup_4x8b_copy)
_ASM_EXTABLE_UA(8b, .L_fixup_4x8b_copy)
_ASM_EXTABLE_UA(9b, .L_fixup_4x8b_copy)
_ASM_EXTABLE_UA(10b, .L_fixup_4x8b_copy)
_ASM_EXTABLE_UA(11b, .L_fixup_4x8b_copy)
_ASM_EXTABLE_UA(12b, .L_fixup_4x8b_copy)
_ASM_EXTABLE_UA(13b, .L_fixup_4x8b_copy)
_ASM_EXTABLE_UA(14b, .L_fixup_4x8b_copy)
_ASM_EXTABLE_UA(15b, .L_fixup_4x8b_copy)
_ASM_EXTABLE_UA(16b, .L_fixup_4x8b_copy)
_ASM_EXTABLE_UA(20b, .L_fixup_8b_copy)
_ASM_EXTABLE_UA(21b, .L_fixup_8b_copy)
_ASM_EXTABLE_UA(30b, .L_fixup_4b_copy)
_ASM_EXTABLE_UA(31b, .L_fixup_4b_copy)
_ASM_EXTABLE_UA(40b, .L_fixup_1b_copy)
_ASM_EXTABLE_UA(41b, .L_fixup_1b_copy)
ENDPROC(__copy_user_nocache)
EXPORT_SYMBOL(__copy_user_nocache)
@@ -31,14 +31,18 @@
.macro source
10:
_ASM_EXTABLE(10b, .Lbad_source)
_ASM_EXTABLE_UA(10b, .Lbad_source)
.endm
.macro dest
20:
_ASM_EXTABLE(20b, .Lbad_dest)
_ASM_EXTABLE_UA(20b, .Lbad_dest)
.endm
/*
* No _ASM_EXTABLE_UA; this is used for intentional prefetch on a
* potentially unmapped kernel address.
*/
.macro ignore L=.Lignore
30:
_ASM_EXTABLE(30b, \L)