Commit 02386c35 authored by Ingo Molnar

Merge branch 'perf/urgent' into perf/core, to pick up fixes before applying new changes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents d71b0ad8 f73e22ab
@@ -21,8 +21,8 @@ exact way to do it depends on the GPIO controller providing the GPIOs, see the
device tree bindings for your controller.
GPIOs mappings are defined in the consumer device's node, in a property named
<function>-gpios, where <function> is the function the driver will request
through gpiod_get(). For example:
either <function>-gpios or <function>-gpio, where <function> is the function
the driver will request through gpiod_get(). For example:
foo_device {
compatible = "acme,foo";
@@ -31,7 +31,7 @@ through gpiod_get(). For example:
<&gpio 16 GPIO_ACTIVE_HIGH>, /* green */
<&gpio 17 GPIO_ACTIVE_HIGH>; /* blue */
power-gpios = <&gpio 1 GPIO_ACTIVE_LOW>;
power-gpio = <&gpio 1 GPIO_ACTIVE_LOW>;
};
This property will make GPIOs 15, 16 and 17 available to the driver under the
@@ -39,15 +39,24 @@ This property will make GPIOs 15, 16 and 17 available to the driver under the
struct gpio_desc *red, *green, *blue, *power;
red = gpiod_get_index(dev, "led", 0);
green = gpiod_get_index(dev, "led", 1);
blue = gpiod_get_index(dev, "led", 2);
red = gpiod_get_index(dev, "led", 0, GPIOD_OUT_HIGH);
green = gpiod_get_index(dev, "led", 1, GPIOD_OUT_HIGH);
blue = gpiod_get_index(dev, "led", 2, GPIOD_OUT_HIGH);
power = gpiod_get(dev, "power");
power = gpiod_get(dev, "power", GPIOD_OUT_HIGH);
The led GPIOs will be active-high, while the power GPIO will be active-low (i.e.
gpiod_is_active_low(power) will be true).
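The examples above omit error handling. gpiod_get() and gpiod_get_index() return an ERR_PTR() on failure, so a driver has to check every descriptor it requests. The probe sketch below is editorial illustration only, built on the calls shown above; foo_probe() and the use of the devm_ variants are assumptions, not part of this file:

	static int foo_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		struct gpio_desc *red, *power;

		/* devm_ variants release the descriptors automatically on unbind */
		red = devm_gpiod_get_index(dev, "led", 0, GPIOD_OUT_HIGH);
		if (IS_ERR(red))
			return PTR_ERR(red);

		power = devm_gpiod_get(dev, "power", GPIOD_OUT_HIGH);
		if (IS_ERR(power))
			return PTR_ERR(power);

		return 0;
	}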
The second parameter of the gpiod_get() functions, the con_id string, has to be
the <function>-prefix of the GPIO suffixes ("gpios" or "gpio", automatically
looked up by the gpiod functions internally) used in the device tree. With above
"led-gpios" example, use the prefix without the "-" as con_id parameter: "led".
Internally, the GPIO subsystem prefixes the GPIO suffix ("gpios" or "gpio")
with the string passed in con_id to get the resulting string
(snprintf(... "%s-%s", con_id, gpio_suffixes[]).
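As a rough illustration of that lookup (this is not the kernel's code, just a sketch of the name construction described above):

	/* for con_id = "led" this tries "led-gpios", then "led-gpio" */
	static const char * const gpio_suffixes[] = { "gpios", "gpio" };
	char prop_name[32];
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++)
		snprintf(prop_name, sizeof(prop_name), "%s-%s",
			 con_id, gpio_suffixes[i]);

The first constructed property name that matches something in the device node wins, which is why "led" is the right con_id for a "led-gpios" property.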
ACPI
----
ACPI also supports function names for GPIOs in a similar fashion to DT.
@@ -142,13 +151,14 @@ The driver controlling "foo.0" will then be able to obtain its GPIOs as follows:
struct gpio_desc *red, *green, *blue, *power;
red = gpiod_get_index(dev, "led", 0);
green = gpiod_get_index(dev, "led", 1);
blue = gpiod_get_index(dev, "led", 2);
red = gpiod_get_index(dev, "led", 0, GPIOD_OUT_HIGH);
green = gpiod_get_index(dev, "led", 1, GPIOD_OUT_HIGH);
blue = gpiod_get_index(dev, "led", 2, GPIOD_OUT_HIGH);
power = gpiod_get(dev, "power");
gpiod_direction_output(power, 1);
power = gpiod_get(dev, "power", GPIOD_OUT_HIGH);
Since the "power" GPIO is mapped as active-low, its actual signal will be 0
after this code. Contrary to the legacy integer GPIO interface, the active-low
property is handled during mapping and is thus transparent to GPIO consumers.
Since the "led" GPIOs are mapped as active-high, this example will switch their
signals to 1, i.e. enabling the LEDs. And for the "power" GPIO, which is mapped
as active-low, its actual signal will be 0 after this code. Contrary to the legacy
integer GPIO interface, the active-low property is handled during mapping and is
thus transparent to GPIO consumers.
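Consumers therefore always deal in logical values. A short sketch (gpiod_set_value() is the standard consumer call; the comments assume the active-low "power" mapping above):

	gpiod_set_value(power, 1);	/* logical 1 -> physical 0, power enabled  */
	gpiod_set_value(power, 0);	/* logical 0 -> physical 1, power disabled */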
@@ -39,6 +39,9 @@ device that displays digits), an additional index argument can be specified:
const char *con_id, unsigned int idx,
enum gpiod_flags flags)
For a more detailed description of the con_id parameter in the DeviceTree case
see Documentation/gpio/board.txt
The flags parameter is used to optionally specify a direction and initial value
for the GPIO. Values can be:
......
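The individual flag values are collapsed in this hunk. As a hedged illustration, assuming the gpiod_flags values present in this kernel (GPIOD_ASIS, GPIOD_IN, GPIOD_OUT_LOW, GPIOD_OUT_HIGH): with GPIOD_ASIS the line comes back unconfigured and the driver must set a direction itself, while GPIOD_OUT_LOW/GPIOD_OUT_HIGH configure an output and its initial logical value at request time. The helper and the "reset" function name below are invented:

	static int foo_get_reset(struct device *dev)
	{
		struct gpio_desc *reset;

		reset = gpiod_get(dev, "reset", GPIOD_ASIS);	/* no direction set yet */
		if (IS_ERR(reset))
			return PTR_ERR(reset);

		/* with GPIOD_ASIS the caller configures the direction itself */
		return gpiod_direction_output(reset, 1);	/* drive to logical 1 */
	}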
@@ -32,6 +32,10 @@ Supported chips:
Prefix: 'nct6792'
Addresses scanned: ISA address retrieved from Super I/O registers
Datasheet: Available from Nuvoton upon request
* Nuvoton NCT6793D
Prefix: 'nct6793'
Addresses scanned: ISA address retrieved from Super I/O registers
Datasheet: Available from Nuvoton upon request
Authors:
Guenter Roeck <linux@roeck-us.net>
......
@@ -15,8 +15,8 @@ The updated API replacements are:
DEFINE_STATIC_KEY_TRUE(key);
DEFINE_STATIC_KEY_FALSE(key);
static_key_likely()
statick_key_unlikely()
static_branch_likely()
static_branch_unlikely()
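A minimal usage sketch of the corrected names (the key, the function and foo_trace() are invented for illustration; this is not taken from the document):

	DEFINE_STATIC_KEY_FALSE(foo_tracing_key);

	static void foo_do_work(void)
	{
		/* out-of-line until static_branch_enable(&foo_tracing_key) patches it in */
		if (static_branch_unlikely(&foo_tracing_key))
			foo_trace();
	}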
0) Abstract
......
@@ -6452,11 +6452,11 @@ F: drivers/hwmon/ltc4261.c
LTP (Linux Test Project)
M: Mike Frysinger <vapier@gentoo.org>
M: Cyril Hrubis <chrubis@suse.cz>
M: Wanlong Gao <gaowanlong@cn.fujitsu.com>
M: Wanlong Gao <wanlong.gao@gmail.com>
M: Jan Stancek <jstancek@redhat.com>
M: Stanislav Kholmanskikh <stanislav.kholmanskikh@oracle.com>
M: Alexey Kodanev <alexey.kodanev@oracle.com>
L: ltp-list@lists.sourceforge.net (subscribers-only)
L: ltp@lists.linux.it (subscribers-only)
W: http://linux-test-project.github.io/
T: git git://github.com/linux-test-project/ltp.git
S: Maintained
......
@@ -297,7 +297,9 @@ static inline void __iomem * ioremap_nocache(unsigned long offset,
unsigned long size)
{
return ioremap(offset, size);
}
}
#define ioremap_uc ioremap_nocache
static inline void iounmap(volatile void __iomem *addr)
{
......
@@ -30,6 +30,7 @@ __delay(int loops)
" bgt %0,1b"
: "=&r" (tmp), "=r" (loops) : "1"(loops));
}
EXPORT_SYMBOL(__delay);
#ifdef CONFIG_SMP
#define LPJ cpu_data[smp_processor_id()].loops_per_jiffy
......
@@ -54,6 +54,14 @@ AS += -EL
LD += -EL
endif
#
# The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and
# later may result in code being generated that handles signed short and signed
# char struct members incorrectly. So disable it.
# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65932)
#
KBUILD_CFLAGS += $(call cc-option,-fno-ipa-sra)
# This selects which instruction set is used.
# Note that GCC does not numerically define an architecture version
# macro, but instead defines a whole series of macros which makes
......
@@ -491,11 +491,6 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
#endif
.endm
.macro uaccess_save_and_disable, tmp
uaccess_save \tmp
uaccess_disable \tmp
.endm
.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
......
@@ -40,6 +40,7 @@ do { \
"2:\t.asciz " #__file "\n" \
".popsection\n" \
".pushsection __bug_table,\"a\"\n" \
".align 2\n" \
"3:\t.word 1b, 2b\n" \
"\t.hword " #__line ", 0\n" \
".popsection"); \
......
@@ -12,6 +12,7 @@
#ifndef __ASSEMBLY__
#include <asm/barrier.h>
#include <asm/thread_info.h>
#endif
/*
@@ -89,7 +90,8 @@ static inline unsigned int get_domain(void)
asm(
"mrc p15, 0, %0, c3, c0 @ get domain"
: "=r" (domain));
: "=r" (domain)
: "m" (current_thread_info()->cpu_domain));
return domain;
}
@@ -98,7 +100,7 @@ static inline void set_domain(unsigned val)
{
asm volatile(
"mcr p15, 0, %0, c3, c0 @ set domain"
: : "r" (val));
: : "r" (val) : "memory");
isb();
}
......
@@ -25,7 +25,6 @@
struct task_struct;
#include <asm/types.h>
#include <asm/domain.h>
typedef unsigned long mm_segment_t;
......
@@ -226,6 +226,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
#ifdef CONFIG_CPU_USE_DOMAINS
/*
* Copy the initial value of the domain access control register
* from the current thread: thread->addr_limit will have been
@@ -233,6 +234,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
* kernel/fork.c
*/
thread->cpu_domain = get_domain();
#endif
if (likely(!(p->flags & PF_KTHREAD))) {
*childregs = *current_pt_regs();
......
@@ -95,9 +95,10 @@ emulate:
reteq r4 @ no, return failure
next:
uaccess_enable r3
.Lx1: ldrt r6, [r5], #4 @ get the next instruction and
@ increment PC
uaccess_disable r3
and r2, r6, #0x0F000000 @ test for FP insns
teq r2, #0x0C000000
teqne r2, #0x0D000000
......
@@ -98,8 +98,23 @@ ENTRY(privcmd_call)
mov r1, r2
mov r2, r3
ldr r3, [sp, #8]
/*
* Privcmd calls are issued by the userspace. We need to allow the
* kernel to access the userspace memory before issuing the hypercall.
*/
uaccess_enable r4
/* r4 is loaded now as we use it as scratch register before */
ldr r4, [sp, #4]
__HVC(XEN_IMM)
/*
* Disable userspace access from kernel. This is fine to do it
* unconditionally as no set_fs(KERNEL_DS)/set_fs(get_ds()) is
* called before.
*/
uaccess_disable r4
ldm sp!, {r4}
ret lr
ENDPROC(privcmd_call);
@@ -32,6 +32,7 @@ config ARM64
select GENERIC_CLOCKEVENTS_BROADCAST
select GENERIC_CPU_AUTOPROBE
select GENERIC_EARLY_IOREMAP
select GENERIC_IDLE_POLL_SETUP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
@@ -331,6 +332,22 @@ config ARM64_ERRATUM_845719
If unsure, say Y.
config ARM64_ERRATUM_843419
bool "Cortex-A53: 843419: A load or store might access an incorrect address"
depends on MODULES
default y
help
This option builds kernel modules using the large memory model in
order to avoid the use of the ADRP instruction, which can cause
a subsequent memory access to use an incorrect address on Cortex-A53
parts up to r0p4.
Note that the kernel itself must be linked with a version of ld
which fixes potentially affected ADRP instructions through the
use of veneers.
If unsure, say Y.
endmenu
......
@@ -41,6 +41,10 @@ endif
CHECKFLAGS += -D__aarch64__
ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
CFLAGS_MODULE += -mcmodel=large
endif
# Default value
head-y := arch/arm64/kernel/head.o
......
@@ -26,13 +26,9 @@
* Software defined PTE bits definition.
*/
#define PTE_VALID (_AT(pteval_t, 1) << 0)
#define PTE_WRITE (PTE_DBM) /* same as DBM (51) */
#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
#ifdef CONFIG_ARM64_HW_AFDBM
#define PTE_WRITE (PTE_DBM) /* same as DBM */
#else
#define PTE_WRITE (_AT(pteval_t, 1) << 57)
#endif
#define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
/*
@@ -146,7 +142,7 @@ extern struct page *empty_zero_page;
#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
#ifdef CONFIG_ARM64_HW_AFDBM
#define pte_hw_dirty(pte) (!(pte_val(pte) & PTE_RDONLY))
#define pte_hw_dirty(pte) (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#else
#define pte_hw_dirty(pte) (0)
#endif
@@ -238,7 +234,7 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
* When hardware DBM is not present, the sofware PTE_DIRTY bit is updated via
* the page fault mechanism. Checking the dirty status of a pte becomes:
*
* PTE_DIRTY || !PTE_RDONLY
* PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
*/
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
@@ -503,7 +499,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
/* preserve the hardware dirty information */
if (pte_hw_dirty(pte))
newprot |= PTE_DIRTY;
pte = pte_mkdirty(pte);
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
return pte;
}
......
@@ -134,7 +134,7 @@ static int os_lock_notify(struct notifier_block *self,
unsigned long action, void *data)
{
int cpu = (unsigned long)data;
if (action == CPU_ONLINE)
if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
smp_call_function_single(cpu, clear_os_lock, NULL, 1);
return NOTIFY_OK;
}
......
@@ -523,6 +523,11 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
msr hstr_el2, xzr // Disable CP15 traps to EL2
#endif
/* EL2 debug */
mrs x0, pmcr_el0 // Disable debug access traps
ubfx x0, x0, #11, #5 // to EL2 and allow access to
msr mdcr_el2, x0 // all PMU counters from EL1
/* Stage-2 translation */
msr vttbr_el2, xzr
......