talos-obmc-linux · Commits

Commit f5d9b97e
Authored 19 years ago by Steve French

Merge with rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git

Parents: 3079ca62, cf380ee7
Changes: 104 files in the commit — this page shows 20 changed files with 361 additions and 226 deletions (+361 -226).
arch/arm/boot/compressed/head-xscale.S  +7 -0
arch/arm/kernel/entry-armv.S  +11 -5
arch/arm/kernel/traps.c  +49 -0
arch/arm/lib/io-writesw-armv4.S  +3 -3
arch/arm/mach-pxa/mainstone.c  +9 -0
arch/arm/mach-pxa/pm.c  +18 -14
arch/arm/mach-pxa/pxa25x.c  +29 -0
arch/arm/mach-pxa/pxa27x.c  +32 -0
arch/arm/mach-s3c2410/dma.c  +4 -0
arch/arm/mm/Kconfig  +8 -7
arch/arm/mm/Makefile  +0 -2
arch/arm/mm/copypage-xscale.S  +0 -113
arch/arm/mm/copypage-xscale.c  +131 -0
arch/arm/mm/minicache.c  +0 -73
arch/i386/kernel/Makefile  +1 -1
arch/ia64/kernel/module.c  +6 -4
arch/ia64/kernel/ptrace.c  +6 -0
arch/ia64/kernel/setup.c  +2 -1
arch/ia64/kernel/traps.c  +28 -1
arch/ia64/mm/init.c  +17 -2
arch/arm/boot/compressed/head-xscale.S
@@ -47,3 +47,10 @@ __XScale_start:
 		orr	r7, r7, #(MACH_TYPE_GTWX5715 & 0xff00)
 #endif
 
+#ifdef CONFIG_ARCH_IXP2000
+		mov	r1, #-1
+		mov	r0, #0xd6000000
+		str	r1, [r0, #0x14]
+		str	r1, [r0, #0x18]
+#endif
arch/arm/kernel/entry-armv.S
@@ -269,7 +269,7 @@ __pabt_svc:
 	add	r5, sp, #S_PC
 	ldmia	r7, {r2 - r4}			@ Get USR pc, cpsr
-#if __LINUX_ARM_ARCH__ < 6
+#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 	@ make sure our user space atomic helper is aborted
 	cmp	r2, #VIRT_OFFSET
 	bichs	r3, r3, #PSR_Z_BIT

@@ -616,11 +616,17 @@ __kuser_helper_start:
 __kuser_cmpxchg:				@ 0xffff0fc0
 
-#if __LINUX_ARM_ARCH__ < 6
-#ifdef CONFIG_SMP  /* sanity check */
-#error "CONFIG_SMP on a machine supporting pre-ARMv6 processors?"
-#endif
+#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+
+	/*
+	 * Poor you.  No fast solution possible...
+	 * The kernel itself must perform the operation.
+	 * A special ghost syscall is used for that (see traps.c).
+	 */
+	swi	#0x9ffff0
+	mov	pc, lr
+
+#elif __LINUX_ARM_ARCH__ < 6
 
 	/*
 	 * Theory of operation:
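For context, the helper rewired above sits at the fixed address 0xffff0fc0 and is called from user space with oldval in r0, newval in r1 and the pointer in r2; it returns zero when the exchange happened. The following is only an illustrative C sketch of that calling convention (the wrapper names are made up for this example, not part of the commit); on kernels built with CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG the same entry point simply issues the swi #0x9ffff0 ghost syscall handled by arm_syscall() in traps.c below.

/* Illustrative only -- user-space view of the __kuser_cmpxchg entry point.
 * The fixed address and the register convention are the kernel's kuser
 * helper ABI; the typedef, macro and retry loop exist only for this sketch.
 */
typedef int (kuser_cmpxchg_fn)(int oldval, int newval, volatile int *ptr);
#define kuser_cmpxchg ((kuser_cmpxchg_fn *)0xffff0fc0)

static int atomic_increment(volatile int *counter)
{
	int old, new;

	do {
		old = *counter;
		new = old + 1;
	} while (kuser_cmpxchg(old, new, counter) != 0);	/* 0 => exchanged */

	return new;
}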
arch/arm/kernel/traps.c
@@ -464,6 +464,55 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 #endif
 		return 0;
 
+#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
+	/*
+	 * Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
+	 * Return zero in r0 if *MEM was changed or non-zero if no exchange
+	 * happened.  Also set the user C flag accordingly.
+	 * If access permissions have to be fixed up then non-zero is
+	 * returned and the operation has to be re-attempted.
+	 *
+	 * *NOTE*: This is a ghost syscall private to the kernel.  Only the
+	 * __kuser_cmpxchg code in entry-armv.S should be aware of its
+	 * existence.  Don't ever use this from user code.
+	 */
+	case 0xfff0:
+	{
+		extern void do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
+		unsigned long val;
+		unsigned long addr = regs->ARM_r2;
+		struct mm_struct *mm = current->mm;
+		pgd_t *pgd; pmd_t *pmd; pte_t *pte;
+
+		regs->ARM_cpsr &= ~PSR_C_BIT;
+		spin_lock(&mm->page_table_lock);
+		pgd = pgd_offset(mm, addr);
+		if (!pgd_present(*pgd))
+			goto bad_access;
+		pmd = pmd_offset(pgd, addr);
+		if (!pmd_present(*pmd))
+			goto bad_access;
+		pte = pte_offset_map(pmd, addr);
+		if (!pte_present(*pte) || !pte_write(*pte))
+			goto bad_access;
+		val = *(unsigned long *)addr;
+		val -= regs->ARM_r0;
+		if (val == 0) {
+			*(unsigned long *)addr = regs->ARM_r1;
+			regs->ARM_cpsr |= PSR_C_BIT;
+		}
+		spin_unlock(&mm->page_table_lock);
+		return val;
+
+		bad_access:
+		spin_unlock(&mm->page_table_lock);
+		/* simulate a read access fault */
+		do_DataAbort(addr, 15 + (1 << 11), regs);
+		return -1;
+	}
+#endif
+
 	default:
 		/* Calls 9f00xx..9f07ff are defined to return -ENOSYS
 		   if not implemented, rather than raising SIGILL.  This
arch/arm/lib/io-writesw-armv4.S
@@ -87,9 +87,9 @@ ENTRY(__raw_writesw)
 		subs	r2, r2, #2
 		orr	ip, ip, r3, push_hbyte1
 		strh	ip, [r0]
-		bpl	2b
+		bpl	1b
 
-3:		tst	r2, #1
-2:		movne	ip, r3, lsr #8
+		tst	r2, #1
+3:		movne	ip, r3, lsr #8
 		strneh	ip, [r0]
 		mov	pc, lr
arch/arm/mach-pxa/mainstone.c
@@ -304,6 +304,15 @@ static void __init mainstone_map_io(void)
 	PWER  = 0xC0000002;
 	PRER  = 0x00000002;
 	PFER  = 0x00000002;
+	/*	for use I SRAM as framebuffer.	*/
+	PSLR |= 0xF04;
+	PCFR = 0x66;
+	/*	For Keypad wakeup.	*/
+	KPC &=~ KPC_ASACT;
+	KPC |= KPC_AS;
+	PKWR  = 0x000FD000;
+	/*	Need read PKWR back after set it. */
+	PKWR;
 }
 
 MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)")
arch/arm/mach-pxa/pm.c
@@ -29,9 +29,6 @@
  */
 #undef DEBUG
 
-extern void pxa_cpu_suspend(void);
-extern void pxa_cpu_resume(void);
-
 #define SAVE(x)		sleep_save[SLEEP_SAVE_##x] = x
 #define RESTORE(x)	x = sleep_save[SLEEP_SAVE_##x]

@@ -63,6 +60,12 @@ enum { SLEEP_SAVE_START = 0,
 	SLEEP_SAVE_ICMR,
 	SLEEP_SAVE_CKEN,
 
+#ifdef CONFIG_PXA27x
+	SLEEP_SAVE_MDREFR,
+	SLEEP_SAVE_PWER, SLEEP_SAVE_PCFR, SLEEP_SAVE_PRER,
+	SLEEP_SAVE_PFER, SLEEP_SAVE_PKWR,
+#endif
+
 	SLEEP_SAVE_CKSUM,
 	SLEEP_SAVE_SIZE

@@ -75,9 +78,7 @@ static int pxa_pm_enter(suspend_state_t state)
 	unsigned long checksum = 0;
 	struct timespec delta, rtc;
 	int i;
-
-	if (state != PM_SUSPEND_MEM)
-		return -EINVAL;
+	extern void pxa_cpu_pm_enter(suspend_state_t state);
 
 #ifdef CONFIG_IWMMXT
 	/* force any iWMMXt context to ram **/

@@ -100,16 +101,17 @@ static int pxa_pm_enter(suspend_state_t state)
 	SAVE(GAFR2_L); SAVE(GAFR2_U);
 
 #ifdef CONFIG_PXA27x
+	SAVE(MDREFR);
 	SAVE(GPLR3); SAVE(GPDR3); SAVE(GRER3); SAVE(GFER3); SAVE(PGSR3);
 	SAVE(GAFR3_L); SAVE(GAFR3_U);
+	SAVE(PWER); SAVE(PCFR); SAVE(PRER);
+	SAVE(PFER); SAVE(PKWR);
 #endif
 
 	SAVE(ICMR);
 	ICMR = 0;
 
 	SAVE(CKEN);
 	CKEN = 0;
 
 	SAVE(PSTR);
 
 	/* Note: wake up source are set up in each machine specific files */

@@ -123,16 +125,13 @@ static int pxa_pm_enter(suspend_state_t state)
 	/* Clear sleep reset status */
 	RCSR = RCSR_SMR;
 
-	/* set resume return address */
-	PSPR = virt_to_phys(pxa_cpu_resume);
-
 	/* before sleeping, calculate and save a checksum */
 	for (i = 0; i < SLEEP_SAVE_SIZE - 1; i++)
 		checksum += sleep_save[i];
 	sleep_save[SLEEP_SAVE_CKSUM] = checksum;
 
 	/* *** go zzz *** */
-	pxa_cpu_suspend();
+	pxa_cpu_pm_enter(state);
 
 	/* after sleeping, validate the checksum */
 	checksum = 0;

@@ -145,7 +144,7 @@ static int pxa_pm_enter(suspend_state_t state)
 		LUB_HEXLED = 0xbadbadc5;
 #endif
 		while (1)
-			pxa_cpu_suspend();
+			pxa_cpu_pm_enter(state);
 	}
 
 	/* ensure not to come back here if it wasn't intended */

@@ -162,8 +161,11 @@ static int pxa_pm_enter(suspend_state_t state)
 	RESTORE(PGSR0); RESTORE(PGSR1); RESTORE(PGSR2);
 
 #ifdef CONFIG_PXA27x
+	RESTORE(MDREFR);
 	RESTORE(GAFR3_L); RESTORE(GAFR3_U); RESTORE_GPLEVEL(3); RESTORE(GPDR3);
 	RESTORE(GRER3); RESTORE(GFER3); RESTORE(PGSR3);
+	RESTORE(PWER); RESTORE(PCFR); RESTORE(PRER);
+	RESTORE(PFER); RESTORE(PKWR);
 #endif
 
 	PSSR = PSSR_RDH | PSSR_PH;

@@ -197,7 +199,9 @@ unsigned long sleep_phys_sp(void *sp)
  */
 static int pxa_pm_prepare(suspend_state_t state)
 {
-	return 0;
+	extern int pxa_cpu_pm_prepare(suspend_state_t state);
+
+	return pxa_cpu_pm_prepare(state);
 }
 
 /*
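The hunks above guard the sleep_save[] area with a simple additive checksum: every slot except the last is summed before sleeping, the sum is stored in the SLEEP_SAVE_CKSUM slot, and the same sum is recomputed after resume to detect corruption of the save area. A small stand-alone sketch of that scheme (plain C, not kernel code; names are illustrative):

#include <stddef.h>

/* Sum every saved word except the final slot, which holds the checksum
 * itself -- mirrors the loop over SLEEP_SAVE_SIZE - 1 in pxa_pm_enter().
 */
static unsigned long save_area_checksum(const unsigned long *save, size_t nslots)
{
	unsigned long sum = 0;
	size_t i;

	for (i = 0; i < nslots - 1; i++)
		sum += save[i];
	return sum;
}

/* Before sleep:  save[nslots - 1] = save_area_checksum(save, nslots);
 * After resume:  a mismatch means the save area was corrupted while asleep.
 */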
arch/arm/mach-pxa/pxa25x.c
@@ -102,3 +102,32 @@ unsigned int get_lcdclk_frequency_10khz(void)
 }
 EXPORT_SYMBOL(get_lcdclk_frequency_10khz);
+
+int pxa_cpu_pm_prepare(suspend_state_t state)
+{
+	switch (state) {
+	case PM_SUSPEND_MEM:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void pxa_cpu_pm_enter(suspend_state_t state)
+{
+	extern void pxa_cpu_suspend(unsigned int);
+	extern void pxa_cpu_resume(void);
+
+	CKEN = 0;
+
+	switch (state) {
+	case PM_SUSPEND_MEM:
+		/* set resume return address */
+		PSPR = virt_to_phys(pxa_cpu_resume);
+		pxa_cpu_suspend(3);
+		break;
+	}
+}
arch/arm/mach-pxa/pxa27x.c
@@ -120,6 +120,38 @@ EXPORT_SYMBOL(get_clk_frequency_khz);
 EXPORT_SYMBOL(get_memclk_frequency_10khz);
 EXPORT_SYMBOL(get_lcdclk_frequency_10khz);
+
+int pxa_cpu_pm_prepare(suspend_state_t state)
+{
+	switch (state) {
+	case PM_SUSPEND_MEM:
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+void pxa_cpu_pm_enter(suspend_state_t state)
+{
+	extern void pxa_cpu_standby(void);
+	extern void pxa_cpu_suspend(unsigned int);
+	extern void pxa_cpu_resume(void);
+
+	CKEN = CKEN22_MEMC | CKEN9_OSTIMER;
+
+	/* ensure voltage-change sequencer not initiated, which hangs */
+	PCFR &= ~PCFR_FVC;
+
+	/* Clear edge-detect status register. */
+	PEDR = 0xDF12FE1B;
+
+	switch (state) {
+	case PM_SUSPEND_MEM:
+		/* set resume return address */
+		PSPR = virt_to_phys(pxa_cpu_resume);
+		pxa_cpu_suspend(3);
+		break;
+	}
+}
+
 /*
  * device registration specific to PXA27x.
arch/arm/mach-s3c2410/dma.c
@@ -785,6 +785,10 @@ int s3c2410_dma_free(dmach_t channel, s3c2410_dma_client_t *client)
 	chan->client = NULL;
 	chan->in_use = 0;
 
+	if (chan->irq_claimed)
+		free_irq(chan->irq, (void *)chan);
+	chan->irq_claimed = 0;
+
 	local_irq_restore(flags);
 
 	return 0;
arch/arm/mm/Kconfig
@@ -228,7 +228,6 @@ config CPU_SA1100
 	select CPU_CACHE_V4WB
 	select CPU_CACHE_VIVT
 	select CPU_TLB_V4WB
-	select CPU_MINICACHE
 
 # XScale
 config CPU_XSCALE

@@ -239,7 +238,6 @@ config CPU_XSCALE
 	select CPU_ABRT_EV5T
 	select CPU_CACHE_VIVT
 	select CPU_TLB_V4WBI
-	select CPU_MINICACHE
 
 # ARMv6
 config CPU_V6

@@ -345,11 +343,6 @@ config CPU_TLB_V4WBI
 config CPU_TLB_V6
 	bool
 
-config CPU_MINICACHE
-	bool
-	help
-	  Processor has a minicache.
-
 comment "Processor Features"
 
 config ARM_THUMB

@@ -429,3 +422,11 @@ config HAS_TLS_REG
 	  assume directly accessing that register and always obtain the
 	  expected value only on ARMv7 and above.
+
+config NEEDS_SYSCALL_FOR_CMPXCHG
+	bool
+	default y if SMP && (CPU_32v5 || CPU_32v4 || CPU_32v3)
+	help
+	  SMP on a pre-ARMv6 processor?  Well OK then.
+	  Forget about fast user space cmpxchg support.
+	  It is just not possible.
arch/arm/mm/Makefile
@@ -31,8 +31,6 @@ obj-$(CONFIG_CPU_COPY_V6) += copypage-v6.o mmu.o
 obj-$(CONFIG_CPU_SA1100)	+= copypage-v4mc.o
 obj-$(CONFIG_CPU_XSCALE)	+= copypage-xscale.o
-obj-$(CONFIG_CPU_MINICACHE)	+= minicache.o
 
 obj-$(CONFIG_CPU_TLB_V3)	+= tlb-v3.o
 obj-$(CONFIG_CPU_TLB_V4WT)	+= tlb-v4.o
 obj-$(CONFIG_CPU_TLB_V4WB)	+= tlb-v4wb.o
arch/arm/mm/copypage-xscale.S
deleted (file mode 100644 → 0); former contents:

/*
 *  linux/arch/arm/lib/copypage-xscale.S
 *
 *  Copyright (C) 2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/constants.h>

/*
 * General note:
 *  We don't really want write-allocate cache behaviour for these functions
 *  since that will just eat through 8K of the cache.
 */

		.text
		.align	5
/*
 * XScale optimised copy_user_page
 *  r0 = destination
 *  r1 = source
 *  r2 = virtual user address of ultimate destination page
 *
 * The source page may have some clean entries in the cache already, but we
 * can safely ignore them - break_cow() will flush them out of the cache
 * if we eventually end up using our copied page.
 *
 * What we could do is use the mini-cache to buffer reads from the source
 * page.  We rely on the mini-cache being smaller than one page, so we'll
 * cycle through the complete cache anyway.
 */
ENTRY(xscale_mc_copy_user_page)
		stmfd	sp!, {r4, r5, lr}
		mov	r5, r0
		mov	r0, r1
		bl	map_page_minicache
		mov	r1, r5
		mov	lr, #PAGE_SZ/64-1

		/*
		 * Strangely enough, best performance is achieved
		 * when prefetching destination as well.  (NP)
		 */
		pld	[r0, #0]
		pld	[r0, #32]
		pld	[r1, #0]
		pld	[r1, #32]

1:		pld	[r0, #64]
		pld	[r0, #96]
		pld	[r1, #64]
		pld	[r1, #96]

2:		ldrd	r2, [r0], #8
		ldrd	r4, [r0], #8
		mov	ip, r1
		strd	r2, [r1], #8
		ldrd	r2, [r0], #8
		strd	r4, [r1], #8
		ldrd	r4, [r0], #8
		strd	r2, [r1], #8
		strd	r4, [r1], #8
		mcr	p15, 0, ip, c7, c10, 1		@ clean D line
		ldrd	r2, [r0], #8
		mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line
		ldrd	r4, [r0], #8
		mov	ip, r1
		strd	r2, [r1], #8
		ldrd	r2, [r0], #8
		strd	r4, [r1], #8
		ldrd	r4, [r0], #8
		strd	r2, [r1], #8
		strd	r4, [r1], #8
		mcr	p15, 0, ip, c7, c10, 1		@ clean D line
		subs	lr, lr, #1
		mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line
		bgt	1b
		beq	2b

		ldmfd	sp!, {r4, r5, pc}

		.align	5
/*
 * XScale optimised clear_user_page
 *  r0 = destination
 *  r1 = virtual user address of ultimate destination page
 */
ENTRY(xscale_mc_clear_user_page)
		mov	r1, #PAGE_SZ/32
		mov	r2, #0
		mov	r3, #0
1:		mov	ip, r0
		strd	r2, [r0], #8
		strd	r2, [r0], #8
		strd	r2, [r0], #8
		strd	r2, [r0], #8
		mcr	p15, 0, ip, c7, c10, 1		@ clean D line
		subs	r1, r1, #1
		mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line
		bne	1b
		mov	pc, lr

	__INITDATA

	.type	xscale_mc_user_fns, #object
ENTRY(xscale_mc_user_fns)
	.long	xscale_mc_clear_user_page
	.long	xscale_mc_copy_user_page
	.size	xscale_mc_user_fns, . - xscale_mc_user_fns
arch/arm/mm/copypage-xscale.c
new file (mode 0 → 100644):

/*
 *  linux/arch/arm/lib/copypage-xscale.S
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us thrashing
 * the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/*
 * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
 * specific hacks for copying pages efficiently.
 */
#define COPYPAGE_MINICACHE	0xffff8000

#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_CACHEABLE)

#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)

static DEFINE_SPINLOCK(minicache_lock);

/*
 * XScale mini-dcache optimised copy_user_page
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 */
static void __attribute__((naked))
mc_copy_user_page(void *from, void *to)
{
	/*
	 * Strangely enough, best performance is achieved
	 * when prefetching destination as well.  (NP)
	 */
	asm volatile(
	"stmfd	sp!, {r4, r5, lr}		\n\
	mov	lr, %2				\n\
	pld	[r0, #0]			\n\
	pld	[r0, #32]			\n\
	pld	[r1, #0]			\n\
	pld	[r1, #32]			\n\
1:	pld	[r0, #64]			\n\
	pld	[r0, #96]			\n\
	pld	[r1, #64]			\n\
	pld	[r1, #96]			\n\
2:	ldrd	r2, [r0], #8			\n\
	ldrd	r4, [r0], #8			\n\
	mov	ip, r1				\n\
	strd	r2, [r1], #8			\n\
	ldrd	r2, [r0], #8			\n\
	strd	r4, [r1], #8			\n\
	ldrd	r4, [r0], #8			\n\
	strd	r2, [r1], #8			\n\
	strd	r4, [r1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	ldrd	r2, [r0], #8			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	ldrd	r4, [r0], #8			\n\
	mov	ip, r1				\n\
	strd	r2, [r1], #8			\n\
	ldrd	r2, [r0], #8			\n\
	strd	r4, [r1], #8			\n\
	ldrd	r4, [r0], #8			\n\
	strd	r2, [r1], #8			\n\
	strd	r4, [r1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	lr, lr, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bgt	1b				\n\
	beq	2b				\n\
	ldmfd	sp!, {r4, r5, pc} "
	:
	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
}

void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
{
	spin_lock(&minicache_lock);

	set_pte(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot));
	flush_tlb_kernel_page(COPYPAGE_MINICACHE);

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	spin_unlock(&minicache_lock);
}

/*
 * XScale optimised clear_user_page
 */
void __attribute__((naked))
xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr)
{
	asm volatile(
	"mov	r1, %0				\n\
	mov	r2, #0				\n\
	mov	r3, #0				\n\
1:	mov	ip, r0				\n\
	strd	r2, [r0], #8			\n\
	strd	r2, [r0], #8			\n\
	strd	r2, [r0], #8			\n\
	strd	r2, [r0], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	r1, r1, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bne	1b				\n\
	mov	pc, lr"
	:
	: "I" (PAGE_SIZE / 32));
}

struct cpu_user_fns xscale_mc_user_fns __initdata = {
	.cpu_clear_user_page	= xscale_mc_clear_user_page,
	.cpu_copy_user_page	= xscale_mc_copy_user_page,
};
arch/arm/mm/minicache.c
deleted (file mode 100644 → 0); former contents:

/*
 * linux/arch/arm/mm/minicache.c
 *
 * Copyright (C) 2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us thrashing
 * the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/*
 * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
 * specific hacks for copying pages efficiently.
 */
#define minicache_address (0xffff8000)
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_CACHEABLE)

static pte_t *minicache_pte;

/*
 * Note that this is intended to be called only from the copy_user_page
 * asm code; anything else will require special locking to prevent the
 * mini-cache space being re-used.  (Note: probably preempt unsafe).
 *
 * We rely on the fact that the minicache is 2K, and we'll be pushing
 * 4K of data through it, so we don't actually have to specifically
 * flush the minicache when we change the mapping.
 *
 * Note also: assert(PAGE_OFFSET <= virt < high_memory).
 * Unsafe: preempt, kmap.
 */
unsigned long map_page_minicache(unsigned long virt)
{
	set_pte(minicache_pte, pfn_pte(__pa(virt) >> PAGE_SHIFT, minicache_pgprot));
	flush_tlb_kernel_page(minicache_address);

	return minicache_address;
}

static int __init minicache_init(void)
{
	pgd_t *pgd;
	pmd_t *pmd;

	spin_lock(&init_mm.page_table_lock);

	pgd = pgd_offset_k(minicache_address);
	pmd = pmd_alloc(&init_mm, pgd, minicache_address);
	if (!pmd)
		BUG();
	minicache_pte = pte_alloc_kernel(&init_mm, pmd, minicache_address);
	if (!minicache_pte)
		BUG();

	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

core_initcall(minicache_init);
arch/i386/kernel/Makefile
@@ -43,7 +43,7 @@ obj-$(CONFIG_SCx200) += scx200.o
 # Note: kbuild does not track this dependency due to usage of .incbin
 $(obj)/vsyscall.o: $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so
 targets += $(foreach F,int80 sysenter,vsyscall-$F.o vsyscall-$F.so)
-targets += vsyscall.lds
+targets += vsyscall-note.o vsyscall.lds
 
 # The DSO images are built using a special linker script.
 quiet_cmd_syscall = SYSCALL $@
arch/ia64/kernel/module.c
@@ -825,14 +825,16 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
 		 * XXX Should have an arch-hook for running this after final section
 		 *     addresses have been selected...
 		 */
-		/* See if gp can cover the entire core module: */
-		uint64_t gp = (uint64_t) mod->module_core + MAX_LTOFF / 2;
-		if (mod->core_size >= MAX_LTOFF)
+		uint64_t gp;
+		if (mod->core_size > MAX_LTOFF)
 			/*
 			 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
 			 * at the end of the module.
 			 */
-			gp = (uint64_t) mod->module_core + mod->core_size - MAX_LTOFF / 2;
+			gp = mod->core_size - MAX_LTOFF / 2;
+		else
+			gp = mod->core_size / 2;
+		gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
 		mod->arch.gp = gp;
 		DEBUGP("%s: placing gp at 0x%lx\n", __FUNCTION__, gp);
 	}
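The replacement logic above places gp so that everything addressed through the limited-range ltoff offsets stays within MAX_LTOFF/2 of it, exploiting the fact that SHF_ARCH_SMALL sections are allocated at the end of the module. A hedged stand-alone restatement of that arithmetic (illustrative helper only, not kernel code):

#include <stdint.h>

/* Mirror of the gp placement rule in apply_relocate_add(): centre gp in a
 * small module, otherwise pull it back to MAX_LTOFF/2 before the end of
 * the module, and round the offset to an 8-byte boundary.
 */
static uint64_t place_gp(uint64_t module_core, uint64_t core_size,
			 uint64_t max_ltoff)
{
	uint64_t gp;

	if (core_size > max_ltoff)
		gp = core_size - max_ltoff / 2;
	else
		gp = core_size / 2;

	return module_core + ((gp + 7) & ~(uint64_t)7);	/* align to 8 */
}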
arch/ia64/kernel/ptrace.c
@@ -635,11 +635,17 @@ ia64_flush_fph (struct task_struct *task)
 {
 	struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
 
+	/*
+	 * Prevent migrating this task while
+	 * we're fiddling with the FPU state
+	 */
+	preempt_disable();
 	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
 		psr->mfh = 0;
 		task->thread.flags |= IA64_THREAD_FPH_VALID;
 		ia64_save_fpu(&task->thread.fph[0]);
 	}
+	preempt_enable();
 }
 
 /*
arch/ia64/kernel/setup.c
@@ -720,7 +720,8 @@ cpu_init (void)
 	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));
 
 	/*
-	 * Initialize default control register to defer all speculative faults.  The
+	 * Initialize default control register to defer speculative faults except
+	 * for those arising from TLB misses, which are not deferred.  The
 	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
 	 * the kernel must have recovery code for all speculative accesses).  Turn on
 	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
arch/ia64/kernel/traps.c
@@ -111,6 +111,24 @@ ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
 	siginfo_t siginfo;
 	int sig, code;
 
+	/* break.b always sets cr.iim to 0, which causes problems for
+	 * debuggers.  Get the real break number from the original instruction,
+	 * but only for kernel code.  User space break.b is left alone, to
+	 * preserve the existing behaviour.  All break codings have the same
+	 * format, so there is no need to check the slot type.
+	 */
+	if (break_num == 0 && !user_mode(regs)) {
+		struct ia64_psr *ipsr = ia64_psr(regs);
+		unsigned long *bundle = (unsigned long *)regs->cr_iip;
+		unsigned long slot;
+		switch (ipsr->ri) {
+		      case 0:  slot = (bundle[0] >> 5); break;
+		      case 1:  slot = (bundle[0] >> 46) | (bundle[1] << 18); break;
+		      default: slot = (bundle[1] >> 23); break;
+		}
+		break_num = ((slot >> 36 & 1) << 20) | (slot >> 6 & 0xfffff);
+	}
+
 	/* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these field initialized: */
 	siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
 	siginfo.si_imm = break_num;

@@ -202,13 +220,21 @@ disabled_fph_fault (struct pt_regs *regs)
 	/* first, grant user-level access to fph partition: */
 	psr->dfh = 0;
+
+	/*
+	 * Make sure that no other task gets in on this processor
+	 * while we're claiming the FPU
+	 */
+	preempt_disable();
 #ifndef CONFIG_SMP
 	{
 		struct task_struct *fpu_owner = (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER);
 
-		if (ia64_is_local_fpu_owner(current))
+		if (ia64_is_local_fpu_owner(current)) {
+			preempt_enable_no_resched();
 			return;
+		}
 
 		if (fpu_owner)
 			ia64_flush_fph(fpu_owner);

@@ -226,6 +252,7 @@ disabled_fph_fault (struct pt_regs *regs)
 	 */
 	psr->mfh = 1;
 	}
+	preempt_enable_no_resched();
 }
 
 static inline int
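The first hunk above recovers the break immediate by hand because break.b leaves cr.iim as 0: the 128-bit bundle at cr.iip is split into three instruction slots selected by psr.ri, and the 21-bit immediate is rebuilt from bit 36 and bits 6..25 of the chosen slot. A stand-alone sketch of that decoding (illustrative only; it restates the logic added to ia64_bad_break() using fixed-width types):

#include <stdint.h>

/* bundle[] holds the two 64-bit halves of the instruction bundle and ri is
 * the restart-instruction field from psr, exactly as used in the hunk above.
 */
static uint64_t break_num_from_bundle(const uint64_t bundle[2], unsigned int ri)
{
	uint64_t slot;

	switch (ri) {
	case 0:
		slot = bundle[0] >> 5;
		break;
	case 1:
		slot = (bundle[0] >> 46) | (bundle[1] << 18);
		break;
	default:
		slot = bundle[1] >> 23;
		break;
	}

	/* imm21 = { i (slot bit 36), imm20 (slot bits 6..25) } */
	return ((slot >> 36 & 1) << 20) | (slot >> 6 & 0xfffff);
}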
arch/ia64/mm/init.c
@@ -305,8 +305,9 @@ setup_gate (void)
 	struct page *page;
 
 	/*
-	 * Map the gate page twice: once read-only to export the ELF headers etc. and once
-	 * execute-only page to enable privilege-promotion via "epc":
+	 * Map the gate page twice: once read-only to export the ELF
+	 * headers etc. and once execute-only page to enable
+	 * privilege-promotion via "epc":
 	 */
 	page = virt_to_page(ia64_imva(__start_gate_section));
 	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);

@@ -315,6 +316,20 @@ setup_gate (void)
 	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
 #else
 	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
+
+	/* Fill in the holes (if any) with read-only zero pages: */
+	{
+		unsigned long addr;
+
+		for (addr = GATE_ADDR + PAGE_SIZE;
+		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
+		     addr += PAGE_SIZE)
+		{
+			put_kernel_page(ZERO_PAGE(0), addr, PAGE_READONLY);
+			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
+					PAGE_READONLY);
+		}
+	}
 #endif
 	ia64_patch_gate();
 }