Commit 42859eea authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/signal

Pull generic execve() changes from Al Viro:
 "This introduces the generic kernel_thread() and kernel_execve()
  functions, and switches x86, arm, alpha, um and s390 over to them."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/signal: (26 commits)
  s390: convert to generic kernel_execve()
  s390: switch to generic kernel_thread()
  s390: fold kernel_thread_helper() into ret_from_fork()
  s390: fold execve_tail() into start_thread(), convert to generic sys_execve()
  um: switch to generic kernel_thread()
  x86, um/x86: switch to generic sys_execve and kernel_execve
  x86: split ret_from_fork
  alpha: introduce ret_from_kernel_execve(), switch to generic kernel_execve()
  alpha: switch to generic kernel_thread()
  alpha: switch to generic sys_execve()
  arm: get rid of execve wrapper, switch to generic execve() implementation
  arm: optimized current_pt_regs()
  arm: introduce ret_from_kernel_execve(), switch to generic kernel_execve()
  arm: split ret_from_fork, simplify kernel_thread() [based on patch by rmk]
  generic sys_execve()
  generic kernel_execve()
  new helper: current_pt_regs()
  preparation for generic kernel_thread()
  um: kill thread->forking
  um: let signal_delivered() do SIGTRAP on singlestepping into handler
  ...
parents f59b51fe f322220d
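
The heart of the series is a single shared kernel_thread(), enabled per architecture by the new GENERIC_KERNEL_THREAD Kconfig symbol selected in the hunks below. Roughly, and only as a sketch (assuming the 3.6-era do_fork(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr) signature rather than quoting the exact kernel/fork.c code), the generic helper passes a NULL pt_regs pointer and carries the thread function and its argument in the stack_start/stack_size slots:

	/* Sketch of the generic helper; not the verbatim kernel/fork.c code. */
	pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
	{
		/* regs == NULL tells copy_thread() "this is a kernel thread";
		 * fn and arg travel in the stack_start and stack_size slots. */
		return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
			       (unsigned long)fn, NULL, (unsigned long)arg,
			       NULL, NULL);
	}

The per-architecture diffs below are mostly the two halves of that contract: copy_thread() learns to treat regs == NULL as a kernel thread and points the child at a new ret_from_kernel_thread trampoline, while the old hand-rolled kernel_thread() assembly and its exports are deleted.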
@@ -271,6 +271,9 @@ config ARCH_WANT_OLD_COMPAT_IPC
 	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
 	bool
 
+config GENERIC_KERNEL_THREAD
+	bool
+
 config HAVE_ARCH_SECCOMP_FILTER
 	bool
 	help
......
@@ -20,6 +20,7 @@ config ALPHA
 	select GENERIC_CMOS_UPDATE
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
+	select GENERIC_KERNEL_THREAD
 	help
 	  The Alpha is a 64-bit general-purpose processor designed and
 	  marketed by the Digital Equipment Corporation of blessed memory,
......
@@ -10,3 +10,4 @@ header-y += pal.h
 header-y += reg.h
 header-y += regdef.h
 header-y += sysinfo.h
+generic-y += exec.h
-#ifndef __ALPHA_EXEC_H
-#define __ALPHA_EXEC_H
-#define arch_align_stack(x) (x)
-#endif /* __ALPHA_EXEC_H */
@@ -49,9 +49,6 @@ extern void start_thread(struct pt_regs *, unsigned long, unsigned long);
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
-/* Create a kernel thread without removing it from tasklists. */
-extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
......
@@ -481,6 +481,8 @@
 #define __ARCH_WANT_SYS_OLDUMOUNT
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_RT_SIGSUSPEND
+#define __ARCH_WANT_SYS_EXECVE
+#define __ARCH_WANT_KERNEL_EXECVE
 
 /* "Conditional" syscalls.  What we want is
......
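
Defining __ARCH_WANT_SYS_EXECVE opts an architecture into the new generic sys_execve(), so the getname()/do_execve()/putname() wrapper that every port used to carry (see the code removed from the alpha and ARM files further down) now lives in one place and finds the user registers through the new current_pt_regs() helper. A minimal sketch of its shape, not the verbatim fs/exec.c code:

	SYSCALL_DEFINE3(execve,
			const char __user *, filename,
			const char __user *const __user *, argv,
			const char __user *const __user *, envp)
	{
		char *path = getname(filename);
		int error = PTR_ERR(path);

		if (!IS_ERR(path)) {
			error = do_execve(path, argv, envp, current_pt_regs());
			putname(path);
		}
		return error;
	}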
@@ -50,9 +50,6 @@ EXPORT_SYMBOL(alpha_read_fp_reg_s);
 EXPORT_SYMBOL(alpha_write_fp_reg);
 EXPORT_SYMBOL(alpha_write_fp_reg_s);
 
-/* entry.S */
-EXPORT_SYMBOL(kernel_thread);
-
 /* Networking helper routines. */
 EXPORT_SYMBOL(csum_tcpudp_magic);
 EXPORT_SYMBOL(ip_compute_csum);
......
@@ -609,59 +609,35 @@ ret_from_fork:
 .end ret_from_fork
 
 /*
- * kernel_thread(fn, arg, clone_flags)
+ * ... and new kernel threads - here
  */
 	.align 4
-	.globl	kernel_thread
-	.ent	kernel_thread
-kernel_thread:
-	/* We can be called from a module.  */
-	ldgp	$gp, 0($27)
-	.prologue 1
-	subq	$sp, SP_OFF+6*8, $sp
-	br	$1, 2f		/* load start address */
-	/* We've now "returned" from a fake system call.  */
-	unop
-	blt	$0, 1f		/* error?  */
-	ldi	$1, 0x3fff
-	beq	$20, 1f		/* parent or child?  */
-	bic	$sp, $1, $8	/* in child.  */
-	jsr	$26, ($27)
+	.globl	ret_from_kernel_thread
+	.ent	ret_from_kernel_thread
+ret_from_kernel_thread:
+	mov	$17, $16
+	jsr	$26, schedule_tail
+	mov	$9, $27
+	mov	$10, $16
+	jsr	$26, ($9)
 	ldgp	$gp, 0($26)
 	mov	$0, $16
 	mov	$31, $26
 	jmp	$31, sys_exit
+	.end ret_from_kernel_thread
 
-1:	ret			/* in parent.  */
-
-	.align 4
-2:	/* Fake a system call stack frame, as we can't do system calls
-	   from kernel space.  Note that we store FN and ARG as they
-	   need to be set up in the child for the call.  Also store $8
-	   and $26 for use in the parent.  */
-	stq	$31, SP_OFF($sp)	/* ps */
-	stq	$1, SP_OFF+8($sp)	/* pc */
-	stq	$gp, SP_OFF+16($sp)	/* gp */
-	stq	$16, 136($sp)		/* $27; FN for child */
-	stq	$17, SP_OFF+24($sp)	/* $16; ARG for child */
-	stq	$8, 64($sp)		/* $8 */
-	stq	$26, 128($sp)		/* $26 */
+	.globl	ret_from_kernel_execve
+	.align	4
+	.ent	ret_from_kernel_execve
+ret_from_kernel_execve:
+	mov	$16, $sp
 	/* Avoid the HAE being gratuitously wrong, to avoid restoring it. */
 	ldq	$2, alpha_mv+HAE_CACHE
 	stq	$2, 152($sp)		/* HAE */
+	mov	$31, $19		/* to disable syscall restarts */
+	br	$31, ret_to_user
 
-	/* Shuffle FLAGS to the front; add CLONE_VM.  */
-	ldi	$1, CLONE_VM|CLONE_UNTRACED
-	or	$18, $1, $16
-	bsr	$26, sys_clone
-	/* We don't actually care for a3 success widgetry in the kernel.
-	   Not for positive errno values.  */
-	stq	$0, 0($sp)		/* $0 */
-	br	ret_to_kernel
-	.end	kernel_thread
+	.end	ret_from_kernel_execve
 
 /*
@@ -744,15 +720,6 @@ sys_rt_sigreturn:
 	br	ret_from_sys_call
 .end sys_rt_sigreturn
 
-	.align	4
-	.globl	sys_execve
-	.ent	sys_execve
-sys_execve:
-	.prologue 0
-	mov	$sp, $19
-	jmp	$31, do_sys_execve
-.end sys_execve
-
 	.align	4
 	.globl	alpha_ni_syscall
 	.ent	alpha_ni_syscall
......
@@ -263,33 +263,35 @@ alpha_vfork(struct pt_regs *regs)
 /*
  * Copy an alpha thread..
- *
- * Note the "stack_offset" stuff: when returning to kernel mode, we need
- * to have some extra stack-space for the kernel stack that still exists
- * after the "ret_from_fork".  When returning to user mode, we only want
- * the space needed by the syscall stack frame (ie "struct pt_regs").
- * Use the passed "regs" pointer to determine how much space we need
- * for a kernel fork().
  */
 
 int
 copy_thread(unsigned long clone_flags, unsigned long usp,
-	    unsigned long unused,
+	    unsigned long arg,
 	    struct task_struct * p, struct pt_regs * regs)
 {
 	extern void ret_from_fork(void);
+	extern void ret_from_kernel_thread(void);
 
 	struct thread_info *childti = task_thread_info(p);
-	struct pt_regs * childregs;
-	struct switch_stack * childstack, *stack;
-	unsigned long stack_offset, settls;
+	struct pt_regs *childregs = task_pt_regs(p);
+	struct switch_stack *childstack, *stack;
+	unsigned long settls;
 
-	stack_offset = PAGE_SIZE - sizeof(struct pt_regs);
-	if (!(regs->ps & 8))
-		stack_offset = (PAGE_SIZE-1) & (unsigned long) regs;
-	childregs = (struct pt_regs *)
-	  (stack_offset + PAGE_SIZE + task_stack_page(p));
+	childstack = ((struct switch_stack *) childregs) - 1;
+	if (unlikely(!regs)) {
+		/* kernel thread */
+		memset(childstack, 0,
+			sizeof(struct switch_stack) + sizeof(struct pt_regs));
+		childstack->r26 = (unsigned long) ret_from_kernel_thread;
+		childstack->r9 = usp;	/* function */
+		childstack->r10 = arg;
+		childregs->hae = alpha_mv.hae_cache,
+		childti->pcb.usp = 0;
+		childti->pcb.ksp = (unsigned long) childstack;
+		childti->pcb.flags = 1;	/* set FEN, clear everything else */
+		return 0;
+	}
 	*childregs = *regs;
 	settls = regs->r20;
 	childregs->r0 = 0;
@@ -297,7 +299,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
 	childregs->r20 = 1;	/* OSF/1 has some strange fork() semantics. */
 	regs->r20 = 0;
 	stack = ((struct switch_stack *) regs) - 1;
-	childstack = ((struct switch_stack *) childregs) - 1;
 	*childstack = *stack;
 	childstack->r26 = (unsigned long) ret_from_fork;
 	childti->pcb.usp = usp;
@@ -385,27 +386,6 @@ dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task)
 }
 EXPORT_SYMBOL(dump_elf_task_fp);
 
-/*
- * sys_execve() executes a new program.
- */
-asmlinkage int
-do_sys_execve(const char __user *ufilename,
-	      const char __user *const __user *argv,
-	      const char __user *const __user *envp, struct pt_regs *regs)
-{
-	int error;
-	char *filename;
-
-	filename = getname(ufilename);
-	error = PTR_ERR(filename);
-	if (IS_ERR(filename))
-		goto out;
-	error = do_execve(filename, argv, envp, regs);
-	putname(filename);
-out:
-	return error;
-}
-
 /*
  * Return saved PC of a blocked thread.  This assumes the frame
  * pointer is the 6th saved long on the kernel stack and that the
@@ -459,22 +439,3 @@ get_wchan(struct task_struct *p)
 	}
 	return pc;
 }
-
-int kernel_execve(const char *path, const char *const argv[], const char *const envp[])
-{
-	/* Avoid the HAE being gratuitously wrong, which would cause us
-	   to do the whole turn off interrupts thing and restore it.  */
-	struct pt_regs regs = {.hae = alpha_mv.hae_cache};
-	int err = do_execve(path, argv, envp, &regs);
-	if (!err) {
-		struct pt_regs *p = current_pt_regs();
-		/* copy regs to normal position and off to userland we go... */
-		*p = regs;
-		__asm__ __volatile__ (
-			"mov %0, $sp;"
-			"br $31, ret_from_sys_call"
-			: : "r"(p));
-	}
-	return err;
-}
-EXPORT_SYMBOL(kernel_execve);
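
__ARCH_WANT_KERNEL_EXECVE is the in-kernel counterpart: per-arch kernel_execve() implementations like the alpha one deleted above (and the ARM version removed further down) are replaced by a generic routine that runs do_execve() against current_pt_regs() and then enters user space through the architecture's new ret_from_kernel_execve() trampoline. Roughly, as a sketch rather than the exact generic code:

	extern void __noreturn ret_from_kernel_execve(struct pt_regs *);

	int kernel_execve(const char *filename,
			  const char *const argv[],
			  const char *const envp[])
	{
		struct pt_regs *p = current_pt_regs();
		int ret;

		ret = do_execve(filename,
				(const char __user *const __user *)argv,
				(const char __user *const __user *)envp, p);
		if (ret < 0)
			return ret;

		/* Success: we never return to the caller.  The trampoline
		 * repositions the stack just under the pt_regs and exits
		 * to user space. */
		ret_from_kernel_execve(p);
	}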
@@ -52,6 +52,7 @@ config ARM
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select DCACHE_WORD_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && !CPU_BIG_ENDIAN
+	select GENERIC_KERNEL_THREAD
 	help
 	  The ARM series is a line of low-power-consumption RISC chip designs
 	  licensed by ARM Ltd and targeted at embedded applications and
......
@@ -85,11 +85,6 @@ unsigned long get_wchan(struct task_struct *p);
 #define cpu_relax()	barrier()
 #endif
 
-/*
- * Create a new kernel thread
- */
-extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-
 #define task_pt_regs(p) \
 	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
......
@@ -254,6 +254,11 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs)
 	return regs->ARM_sp;
 }
 
+#define current_pt_regs(void) ({ \
+	register unsigned long sp asm ("sp"); \
+	(struct pt_regs *)((sp | (THREAD_SIZE - 1)) - 7) - 1; \
+})
+
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
......
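
The macro above is the "optimized current_pt_regs()" from the series: ORing the current stack pointer with THREAD_SIZE - 1 finds the last byte of the THREAD_SIZE-aligned kernel stack, subtracting 7 lands on THREAD_START_SP (8 bytes below the top), and backing up by one struct pt_regs yields the saved user registers, mirroring the task_pt_regs() definition shown earlier. Architectures that do not supply their own version presumably get a generic fallback along these lines (a sketch of what the "new helper: current_pt_regs()" commit is assumed to add to linux/ptrace.h):

	#ifndef current_pt_regs
	#define current_pt_regs() task_pt_regs(current)
	#endif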
@@ -2,7 +2,6 @@
 #include <asm/barrier.h>
 #include <asm/compiler.h>
 #include <asm/cmpxchg.h>
-#include <asm/exec.h>
 #include <asm/switch_to.h>
 #include <asm/system_info.h>
 #include <asm/system_misc.h>
@@ -478,6 +478,8 @@
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_SYS_SOCKETCALL
 #endif
+#define __ARCH_WANT_SYS_EXECVE
+#define __ARCH_WANT_KERNEL_EXECVE
 
 /*
  * "Conditional" syscalls
......
@@ -20,7 +20,7 @@
 		CALL(sys_creat)
 		CALL(sys_link)
/* 10 */	CALL(sys_unlink)
-		CALL(sys_execve_wrapper)
+		CALL(sys_execve)
 		CALL(sys_chdir)
 		CALL(OBSOLETE(sys_time))	/* used by libc4 */
 		CALL(sys_mknod)
......
@@ -91,6 +91,30 @@ ENTRY(ret_from_fork)
 	b	ret_slow_syscall
ENDPROC(ret_from_fork)
 
+ENTRY(ret_from_kernel_thread)
+ UNWIND(.fnstart)
+ UNWIND(.cantunwind)
+	bl	schedule_tail
+	mov	r0, r4
+	adr	lr, BSYM(1f)	@ kernel threads should not exit
+	mov	pc, r5
+1:	bl	do_exit
+	nop
+ UNWIND(.fnend)
+ENDPROC(ret_from_kernel_thread)
+
+/*
+ * turn a kernel thread into userland process
+ * use: ret_from_kernel_execve(struct pt_regs *normal)
+ */
+ENTRY(ret_from_kernel_execve)
+	mov	why, #0			@ not a syscall
+	str	why, [r0, #S_R0]	@ ... and we want 0 in ->ARM_r0 as well
+	get_thread_info tsk		@ thread structure
+	mov	sp, r0			@ stack pointer just under pt_regs
+	b	ret_slow_syscall
+ENDPROC(ret_from_kernel_execve)
+
 	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
@@ -517,11 +541,6 @@ sys_vfork_wrapper:
 		b	sys_vfork
ENDPROC(sys_vfork_wrapper)
 
-sys_execve_wrapper:
-		add	r3, sp, #S_OFF
-		b	sys_execve
-ENDPROC(sys_execve_wrapper)
-
sys_clone_wrapper:
 		add	ip, sp, #S_OFF
 		str	ip, [sp, #4]
......
@@ -373,6 +373,7 @@ void release_thread(struct task_struct *dead_task)
 }
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
 
 int
 copy_thread(unsigned long clone_flags, unsigned long stack_start,
@@ -381,13 +382,20 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
 	struct thread_info *thread = task_thread_info(p);
 	struct pt_regs *childregs = task_pt_regs(p);
 
-	*childregs = *regs;
-	childregs->ARM_r0 = 0;
-	childregs->ARM_sp = stack_start;
-
 	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
+
+	if (likely(regs)) {
+		*childregs = *regs;
+		childregs->ARM_r0 = 0;
+		childregs->ARM_sp = stack_start;
+		thread->cpu_context.pc = (unsigned long)ret_from_fork;
+	} else {
+		thread->cpu_context.r4 = stk_sz;
+		thread->cpu_context.r5 = stack_start;
+		thread->cpu_context.pc = (unsigned long)ret_from_kernel_thread;
+		childregs->ARM_cpsr = SVC_MODE;
+	}
 	thread->cpu_context.sp = (unsigned long)childregs;
-	thread->cpu_context.pc = (unsigned long)ret_from_fork;
 
 	clear_ptrace_hw_breakpoint(p);
@@ -423,63 +431,6 @@ int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
 }
 EXPORT_SYMBOL(dump_fpu);
 
-/*
- * Shuffle the argument into the correct register before calling the
- * thread function.  r4 is the thread argument, r5 is the pointer to
- * the thread function, and r6 points to the exit function.
- */
-extern void kernel_thread_helper(void);
-asm(	".pushsection .text\n"
-"	.align\n"
-"	.type	kernel_thread_helper, #function\n"
-"kernel_thread_helper:\n"
-#ifdef CONFIG_TRACE_IRQFLAGS
-"	bl	trace_hardirqs_on\n"
-#endif
-"	msr	cpsr_c, r7\n"
-"	mov	r0, r4\n"
-"	mov	lr, r6\n"
-"	mov	pc, r5\n"
-"	.size	kernel_thread_helper, . - kernel_thread_helper\n"
-"	.popsection");
-
-#ifdef CONFIG_ARM_UNWIND
-extern void kernel_thread_exit(long code);
-asm(	".pushsection .text\n"
-"	.align\n"
-"	.type	kernel_thread_exit, #function\n"
-"kernel_thread_exit:\n"
-"	.fnstart\n"
-"	.cantunwind\n"
-"	bl	do_exit\n"
-"	nop\n"
-"	.fnend\n"
-"	.size	kernel_thread_exit, . - kernel_thread_exit\n"
-"	.popsection");
-#else
-#define kernel_thread_exit	do_exit
-#endif
-
-/*
- * Create a kernel thread.
- */
-pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
-{
-	struct pt_regs regs;
-
-	memset(&regs, 0, sizeof(regs));
-
-	regs.ARM_r4 = (unsigned long)arg;
-	regs.ARM_r5 = (unsigned long)fn;
-	regs.ARM_r6 = (unsigned long)kernel_thread_exit;
-	regs.ARM_r7 = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE;
-
-	regs.ARM_pc = (unsigned long)kernel_thread_helper;
-	regs.ARM_cpsr = regs.ARM_r7 | PSR_I_BIT;
-
-	return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
-}
-EXPORT_SYMBOL(kernel_thread);
-
 unsigned long get_wchan(struct task_struct *p)
 {
 	struct stackframe frame;
......
@@ -59,69 +59,6 @@ asmlinkage int sys_vfork(struct pt_regs *regs)
 	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->ARM_sp, regs, 0, NULL, NULL);
 }
 
-/* sys_execve() executes a new program.
- * This is called indirectly via a small wrapper
- */
-asmlinkage int sys_execve(const char __user *filenamei,
-			  const char __user *const __user *argv,
-			  const char __user *const __user *envp, struct pt_regs *regs)
-{
-	int error;
-	char * filename;
-
-	filename = getname(filenamei);
-	error = PTR_ERR(filename);
-	if (IS_ERR(filename))
-		goto out;
-	error = do_execve(filename, argv, envp, regs);
-	putname(filename);
-out:
-	return error;
-}
-
-int kernel_execve(const char *filename,
-		  const char *const argv[],
-		  const char *const envp[])
-{
-	struct pt_regs regs;
-	int ret;
-
-	memset(&regs, 0, sizeof(struct pt_regs));
-	ret = do_execve(filename,
-			(const char __user *const __user *)argv,
-			(const char __user *const __user *)envp, &regs);
-	if (ret < 0)
-		goto out;
-
-	/*
-	 * Save argc to the register structure for userspace.
-	 */
-	regs.ARM_r0 = ret;
-
-	/*
-	 * We were successful.  We won't be returning to our caller, but
-	 * instead to user space by manipulating the kernel stack.
-	 */
-	asm(	"add	r0, %0, %1\n\t"
-		"mov	r1, %2\n\t"
-		"mov	r2, %3\n\t"
-		"bl	memmove\n\t"	/* copy regs to top of stack */
-		"mov	r8, #0\n\t"	/* not a syscall */
-		"mov	r9, %0\n\t"	/* thread structure */
-		"mov	sp, r0\n\t"	/* reposition stack pointer */
-		"b	ret_to_user"
-		:
-		: "r" (current_thread_info()),
-		  "Ir" (THREAD_START_SP - sizeof(regs)),
-		  "r" (&regs),
-		  "Ir" (sizeof(regs))
-		: "r0", "r1", "r2", "r3", "r8", "r9", "ip", "lr", "memory");
-
-out:
-	return ret;
-}
-EXPORT_SYMBOL(kernel_execve);
-
 /*
  * Since loff_t is a 64 bit type we avoid a lot of ABI hassle
  * with a different argument ordering.
......
 include include/asm-generic/Kbuild.asm
 generic-y += clkdev.h
+generic-y += exec.h
 header-y += cachectl.h
-/*
- * Copyright (C) 2004-2006 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_AVR32_EXEC_H
-#define __ASM_AVR32_EXEC_H
-#define arch_align_stack(x) (x)
-#endif /* __ASM_AVR32_EXEC_H */
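
Both asm/exec.h headers deleted in this diff (alpha earlier, avr32 just above) only define arch_align_stack() as a no-op, which is why the Kbuild hunks can point the two architectures at the shared header with "generic-y += exec.h". Presumably include/asm-generic/exec.h is essentially the same one-liner; a sketch of what it is assumed to provide:

	#ifndef __ASM_GENERIC_EXEC_H
	#define __ASM_GENERIC_EXEC_H

	#define arch_align_stack(x) (x)

	#endif /* __ASM_GENERIC_EXEC_H */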