1 /* $Id: process.c,v 1.131 2002/02/09 19:49:30 davem Exp $
2 * arch/sparc64/kernel/process.c
4 * Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
10 * This file handles the architecture-dependent parts of process handling.
15 #include <linux/config.h>
16 #include <linux/errno.h>
17 #include <linux/module.h>
18 #include <linux/sched.h>
19 #include <linux/kernel.h>
20 #include <linux/kallsyms.h>
22 #include <linux/smp.h>
23 #include <linux/smp_lock.h>
24 #include <linux/stddef.h>
25 #include <linux/ptrace.h>
26 #include <linux/slab.h>
27 #include <linux/user.h>
28 #include <linux/a.out.h>
29 #include <linux/config.h>
30 #include <linux/reboot.h>
31 #include <linux/delay.h>
32 #include <linux/compat.h>
33 #include <linux/init.h>
35 #include <asm/oplib.h>
36 #include <asm/uaccess.h>
37 #include <asm/system.h>
39 #include <asm/pgalloc.h>
40 #include <asm/pgtable.h>
41 #include <asm/processor.h>
42 #include <asm/pstate.h>
44 #include <asm/fpumacro.h>
46 #include <asm/cpudata.h>
47 #include <asm/mmu_context.h>
48 #include <asm/unistd.h>
50 /* #define VERBOSE_SHOWREGS */
53 * Nothing special yet...
/* default_idle() - the per-cpu idle loop (uniprocessor path).
 * Spins until a reschedule is pending, then re-enables preemption.
 * NOTE(review): interior lines of this function are missing from this
 * view (sampled source); code kept byte-identical.
 */
55 void default_idle(void)
62 * the idle loop on a Sparc... ;)
66 /* endless idle loop with no priority at all */
68 /* If current->work.need_resched is zero we should really
69 * setup for a system wakeup event and execute a shutdown
72 * But this requires writing back the contents of the
73 * L2 cache etc. so implement this later. -DaveM
75 while (!need_resched())
78 preempt_enable_no_resched();
/* SMP idle loop (the enclosing function signature — presumably
 * cpu_idle() — is not visible in this sampled view; confirm against the
 * full source). Tracks per-cpu idleness in cpuinfo->idle_volume so IRQ
 * handlers on other cpus can use it for load redistribution.
 */
88 * the idle loop on a UltraMultiPenguin...
90 * TIF_POLLING_NRFLAG is set because we do not sleep the cpu
91 * inside of the idler task, so an interrupt is not needed
92 * to get a clean fast response.
94 * XXX Reverify this assumption... -DaveM
96 * Addendum: We do want it to do something for the signal
97 * delivery case, we detect that by just seeing
98 * if we are trying to send this to an idler or not.
102 cpuinfo_sparc *cpuinfo = &local_cpu_data();
103 set_thread_flag(TIF_POLLING_NRFLAG);
106 if (need_resched()) {
107 cpuinfo->idle_volume = 0;
108 preempt_enable_no_resched();
113 cpuinfo->idle_volume++;
115 /* The store ordering is so that IRQ handlers on
116 * other cpus see our increasing idleness for the buddy
117 * redistribution algorithm. -DaveM
119 membar_storeload_storestore();
125 extern char reboot_command [];
127 extern void (*prom_palette)(int);
128 extern void (*prom_keyboard)(void);
/* machine_halt() - halt the machine via the PROM; restores the PROM
 * palette first when running on a graphical console. panic()s if the
 * PROM halt call returns (it should never return).
 */
130 void machine_halt(void)
132 if (!serial_console && prom_palette)
137 panic("Halt failed!");
/* machine_alt_power_off() - power the machine off via the PROM
 * (prom_halt_power_off). Mirrors machine_halt(): restore the PROM
 * palette on a graphical console, panic() if the PROM call returns.
 */
140 void machine_alt_power_off(void)
142 if (!serial_console && prom_palette)
146 prom_halt_power_off();
147 panic("Power-off failed!");
/* machine_restart() - reboot via the PROM, passing reboot_command as
 * the boot string. @cmd: optional command line (handling of cmd vs. the
 * global reboot_command is in lines missing from this view).
 * The strchr strips a trailing newline from reboot_command.
 */
150 void machine_restart(char * cmd)
154 p = strchr (reboot_command, '\n');
156 if (!serial_console && prom_palette)
163 prom_reboot(reboot_command);
165 panic("Reboot failed!");
/* show_regwindow32() - dump a 32-bit user register window (locals/ins)
 * for debugging. Flushes the cpu's register windows to the stack, then
 * copies the window from the user stack pointer (%o6 == u_regs[14],
 * truncated to 32 bits via compat_ptr) and printks it.
 * Silently returns if the copy_from_user() fails (error path lines are
 * missing from this view).
 */
169 static void show_regwindow32(struct pt_regs *regs)
171 struct reg_window32 __user *rw;
172 struct reg_window32 r_w;
175 __asm__ __volatile__ ("flushw");
176 rw = compat_ptr((unsigned)regs->u_regs[14]);
179 if (copy_from_user (&r_w, rw, sizeof(r_w))) {
185 printk("l0: %08x l1: %08x l2: %08x l3: %08x "
186 "l4: %08x l5: %08x l6: %08x l7: %08x\n",
187 r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
188 r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]);
189 printk("i0: %08x i1: %08x i2: %08x i3: %08x "
190 "i4: %08x i5: %08x i6: %08x i7: %08x\n",
191 r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
192 r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
/* Fallback when compat (32-bit) support is not configured: no-op.
 * (The surrounding #ifdef/#else lines are missing from this view.)
 */
195 #define show_regwindow32(regs) do { } while (0)
/* show_regwindow() - dump the current register window for debugging.
 * For 64-bit (or privileged) frames the stack pointer must be adjusted
 * by STACK_BIAS per the SPARC V9 ABI. User frames are copied in with
 * copy_from_user(); kernel frames are read directly through rwk.
 * Delegates to show_regwindow32() for 32-bit user tasks.
 */
198 static void show_regwindow(struct pt_regs *regs)
200 struct reg_window __user *rw;
201 struct reg_window *rwk;
202 struct reg_window r_w;
205 if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
206 __asm__ __volatile__ ("flushw");
207 rw = (struct reg_window __user *)
208 (regs->u_regs[14] + STACK_BIAS);
209 rwk = (struct reg_window *)
210 (regs->u_regs[14] + STACK_BIAS);
211 if (!(regs->tstate & TSTATE_PRIV)) {
214 if (copy_from_user (&r_w, rw, sizeof(r_w))) {
222 show_regwindow32(regs);
225 printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
226 rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]);
227 printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
228 rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]);
229 printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
230 rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]);
231 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
232 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
/* For kernel-mode traps, resolve %i7 (return address) to a symbol. */
233 if (regs->tstate & TSTATE_PRIV)
234 print_symbol("I7: <%s>\n", rwk->ins[7]);
/* show_stackframe() - printk a 64-bit kernel stack frame: the window
 * (locals/ins), the hidden struct-return pointer, the six extended arg
 * slots, and then every remaining long between the fixed frame header
 * (STACKFRAME_SZ) and the saved frame pointer.
 */
237 void show_stackframe(struct sparc_stackf *sf)
243 printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n"
244 "l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
245 sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3],
246 sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
247 printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n"
248 "i4: %016lx i5: %016lx fp: %016lx ret_pc: %016lx\n",
249 sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3],
250 sf->ins[4], sf->ins[5], (unsigned long)sf->fp, sf->callers_pc);
251 printk("sp: %016lx x0: %016lx x1: %016lx x2: %016lx\n"
252 "x3: %016lx x4: %016lx x5: %016lx xx: %016lx\n",
253 (unsigned long)sf->structptr, sf->xargs[0], sf->xargs[1],
254 sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
/* Dump the variable-size tail of the frame word by word.
 * NOTE(review): assumes sf->fp > sf + STACKFRAME_SZ; a bogus frame
 * pointer would make size underflow — acceptable in a debug dumper.
 */
256 size = ((unsigned long)sf->fp) - ((unsigned long)sf);
257 size -= STACKFRAME_SZ;
258 stk = (unsigned long *)((unsigned long)sf + STACKFRAME_SZ);
261 printk("s%d: %016lx\n", i++, *stk++);
262 } while ((size -= sizeof(unsigned long)));
/* show_stackframe32() - 32-bit (compat) twin of show_stackframe():
 * same layout dump, but with 32-bit slots and STACKFRAME32_SZ.
 */
265 void show_stackframe32(struct sparc_stackf32 *sf)
271 printk("l0: %08x l1: %08x l2: %08x l3: %08x\n",
272 sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3]);
273 printk("l4: %08x l5: %08x l6: %08x l7: %08x\n",
274 sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
275 printk("i0: %08x i1: %08x i2: %08x i3: %08x\n",
276 sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3]);
277 printk("i4: %08x i5: %08x fp: %08x ret_pc: %08x\n",
278 sf->ins[4], sf->ins[5], sf->fp, sf->callers_pc);
279 printk("sp: %08x x0: %08x x1: %08x x2: %08x\n"
280 "x3: %08x x4: %08x x5: %08x xx: %08x\n",
281 sf->structptr, sf->xargs[0], sf->xargs[1],
282 sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
/* Dump the variable-size frame tail, one 32-bit word per line. */
284 size = ((unsigned long)sf->fp) - ((unsigned long)sf);
285 size -= STACKFRAME32_SZ;
286 stk = (unsigned *)((unsigned long)sf + STACKFRAME32_SZ);
289 printk("s%d: %08x\n", i++, *stk++);
290 } while ((size -= sizeof(unsigned)));
/* Serializes register dumps from multiple cpus so output interleaves
 * per-dump rather than per-line.
 */
294 static DEFINE_SPINLOCK(regdump_lock);
/* __show_regs() - core register dump: trap state, globals, outs, and
 * the current register window. Interrupts are masked via %pstate
 * around the lock to avoid livelock against cross-call IPIs that might
 * themselves try to dump registers.
 */
297 void __show_regs(struct pt_regs * regs)
302 /* Protect against xcall ipis which might lead to livelock on the lock */
303 __asm__ __volatile__("rdpr %%pstate, %0\n\t"
304 "wrpr %0, %1, %%pstate"
307 spin_lock(&regdump_lock);
309 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
310 regs->tpc, regs->tnpc, regs->y, print_tainted());
311 print_symbol("TPC: <%s>\n", regs->tpc);
312 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
313 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
315 printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
316 regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
318 printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
319 regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
321 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
322 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
324 print_symbol("RPC: <%s>\n", regs->u_regs[15]);
325 show_regwindow(regs);
327 spin_unlock(&regdump_lock);
/* Restore the %pstate value saved on entry (re-enables interrupts). */
328 __asm__ __volatile__("wrpr %0, 0, %%pstate"
333 #ifdef VERBOSE_SHOWREGS
/* idump_from_user() - (debug-only) disassembly-style dump of the nine
 * instruction words around a user pc, marking the faulting one with
 * '<'/'>'. Bails out early if pc is not 4-byte aligned.
 */
334 static void idump_from_user (unsigned int *pc)
339 if((((unsigned long) pc) & 3))
343 for(i = -3; i < 6; i++) {
345 printk("%c%08x%c",i?' ':'<',code,i?' ':'>');
/* show_regs() - public entry for register dumps. With
 * VERBOSE_SHOWREGS, if the tpc sits inside the etrap path and the
 * frame pointer points into the current task's stack, also dump the
 * parent trap frame found at u_regs[14] + PTREGS_OFF.
 */
352 void show_regs(struct pt_regs *regs)
354 #ifdef VERBOSE_SHOWREGS
355 extern long etrap, etraptl1;
360 extern void smp_report_regs(void);
366 #ifdef VERBOSE_SHOWREGS
367 if (regs->tpc >= &etrap && regs->tpc < &etraptl1 &&
368 regs->u_regs[14] >= (long)current - PAGE_SIZE &&
369 regs->u_regs[14] < (long)current + 6 * PAGE_SIZE) {
370 printk ("*********parent**********\n");
371 __show_regs((struct pt_regs *)(regs->u_regs[14] + PTREGS_OFF));
372 idump_from_user(((struct pt_regs *)(regs->u_regs[14] + PTREGS_OFF))->tpc);
373 printk ("*********endpar**********\n");
/* show_regs32() - dump a 32-bit (compat) pt_regs: psr/pc/npc/y plus
 * the global and out registers, in %08x format.
 */
378 void show_regs32(struct pt_regs32 *regs)
380 printk("PSR: %08x PC: %08x NPC: %08x Y: %08x %s\n", regs->psr,
381 regs->pc, regs->npc, regs->y, print_tainted());
382 printk("g0: %08x g1: %08x g2: %08x g3: %08x ",
383 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
385 printk("g4: %08x g5: %08x g6: %08x g7: %08x\n",
386 regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
388 printk("o0: %08x o1: %08x o2: %08x o3: %08x ",
389 regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
391 printk("o4: %08x o5: %08x sp: %08x ret_pc: %08x\n",
392 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
/* thread_saved_pc() - best-effort recovery of a sleeping task's saved
 * pc by walking one frame up from its kernel stack pointer (ksp +
 * STACK_BIAS). Returns 0xdeadbeef if the stack/frame pointers look
 * misaligned or bogus (validation lines partly missing from this view).
 */
396 unsigned long thread_saved_pc(struct task_struct *tsk)
398 struct thread_info *ti = task_thread_info(tsk);
399 unsigned long ret = 0xdeadbeefUL;
403 sp = (unsigned long *)(ti->ksp + STACK_BIAS);
404 if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
407 fp = (unsigned long *)(sp[14] + STACK_BIAS);
408 if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
415 /* Free current thread data structures etc.. */
/* Releases per-thread state on exit: user trap handlers (utraps) and,
 * if the TIF_PERFCTR flag was set, the user performance-counter
 * pointers (the counter-disable lines are missing from this view).
 */
416 void exit_thread(void)
418 struct thread_info *t = current_thread_info();
/* utraps[0] holds a reference count; see asm/utrap.h — TODO confirm. */
421 if (t->utraps[0] < 2)
427 if (test_and_clear_thread_flag(TIF_PERFCTR)) {
428 t->user_cntd0 = t->user_cntd1 = NULL;
/* flush_thread() - reset thread state across exec(): commit a pending
 * ABI (32/64-bit) switch, re-point the TSB for the new mm, drop saved
 * user windows, stop user performance counters, clear FPU state, and
 * reset the address-space limit and signal-delivery disposition.
 */
434 void flush_thread(void)
436 struct thread_info *t = current_thread_info();
437 struct mm_struct *mm;
/* _TIF_ABI_PENDING marks an exec into the other ABI; XOR flips both
 * the pending bit (off) and the 32-bit bit (toggled).
 */
439 if (t->flags & _TIF_ABI_PENDING)
440 t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
444 tsb_context_switch(__pa(mm->pgd),
445 mm->context.sparc64_tsb);
447 set_thread_wsaved(0);
449 /* Turn off performance counters if on. */
450 if (test_and_clear_thread_flag(TIF_PERFCTR)) {
451 t->user_cntd0 = t->user_cntd1 = NULL;
456 /* Clear FPU register state. */
/* If the address-space limit is not already user (ASI_AIUS), reset it
 * (the set_fs() call itself is on a line missing from this view).
 */
459 if (get_thread_current_ds() != ASI_AIUS)
462 /* Init new signal delivery disposition. */
463 clear_thread_flag(TIF_NEWSIGNALS);
466 /* It's a bit more tricky when 64-bit tasks are involved... */
/* clone_stackframe() - set up a child stack for clone() with a new
 * stack pointer @csp, copying `distance` bytes of the parent's frame
 * from @psp and re-linking the saved frame pointer (ins[6]).
 * Handles both 32-bit windows and STACK_BIASed 64-bit windows.
 * Returns the (possibly biased) child sp; error paths (returning 0 on
 * fault) are on lines missing from this view.
 */
467 static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
469 unsigned long fp, distance, rval;
471 if (!(test_thread_flag(TIF_32BIT))) {
474 __get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
477 __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));
479 /* Now 8-byte align the stack as this is mandatory in the
480 * Sparc ABI due to how register windows work. This hides
481 * the restriction from thread libraries etc. -DaveM
486 rval = (csp - distance);
487 if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
489 else if (test_thread_flag(TIF_32BIT)) {
490 if (put_user(((u32)csp),
491 &(((struct reg_window32 __user *)rval)->ins[6])))
494 if (put_user(((u64)csp - STACK_BIAS),
495 &(((struct reg_window __user *)rval)->ins[6])))
/* 64-bit ABI: the sp actually loaded into %sp carries STACK_BIAS. */
498 rval = rval - STACK_BIAS;
504 /* Standard stuff. */
/* shift_window_buffer() - compact the thread's saved-window buffer by
 * shifting entries [first_win+1 .. last_win] down one slot, dropping
 * the entry at first_win. Both the stack pointers and the saved
 * register windows move together.
 */
505 static inline void shift_window_buffer(int first_win, int last_win,
506 struct thread_info *t)
510 for (i = first_win; i < last_win; i++) {
511 t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
512 memcpy(&t->reg_window[i], &t->reg_window[i+1],
513 sizeof(struct reg_window));
/* synchronize_user_stack() - flush the cpu's user register windows,
 * then try to write any windows still buffered in thread_info back to
 * the user stack. Windows whose copy_to_user() succeeds are removed
 * from the buffer; ones that fault stay buffered.
 */
517 void synchronize_user_stack(void)
519 struct thread_info *t = current_thread_info();
520 unsigned long window;
522 flush_user_windows();
523 if ((window = get_thread_wsaved()) != 0) {
524 int winsize = sizeof(struct reg_window);
/* 32-bit tasks store smaller windows and use no stack bias. */
527 if (test_thread_flag(TIF_32BIT))
528 winsize = sizeof(struct reg_window32);
534 unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
535 struct reg_window *rwin = &t->reg_window[window];
537 if (!copy_to_user((char __user *)sp, rwin, winsize)) {
538 shift_window_buffer(window, get_thread_wsaved() - 1, t);
539 set_thread_wsaved(get_thread_wsaved() - 1);
/* fault_in_user_windows() - like synchronize_user_stack(), but a
 * failed copy_to_user() is fatal for the task: on fault the window
 * count is reset to 0 and (on lines missing from this view) the task
 * is killed, since losing a register window is unrecoverable.
 */
545 void fault_in_user_windows(void)
547 struct thread_info *t = current_thread_info();
548 unsigned long window;
549 int winsize = sizeof(struct reg_window);
552 if (test_thread_flag(TIF_32BIT))
553 winsize = sizeof(struct reg_window32);
557 flush_user_windows();
558 window = get_thread_wsaved();
563 unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
564 struct reg_window *rwin = &t->reg_window[window];
566 if (copy_to_user((char __user *)sp, rwin, winsize))
570 set_thread_wsaved(0);
574 set_thread_wsaved(window + 1);
/* sparc_do_fork() - arch wrapper for the fork/clone syscalls.
 * Extracts the parent/child TID pointers from the trap-frame in
 * registers %i2/%i4, converting through compat_ptr() for 32-bit
 * callers, then hands off to the generic do_fork().
 */
578 asmlinkage long sparc_do_fork(unsigned long clone_flags,
579 unsigned long stack_start,
580 struct pt_regs *regs,
581 unsigned long stack_size)
583 int __user *parent_tid_ptr, *child_tid_ptr;
586 if (test_thread_flag(TIF_32BIT)) {
587 parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]);
588 child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]);
592 parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2];
593 child_tid_ptr = (int __user *) regs->u_regs[UREG_I4];
596 return do_fork(clone_flags, stack_start,
598 parent_tid_ptr, child_tid_ptr);
601 /* Copy a Sparc thread. The fork() return value conventions
602 * under SunOS are nothing short of bletcherous:
603 * Parent --> %o0 == childs pid, %o1 == 0
604 * Child --> %o0 == parents pid, %o1 == 1
/* copy_thread() - build the new task's kernel stack: copy the parent's
 * trap frame + stack frame to the top of the child's stack, set the
 * child's ksp/kregs, and fix up the frame pointer. Kernel threads
 * (TSTATE_PRIV set in the parent regs) get a copy of the parent's
 * kernel stack frame and kernel address space (ASI_P); user threads
 * get the user sp (optionally relocated via clone_stackframe()) and
 * user address space (ASI_AIUS).
 */
606 int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
607 unsigned long unused,
608 struct task_struct *p, struct pt_regs *regs)
610 struct thread_info *t = task_thread_info(p);
611 char *child_trap_frame;
613 /* Calculate offset to stack_frame & pt_regs */
614 child_trap_frame = task_stack_page(p) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ));
615 memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ));
/* Stash the child's CWP (current window pointer, +1 because the trap
 * entry saved the window) into the thread flags; the current_ds field
 * is cleared here and set per-branch below.
 */
617 t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) | (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) |
618 (((regs->tstate + 1) & TSTATE_CWP) << TI_FLAG_CWP_SHIFT);
620 t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
621 t->kregs = (struct pt_regs *)(child_trap_frame+sizeof(struct sparc_stackf));
624 if (regs->tstate & TSTATE_PRIV) {
625 /* Special case, if we are spawning a kernel thread from
626 * a userspace task (via KMOD, NFS, or similar) we must
627 * disable performance counters in the child because the
628 * address space and protection realm are changing.
630 if (t->flags & _TIF_PERFCTR) {
631 t->user_cntd0 = t->user_cntd1 = NULL;
633 t->flags &= ~_TIF_PERFCTR;
635 t->kregs->u_regs[UREG_FP] = t->ksp;
636 t->flags |= ((long)ASI_P << TI_FLAG_CURRENT_DS_SHIFT);
637 flush_register_windows();
638 memcpy((void *)(t->ksp + STACK_BIAS),
639 (void *)(regs->u_regs[UREG_FP] + STACK_BIAS),
640 sizeof(struct sparc_stackf));
641 t->kregs->u_regs[UREG_G6] = (unsigned long) t;
642 t->kregs->u_regs[UREG_G4] = (unsigned long) t->task;
/* 32-bit child: truncate both stack pointers to 32 bits. */
644 if (t->flags & _TIF_32BIT) {
645 sp &= 0x00000000ffffffffUL;
646 regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
648 t->kregs->u_regs[UREG_FP] = sp;
649 t->flags |= ((long)ASI_AIUS << TI_FLAG_CURRENT_DS_SHIFT);
/* A different sp (e.g. clone() with a new stack) needs the parent's
 * frame copied over and re-linked on the new stack.
 */
650 if (sp != regs->u_regs[UREG_FP]) {
653 csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
656 t->kregs->u_regs[UREG_FP] = csp;
662 /* Set the return value for the child. */
663 t->kregs->u_regs[UREG_I0] = current->pid;
664 t->kregs->u_regs[UREG_I1] = 1;
666 /* Set the second return value for the parent. */
667 regs->u_regs[UREG_I1] = 0;
/* CLONE_SETTLS: TLS pointer arrives in %i3, lands in the child's %g7. */
669 if (clone_flags & CLONE_SETTLS)
670 t->kregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];
676 * This is the mechanism for creating a new kernel thread.
678 * NOTE! Only a kernel-only process(ie the swapper or direct descendants
679 * who haven't done an "execve()") should use this: it will work within
680 * a system call from a "real" process, but the process memory space will
681 * not be free'd until both the parent and the child have exited.
/* kernel_thread() - spawn a kernel thread running fn(arg) by issuing
 * the clone trap (t 0x6d) directly from inline asm. fn/arg are moved
 * into %g2/%g3 first because the child shares the parent's register
 * windows until it runs, so %o-registers could be clobbered. The child
 * (detected via %o1 == 1 after clone) calls fn, then traps into exit.
 * Returns the child pid in the parent (return statement is on a line
 * missing from this view).
 */
683 pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
687 /* If the parent runs before fn(arg) is called by the child,
688 * the input registers of this function can be clobbered.
689 * So we stash 'fn' and 'arg' into global registers which
690 * will not be modified by the parent.
692 __asm__ __volatile__("mov %4, %%g2\n\t" /* Save FN into global */
693 "mov %5, %%g3\n\t" /* Save ARG into global */
694 "mov %1, %%g1\n\t" /* Clone syscall nr. */
695 "mov %2, %%o0\n\t" /* Clone flags. */
696 "mov 0, %%o1\n\t" /* usp arg == 0 */
697 "t 0x6d\n\t" /* Linux/Sparc clone(). */
698 "brz,a,pn %%o1, 1f\n\t" /* Parent, just return. */
700 "jmpl %%g2, %%o7\n\t" /* Call the function. */
701 " mov %%g3, %%o0\n\t" /* Set arg in delay. */
703 "t 0x6d\n\t" /* Linux/Sparc exit(). */
704 /* Notreached by child. */
707 "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED),
708 "i" (__NR_exit), "r" (fn), "r" (arg) :
709 "g1", "g2", "g3", "o0", "o1", "memory", "cc");
714 * fill in the user structure for a core dump..
/* dump_thread() - intentionally a stub: zeroes the a.out `user`
 * structure instead of filling it in.
 */
716 void dump_thread(struct pt_regs * regs, struct user * dump)
718 /* Only should be used for SunOS and ancient a.out
719 * SparcLinux binaries... Not worth implementing.
721 memset(dump, 0, sizeof(struct user));
/* Fields of the 32-bit compat FPU regset (presumably the
 * elf_fpregset_t32 layout used by dump_fpu() below — the struct header
 * is not visible in this sampled view; confirm against the full
 * source): 32 single-precision regs, fsr, and the FPU queue.
 */
726 unsigned int pr_regs[32];
727 unsigned long pr_dregs[16];
729 unsigned int __unused;
731 unsigned char pr_qcnt;
732 unsigned char pr_q_entrysize;
734 unsigned int pr_q[64];
738 * fill in the fpu structure for a core dump.
/* dump_fpu() - copy the thread's saved FPU state into @fpregs for an
 * ELF core dump. fpsaved[0] holds the FPRS bits telling which halves
 * of the register file (FEF = lower, and — per the second memcpy —
 * presumably FPRS_DU for the upper half; the conditional lines are
 * missing from this view) contain live data; dead halves are zeroed.
 * 32-bit tasks get the compat layout with an (empty) FPU queue.
 */
740 int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
742 unsigned long *kfpregs = current_thread_info()->fpregs;
743 unsigned long fprs = current_thread_info()->fpsaved[0];
745 if (test_thread_flag(TIF_32BIT)) {
746 elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs;
749 memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs,
750 sizeof(unsigned int) * 32);
752 memset(&fpregs32->pr_fr.pr_regs[0], 0,
753 sizeof(unsigned int) * 32);
754 fpregs32->pr_qcnt = 0;
755 fpregs32->pr_q_entrysize = 8;
756 memset(&fpregs32->pr_q[0], 0,
757 (sizeof(unsigned int) * 64));
758 if (fprs & FPRS_FEF) {
759 fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0];
762 fpregs32->pr_fsr = 0;
767 memcpy(&fpregs->pr_regs[0], kfpregs,
768 sizeof(unsigned int) * 32);
770 memset(&fpregs->pr_regs[0], 0,
771 sizeof(unsigned int) * 32);
773 memcpy(&fpregs->pr_regs[16], kfpregs+16,
774 sizeof(unsigned int) * 32);
776 memset(&fpregs->pr_regs[16], 0,
777 sizeof(unsigned int) * 32);
778 if(fprs & FPRS_FEF) {
779 fpregs->pr_fsr = current_thread_info()->xfsr[0];
780 fpregs->pr_gsr = current_thread_info()->gsr[0];
782 fpregs->pr_fsr = fpregs->pr_gsr = 0;
784 fpregs->pr_fprs = fprs;
790 * sparc_execve() executes a new program after the asm stub has set
791 * things up for us. This should basically do what I want it to.
/* sparc_execve() - arch execve(): fetch filename/argv/envp from the
 * trap frame (with a `base` offset to handle the indirect-syscall
 * convention where %g1 == 0) and call the generic do_execve(). On
 * success, clear the saved FPU state, drop TSTATE_PEF so the FPU traps
 * on first use, and clear the single-step ptrace flag.
 */
793 asmlinkage int sparc_execve(struct pt_regs *regs)
798 /* User register window flush is done by entry.S */
800 /* Check for indirect call. */
801 if (regs->u_regs[UREG_G1] == 0)
804 filename = getname((char __user *)regs->u_regs[base + UREG_I0]);
805 error = PTR_ERR(filename);
806 if (IS_ERR(filename))
808 error = do_execve(filename,
809 (char __user * __user *)
810 regs->u_regs[base + UREG_I1],
811 (char __user * __user *)
812 regs->u_regs[base + UREG_I2], regs);
816 current_thread_info()->xfsr[0] = 0;
817 current_thread_info()->fpsaved[0] = 0;
818 regs->tstate &= ~TSTATE_PEF;
820 current->ptrace &= ~PT_DTRACE;
821 task_unlock(current);
/* get_wchan() - find the pc a sleeping task is blocked at by walking
 * its kernel stack frames (up to 16) until a pc outside the scheduler
 * is found. Returns 0 for NULL/current/runnable tasks, or when the
 * frame pointer leaves the task's stack. The function continues past
 * the end of this sampled view (final return is not visible).
 */
827 unsigned long get_wchan(struct task_struct *task)
829 unsigned long pc, fp, bias = 0;
830 unsigned long thread_info_base;
831 struct reg_window *rw;
832 unsigned long ret = 0;
835 if (!task || task == current ||
836 task->state == TASK_RUNNING)
839 thread_info_base = (unsigned long) task_stack_page(task);
841 fp = task_thread_info(task)->ksp + bias;
844 /* Bogus frame pointer? */
845 if (fp < (thread_info_base + sizeof(struct thread_info)) ||
846 fp >= (thread_info_base + THREAD_SIZE))
848 rw = (struct reg_window *) fp;
850 if (!in_sched_functions(pc)) {
/* Follow the saved frame pointer (%i6) to the caller's frame. */
854 fp = rw->ins[6] + bias;
855 } while (++count < 16);