Date: Sun, 5 Mar 2006 02:42:46 GMT
From: Kip Macy <kmacy@FreeBSD.org>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 92763 for review
Message-ID: <200603050242.k252gko0008782@repoman.freebsd.org>
http://perforce.freebsd.org/chv.cgi?CH=92763

Change 92763 by kmacy@kmacy_storage:sun4v_work on 2006/03/05 02:42:39

	remove last vestiges of original code

Affected files ...

.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/exception.S#15 edit

Differences ...

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/exception.S#15 (text+ko) ====

@@ -1,34 +1,5 @@
 /*-
- * Copyright (c) 1997 Berkeley Software Design, Inc.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. Berkeley Software Design Inc's name may not be used to endorse or
- *    promote products derived from this software without specific prior
- *    written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * BSDI $Id: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp $
- */
-/*-
- * Copyright (c) 2001 Jake Burkholder.
+ * Copyright (c) 2006 Kip Macy
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -51,10 +22,15 @@
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
+ *
+
+
+ * $ Exp $
  */
+
 #include <machine/asm.h>
-__FBSDID("$FreeBSD: src/sys/sparc64/sparc64/exception.S,v 1.71 2006/01/16 01:35:25 kris Exp $");
+__FBSDID("$$")
 
 #include "opt_compat.h"
 #include "opt_ddb.h"
@@ -83,8 +59,6 @@
 #endif
 
 #define	REGSIZE		8
-#define	TSB_KERNEL_MASK	0x0
-#define	TSB_KERNEL	0x0
 
 	.register %g2,#ignore
 	.register %g3,#ignore
@@ -189,216 +163,6 @@
 	ldda	[%g3 + %g1]asi, %i4 ;\
 	ldda	[%g3 + %g2]asi, %i6
 
-#define	ERRATUM50(reg)	mov reg, reg
-
-#define	KSTACK_SLOP	1024
-
-/*
- * Sanity check the kernel stack and bail out if its wrong.
- * XXX: doesn't handle being on the panic stack.
- */
-#define	KSTACK_CHECK \
-	dec	16, ASP_REG ; \
-	stx	%g1, [ASP_REG + 0] ; \
-	stx	%g2, [ASP_REG + 8] ; \
-	add	%sp, SPOFF, %g1 ; \
-	andcc	%g1, (1 << PTR_SHIFT) - 1, %g0 ; \
-	bnz,a	%xcc, tl1_kstack_fault ; \
-	inc	16, ASP_REG ; \
-	ldx	[PCPU(CURTHREAD)], %g2 ; \
-	ldx	[%g2 + TD_KSTACK], %g2 ; \
-	add	%g2, KSTACK_SLOP, %g2 ; \
-	subcc	%g1, %g2, %g1 ; \
-	ble,a	%xcc, tl1_kstack_fault ; \
-	inc	16, ASP_REG ; \
-	set	KSTACK_PAGES * PAGE_SIZE, %g2 ; \
-	cmp	%g1, %g2 ; \
-	bgt,a	%xcc, tl1_kstack_fault ; \
-	inc	16, ASP_REG ; \
-	ldx	[ASP_REG + 8], %g2 ; \
-	ldx	[ASP_REG + 0], %g1 ; \
-	inc	16, ASP_REG
-
-#if 0
-ENTRY(tl1_kstack_fault)
-	rdpr	%tl, %g1
-1:	cmp	%g1, 2
-	be,a	2f
-	nop
-
-#if KTR_COMPILE & KTR_TRAP
-	CATR(KTR_TRAP, "tl1_kstack_fault: tl=%#lx tpc=%#lx tnpc=%#lx"
-	    , %g2, %g3, %g4, 7, 8, 9)
-	rdpr	%tl, %g3
-	stx	%g3, [%g2 + KTR_PARM1]
-	rdpr	%tpc, %g3
-	stx	%g3, [%g2 + KTR_PARM1]
-	rdpr	%tnpc, %g3
-	stx	%g3, [%g2 + KTR_PARM1]
-9:
-#endif
-
-	sub	%g1, 1, %g1
-	wrpr	%g1, 0, %tl
-	ba,a	%xcc, 1b
-	nop
-
-2:
-#if KTR_COMPILE & KTR_TRAP
-	CATR(KTR_TRAP,
-	    "tl1_kstack_fault: sp=%#lx ks=%#lx cr=%#lx cs=%#lx ow=%#lx ws=%#lx"
-	    , %g1, %g2, %g3, 7, 8, 9)
-	add	%sp, SPOFF, %g2
-	stx	%g2, [%g1 + KTR_PARM1]
-	ldx	[PCPU(CURTHREAD)], %g2
-	ldx	[%g2 + TD_KSTACK], %g2
-	stx	%g2, [%g1 + KTR_PARM2]
-	rdpr	%canrestore, %g2
-	stx	%g2, [%g1 + KTR_PARM3]
-	rdpr	%cansave, %g2
-	stx	%g2, [%g1 + KTR_PARM4]
-	rdpr	%otherwin, %g2
-	stx	%g2, [%g1 + KTR_PARM5]
-	rdpr	%wstate, %g2
-	stx	%g2, [%g1 + KTR_PARM6]
-9:
-#endif
-
-	wrpr	%g0, 0, %canrestore
-	wrpr	%g0, 6, %cansave
-	wrpr	%g0, 0, %otherwin
-	wrpr	%g0, WSTATE_KERNEL, %wstate
-
-	sub	ASP_REG, SPOFF + CCFSZ, %sp
-	clr	%fp
-
-	set	trap, %o2
-	ba	%xcc, tl1_trap
-	mov	T_KSTACK_FAULT | T_KERNEL, %o0
-END(tl1_kstack_fault)
-#endif
-
-/*
- * Magic to resume from a spill or fill trap.  If we get an alignment or an
- * mmu fault during a spill or a fill, this macro will detect the fault and
- * resume at a set instruction offset in the trap handler.
- *
- * To check if the previous trap was a spill/fill we convert the trapped pc
- * to a trap type and verify that it is in the range of spill/fill vectors.
- * The spill/fill vectors are types 0x80-0xff and 0x280-0x2ff, masking off the
- * tl bit allows us to detect both ranges with one test.
- *
- * This is:
- *	0x80 <= (((%tpc - %tba) >> 5) & ~0x200) < 0x100
- *
- * To calculate the new pc we take advantage of the xor feature of wrpr.
- * Forcing all the low bits of the trapped pc on we can produce any offset
- * into the spill/fill vector.  The size of a spill/fill trap vector is 0x80.
- *
- *	0x7f ^ 0x1f == 0x60
- *	0x1f == (0x80 - 0x60) - 1
- *
- * Which are the offset and xor value used to resume from alignment faults.
- */
-
-/*
- * Determine if we have trapped inside of a spill/fill vector, and if so resume
- * at a fixed instruction offset in the trap vector.  Must be called on
- * alternate globals.
- */
-#define	RESUME_SPILLFILL_MAGIC(stxa_g0_sfsr, xor) \
-	dec	16, ASP_REG ; \
-	stx	%g1, [ASP_REG + 0] ; \
-	stx	%g2, [ASP_REG + 8] ; \
-	rdpr	%tpc, %g1 ; \
-	ERRATUM50(%g1) ; \
-	rdpr	%tba, %g2 ; \
-	sub	%g1, %g2, %g2 ; \
-	srlx	%g2, 5, %g2 ; \
-	andn	%g2, 0x200, %g2 ; \
-	cmp	%g2, 0x80 ; \
-	blu,pt	%xcc, 9f ; \
-	cmp	%g2, 0x100 ; \
-	bgeu,pt	%xcc, 9f ; \
-	or	%g1, 0x7f, %g1 ; \
-	wrpr	%g1, xor, %tnpc ; \
-	stxa_g0_sfsr ; \
-	ldx	[ASP_REG + 8], %g2 ; \
-	ldx	[ASP_REG + 0], %g1 ; \
-	inc	16, ASP_REG ; \
-	done ; \
-9:	ldx	[ASP_REG + 8], %g2 ; \
-	ldx	[ASP_REG + 0], %g1 ; \
-	inc	16, ASP_REG
-
-#define	RSF_XOR(off)	((0x80 - off) - 1)
-
-/*
- * Instruction offsets in spill and fill trap handlers for handling certain
- * nested traps, and corresponding xor constants for wrpr.
- */
-#define	RSF_OFF_ALIGN	0x60
-#define	RSF_OFF_MMU	0x70
-
-#define	RESUME_SPILLFILL_ALIGN \
-	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_ALIGN))
-#define	RESUME_SPILLFILL_MMU \
-	RESUME_SPILLFILL_MAGIC(EMPTY, RSF_XOR(RSF_OFF_MMU))
-#define	RESUME_SPILLFILL_MMU_CLR_SFSR \
-	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_MMU))
-
-/*
- * Constant to add to %tnpc when taking a fill trap just before returning to
- * user mode.
- */
-#define	RSF_FILL_INC	tl0_ret_fill_end - tl0_ret_fill
-
-/*
- * Retry a spill or fill with a different wstate due to an alignment fault.
- * We may just be using the wrong stack offset.
- */
-#define	RSF_ALIGN_RETRY(ws) \
-	wrpr	%g0, (ws), %wstate ; \
-	retry ; \
-	.align	16
-
-
-ENTRY(rsf_fatal)
-#if KTR_COMPILE & KTR_TRAP
-	CATR(KTR_TRAP, "rsf_fatal: bad window trap tt=%#lx type=%#lx"
-	    , %g1, %g3, %g4, 7, 8, 9)
-	rdpr	%tt, %g3
-	stx	%g3, [%g1 + KTR_PARM1]
-	stx	%g2, [%g1 + KTR_PARM2]
-9:
-#endif
-#if 0
-	KSTACK_CHECK
-#endif
-	sir
-END(rsf_fatal)
-
-	.comm	intrnames, IV_NAMLEN
-	.comm	eintrnames, 0
-
-	.comm	intrcnt, IV_MAX * 8
-	.comm	eintrcnt, 0
-
-/*
- * Trap table and associated macros
- *
- * Due to its size a trap table is an inherently hard thing to represent in
- * code in a clean way.  There are approximately 1024 vectors, of 8 or 32
- * instructions each, many of which are identical.  The way that this is
- * layed out is the instructions (8 or 32) for the actual trap vector appear
- * as an AS macro.  In general this code branches to tl0_trap or tl1_trap,
- * but if not supporting code can be placed just after the definition of the
- * macro.  The macros are then instantiated in a different section (.trap),
- * which is setup to be placed by the linker at the beginning of .text, and the
- * code around the macros is moved to the end of trap table.  In this way the
- * code that must be sequential in memory can be split up, and located near
- * its supporting code so that it is easier to follow.
- */
-
 /*
  * Clean window traps occur when %cleanwin is zero to ensure that data
  * is not leaked between address spaces in registers.
@@ -433,7 +197,7 @@
 
 	.macro	tl0_setup type
 	tl0_split
-	ba	%xcc, tl0_utrap
+	ba	%xcc, tl0_trap
 	mov	\type, %g1
 	.endm
 
@@ -1245,457 +1009,3 @@
 END(tl0_trap)
 
 END(tl0_intr)
-
-/*
- * Initiate return to usermode.
- *
- * Called with a trapframe on the stack.  The window that was setup in
- * tl0_trap may have been used by "fast" trap handlers that pretend to be
- * leaf functions, so all ins and locals may have been clobbered since
- * then.
- *
- * This code is rather long and complicated.
- */
-ENTRY(tl0_ret)
-	/*
-	 * Check for pending asts atomically with returning.  We must raise
-	 * the pil before checking, and if no asts are found the pil must
-	 * remain raised until the retry is executed, or we risk missing asts
-	 * caused by interrupts occuring after the test.  If the pil is lowered,
-	 * as it is when we call ast, the check must be re-executed.
-	 */
-	wrpr	%g0, PIL_TICK, %pil
-	ldx	[PCPU(CURTHREAD)], %l0
-	lduw	[%l0 + TD_FLAGS], %l1
-	set	TDF_ASTPENDING | TDF_NEEDRESCHED, %l2
-	and	%l1, %l2, %l1
-	brz,a,pt %l1, 1f
-	nop
-
-	/*
-	 * We have an ast.  Re-enable interrupts and handle it, then restart
-	 * the return sequence.
-	 */
-	wrpr	%g0, 0, %pil
-	call	ast
-	add	%sp, CCFSZ + SPOFF, %o0
-	ba,a	%xcc, tl0_ret
-	nop
-
-	/*
-	 * Check for windows that were spilled to the pcb and need to be
-	 * copied out.  This must be the last thing that is done before the
-	 * return to usermode.  If there are still user windows in the cpu
-	 * and we call a nested function after this, which causes them to be
-	 * spilled to the pcb, they will not be copied out and the stack will
-	 * be inconsistent.
-	 */
-#if 0
-1:	ldx	[PCB_REG + PCB_NSAVED], %l1
-#endif
-	brz,a,pt %l1, 2f
-	nop
-	wrpr	%g0, 0, %pil
-	mov	T_SPILL, %o0
-	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
-	call	trap
-	add	%sp, SPOFF + CCFSZ, %o0
-	ba,a	%xcc, tl0_ret
-	nop
-
-	/*
-	 * Restore the out and most global registers from the trapframe.
-	 * The ins will become the outs when we restore below.
-	 */
-2:	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
-	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
-	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
-	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
-	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
-	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
-	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
-	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7
-
-	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
-	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
-	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
-	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
-	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
-
-	/*
-	 * Load everything we need to restore below before disabling
-	 * interrupts.
-	 */
-	ldx	[%sp + SPOFF + CCFSZ + TF_FPRS], %l0
-	ldx	[%sp + SPOFF + CCFSZ + TF_GSR], %l1
-	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
-	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l3
-	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l4
-	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l5
-	ldx	[%sp + SPOFF + CCFSZ + TF_WSTATE], %l6
-
-	/*
-	 * Disable interrupts to restore the special globals.  They are not
-	 * saved and restored for all kernel traps, so an interrupt at the
-	 * wrong time would clobber them.
-	 */
-	wrpr	%g0, PSTATE_NORMAL, %pstate
-
-	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
-	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7
-
-	/*
-	 * Switch to alternate globals.  This frees up some registers we
-	 * can use after the restore changes our window.
-	 */
-#if 0
-	LOAD_ALT
-#endif
-	/*
-	 * Drop %pil to zero.  It must have been zero at the time of the
-	 * trap, since we were in usermode, but it was raised above in
-	 * order to check for asts atomically.  We have interrupts disabled
-	 * so any interrupts will not be serviced until we complete the
-	 * return to usermode.
-	 */
-	wrpr	%g0, 0, %pil
-
-	/*
-	 * Save %fprs in an alternate global so it can be restored after the
-	 * restore instruction below.  If we restore it before the restore,
-	 * and the restore traps we may run for a while with floating point
-	 * enabled in the kernel, which we want to avoid.
-	 */
-	mov	%l0, %g1
-
-	/*
-	 * Restore %fsr and %gsr.  These need floating point enabled in %fprs,
-	 * so we set it temporarily and then clear it.
-	 */
-	wr	%g0, FPRS_FEF, %fprs
-	ldx	[%sp + SPOFF + CCFSZ + TF_FSR], %fsr
-	wr	%l1, 0, %gsr
-	wr	%g0, 0, %fprs
-
-	/*
-	 * Restore program counters.  This could be done after the restore
-	 * but we're out of alternate globals to store them in...
-	 */
-	wrpr	%l2, 0, %tnpc
-	wrpr	%l3, 0, %tpc
-
-	/*
-	 * Save %tstate in an alternate global and clear the %cwp field.  %cwp
-	 * will be affected by the restore below and we need to make sure it
-	 * points to the current window at that time, not the window that was
-	 * active at the time of the trap.
-	 */
-	andn	%l4, TSTATE_CWP_MASK, %g2
-
-	/*
-	 * Restore %y.  Could also be below if we had more alternate globals.
-	 */
-	wr	%l5, 0, %y
-
-	/*
-	 * Setup %wstate for return.  We need to restore the user window state
-	 * which we saved in wstate.other when we trapped.  We also need to
-	 * set the transition bit so the restore will be handled specially
-	 * if it traps, use the xor feature of wrpr to do that.
-	 */
-#if 0
-	srlx	%l6, WSTATE_OTHER_SHIFT, %g3
-	wrpr	%g3, WSTATE_TRANSITION, %wstate
-#endif
-	/*
-	 * Setup window management registers for return.  If not all user
-	 * windows were spilled in the kernel %otherwin will be non-zero,
-	 * so we need to transfer it to %canrestore to correctly restore
-	 * those windows.  Otherwise everything gets set to zero and the
-	 * restore below will fill a window directly from the user stack.
-	 */
-	rdpr	%otherwin, %o0
-	wrpr	%o0, 0, %canrestore
-	wrpr	%g0, 0, %otherwin
-	wrpr	%o0, 0, %cleanwin
-
-	/*
-	 * Now do the restore.  If this instruction causes a fill trap which
-	 * fails to fill a window from the user stack, we will resume at
-	 * tl0_ret_fill_end and call back into the kernel.
-	 */
-	restore
-tl0_ret_fill:
-
-	/*
-	 * We made it.  We're back in the window that was active at the time
-	 * of the trap, and ready to return to usermode.
-	 */
-
-	/*
-	 * Restore %frps.  This was saved in an alternate global above.
-	 */
-	wr	%g1, 0, %fprs
-
-	/*
-	 * Fixup %tstate so the saved %cwp points to the current window and
-	 * restore it.
-	 */
-	rdpr	%cwp, %g4
-	wrpr	%g2, %g4, %tstate
-
-	/*
-	 * Restore the user window state.  The transition bit was set above
-	 * for special handling of the restore, this clears it.
-	 */
-	wrpr	%g3, 0, %wstate
-
-#if KTR_COMPILE & KTR_TRAP
-	CATR(KTR_TRAP, "tl0_ret: td=%#lx pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
-	    , %g2, %g3, %g4, 7, 8, 9)
-	ldx	[PCPU(CURTHREAD)], %g3
-	stx	%g3, [%g2 + KTR_PARM1]
-	rdpr	%pil, %g3
-	stx	%g3, [%g2 + KTR_PARM2]
-	rdpr	%tpc, %g3
-	stx	%g3, [%g2 + KTR_PARM3]
-	rdpr	%tnpc, %g3
-	stx	%g3, [%g2 + KTR_PARM4]
-	stx	%sp, [%g2 + KTR_PARM5]
-9:
-#endif
-
-	/*
-	 * Return to usermode.
-	 */
-	retry
-tl0_ret_fill_end:
-
-#if KTR_COMPILE & KTR_TRAP
-	CATR(KTR_TRAP, "tl0_ret: fill magic ps=%#lx ws=%#lx sp=%#lx"
-	    , %l0, %l1, %l2, 7, 8, 9)
-	rdpr	%pstate, %l1
-	stx	%l1, [%l0 + KTR_PARM1]
-	stx	%l5, [%l0 + KTR_PARM2]
-	stx	%sp, [%l0 + KTR_PARM3]
-9:
-#endif
-
-	/*
-	 * The restore above caused a fill trap and the fill handler was
-	 * unable to fill a window from the user stack.  The special fill
-	 * handler recognized this and punted, sending us here.  We need
-	 * to carefully undo any state that was restored before the restore
-	 * was executed and call trap again.  Trap will copyin a window
-	 * from the user stack which will fault in the page we need so the
-	 * restore above will succeed when we try again.  If this fails
-	 * the process has trashed its stack, so we kill it.
-	 */
-
-	/*
-	 * Restore the kernel window state.  This was saved in %l6 above, and
-	 * since the restore failed we're back in the same window.
-	 */
-	wrpr	%l6, 0, %wstate
-
-	/*
-	 * Restore the normal globals which have predefined values in the
-	 * kernel.  We clobbered them above restoring the user's globals
-	 * so this is very important.
-	 */
-	GET_PCB(PCB_REG)
-	mov	PCB_REG, %o0
-	mov	PCPU_REG, %o1
-	wrpr	%g0, PSTATE_NORMAL, %pstate
-	mov	%o0, PCB_REG
-	mov	%o1, PCPU_REG
-	wrpr	%g0, PSTATE_KERNEL, %pstate
-
-	/*
-	 * Simulate a fill trap and then start the whole return sequence over
-	 * again.  This is special because it only copies in 1 window, not 2
-	 * as we would for a normal failed fill.  This may be the first time
-	 * the process has been run, so there may not be 2 windows worth of
-	 * stack to copyin.
-	 */
-	mov	T_FILL_RET, %o0
-	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
-	call	trap
-	add	%sp, SPOFF + CCFSZ, %o0
-	ba,a	%xcc, tl0_ret
-	nop
-END(tl0_ret)
-
-/*
- * Kernel trap entry point
- *
- * void tl1_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
- *		 u_int sfsr)
- *
- * This is easy because the stack is already setup and the windows don't need
- * to be split.  We build a trapframe and call trap(), the same as above, but
- * the outs don't need to be saved.
- */
-ENTRY(tl1_trap)
-	rdpr	%tstate, %l0
-	rdpr	%tpc, %l1
-	rdpr	%tnpc, %l2
-	rdpr	%pil, %l3
-	rd	%y, %l4
-	rdpr	%wstate, %l5
-
-#if KTR_COMPILE & KTR_TRAP
-	CATR(KTR_TRAP, "tl1_trap: td=%p type=%#lx pil=%#lx pc=%#lx sp=%#lx"
-	    , %g1, %g2, %g3, 7, 8, 9)
-	ldx	[PCPU(CURTHREAD)], %g2
-	stx	%g2, [%g1 + KTR_PARM1]
-	stx	%o0, [%g1 + KTR_PARM2]
-	stx	%l3, [%g1 + KTR_PARM3]
-	stx	%l1, [%g1 + KTR_PARM4]
-	stx	%i6, [%g1 + KTR_PARM5]
-9:
-#endif
-
-	wrpr	%g0, 1, %tl
-#if 0
-	and	%l5, WSTATE_OTHER_MASK, %l5
-	wrpr	%l5, WSTATE_KERNEL, %wstate
-#endif
-	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
-	stx	%o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
-	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
-	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
-	stx	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]
-
-	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
-	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
-	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
-	stx	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
-	stx	%l4, [%sp + SPOFF + CCFSZ + TF_Y]
-
-	mov	PCB_REG, %l0
-	mov	PCPU_REG, %l1
-	wrpr	%g0, PSTATE_NORMAL, %pstate
-
-	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
-	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]
-
-	mov	%l0, PCB_REG
-	mov	%l1, PCPU_REG
-	wrpr	%g0, PSTATE_KERNEL, %pstate
-
-	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
-	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
-	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
-	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
-	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
-	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
-	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
-	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]
-
-	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
-	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
-	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
-	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
-	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
-
-	set	tl1_ret - 8, %o7
-	jmpl	%o2, %g0
-	add	%sp, CCFSZ + SPOFF, %o0
-END(tl1_trap)
-
-ENTRY(tl1_ret)
-	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
-	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
-	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
-	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
-	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
-	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
-	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
-	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7
-
-	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
-	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
-	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
-	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
-	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5
-
-	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
-	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l1
-	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
-	ldx	[%sp + SPOFF + CCFSZ + TF_PIL], %l3
-	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l4
-
-	set	VM_MIN_PROM_ADDRESS, %l5
-	cmp	%l1, %l5
-	bl,a,pt	%xcc, 1f
-	nop
-
-	wrpr	%g0, PSTATE_NORMAL, %pstate
-
-	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
-	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7
-
-1:
-#if 0
-	LOAD_ALT
-#endif
-	andn	%l0, TSTATE_CWP_MASK, %g1
-	mov	%l1, %g2
-	mov	%l2, %g3
-
-	wrpr	%l3, 0, %pil
-	wr	%l4, 0, %y
-
-	restore
-
-	wrpr	%g0, 2, %tl
-
-	rdpr	%cwp, %g4
-	wrpr	%g1, %g4, %tstate
-	wrpr	%g2, 0, %tpc
-	wrpr	%g3, 0, %tnpc
-
-#if KTR_COMPILE & KTR_TRAP
-	CATR(KTR_TRAP, "tl1_ret: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
-	    , %g2, %g3, %g4, 7, 8, 9)
-	ldx	[PCPU(CURTHREAD)], %g3
-	stx	%g3, [%g2 + KTR_PARM1]
-	rdpr	%pil, %g3
-	stx	%g3, [%g2 + KTR_PARM2]
-	rdpr	%tstate, %g3
-	stx	%g3, [%g2 + KTR_PARM3]
-	rdpr	%tpc, %g3
-	stx	%g3, [%g2 + KTR_PARM4]
-	stx	%sp, [%g2 + KTR_PARM5]
-9:
-#endif
-
-	retry
-END(tl1_ret)
-
-
-/*
- * Freshly forked processes come here when switched to for the first time.
- * The arguments to fork_exit() have been setup in the locals, we must move
- * them to the outs.
- */
-ENTRY(fork_trampoline)
-#if KTR_COMPILE & KTR_PROC
-	CATR(KTR_PROC, "fork_trampoline: td=%p (%s) cwp=%#lx"
-	    , %g1, %g2, %g3, 7, 8, 9)
-	ldx	[PCPU(CURTHREAD)], %g2
-	stx	%g2, [%g1 + KTR_PARM1]
-	ldx	[%g2 + TD_PROC], %g2
-	add	%g2, P_COMM, %g2
-	stx	%g2, [%g1 + KTR_PARM2]
-	rdpr	%cwp, %g2
-	stx	%g2, [%g1 + KTR_PARM3]
-9:
-#endif
-	mov	%l0, %o0
-	mov	%l1, %o1
-	call	fork_exit
-	mov	%l2, %o2
-	ba,a	%xcc, tl0_ret
-	nop
-END(fork_trampoline)
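For readers following the removed code: the trap-range test and the wrpr xor
trick described in the deleted RESUME_SPILLFILL_MAGIC comment can be restated
in C.  This is an illustrative sketch only, not kernel code; the helper names
(in_spillfill, resume_pc) and the example trap-table base are made up, while
the constants mirror the comment (32-byte trap-table entries, spill/fill
types 0x80-0xff and 0x280-0x2ff, 0x80-byte spill/fill handlers).

	/*
	 * Sketch of the arithmetic in the removed RESUME_SPILLFILL_MAGIC
	 * comment; names and values here are illustrative only.
	 */
	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define	RSF_XOR(off)	((0x80 - (off)) - 1)	/* same derivation as the macro */
	#define	RSF_OFF_ALIGN	0x60			/* resume offset for alignment faults */

	/* Did the previous trap come from a spill/fill vector? */
	static int
	in_spillfill(uint64_t tpc, uint64_t tba)
	{
		uint64_t tt = ((tpc - tba) >> 5) & ~UINT64_C(0x200); /* fold TL1 onto TL0 */

		return (tt >= 0x80 && tt < 0x100);
	}

	/* New %tnpc: force the low bits on, then xor down to the fixed offset. */
	static uint64_t
	resume_pc(uint64_t tpc, int off)
	{
		return ((tpc | 0x7f) ^ RSF_XOR(off));
	}

	int
	main(void)
	{
		uint64_t tba = 0x1000000;			/* arbitrary trap-table base */
		uint64_t tpc = tba + 0x80 * 0x20 + 0x24;	/* inside spill vector 0x80 */

		printf("in spill/fill vector: %d\n", in_spillfill(tpc, tba));
		printf("resume offset into handler: %#" PRIx64 "\n",
		    resume_pc(tpc, RSF_OFF_ALIGN) - (tba + 0x80 * 0x20));
		return (0);
	}

Running this prints an offset of 0x60, which matches the 0x7f ^ 0x1f == 0x60
identity quoted in the removed comment.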
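Similarly, the removed KSTACK_CHECK macro is just a three-part bounds test on
%sp + SPOFF.  A minimal C restatement follows; PTR_SHIFT, PAGE_SIZE,
KSTACK_PAGES and KSTACK_SLOP are stand-in values here, not the kernel's
definitions.

	#include <stdint.h>

	/* Stand-in constants; the real values come from the kernel headers. */
	#define	PTR_SHIFT	3			/* 8-byte pointers */
	#define	PAGE_SIZE	8192
	#define	KSTACK_PAGES	4
	#define	KSTACK_SLOP	1024

	/*
	 * Mirror of the removed KSTACK_CHECK tests: %sp + SPOFF must be
	 * pointer aligned, above the slop region at the bottom of the
	 * kernel stack, and no more than KSTACK_PAGES * PAGE_SIZE beyond it.
	 */
	static int
	kstack_ok(uint64_t sp_plus_spoff, uint64_t td_kstack)
	{
		int64_t off;

		if (sp_plus_spoff & ((1 << PTR_SHIFT) - 1))	/* andcc ... bnz */
			return (0);
		off = (int64_t)(sp_plus_spoff - (td_kstack + KSTACK_SLOP));
		if (off <= 0)					/* subcc ... ble */
			return (0);
		return (off <= KSTACK_PAGES * PAGE_SIZE);	/* cmp ... bgt */
	}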