Date: Fri, 1 Aug 2014 18:24:44 +0000 (UTC)
From: Ian Lepore <ian@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r269390 - in head/sys: arm/arm arm/include libkern/arm
Message-ID: <201408011824.s71IOiTi035821@svn.freebsd.org>
Author: ian
Date: Fri Aug  1 18:24:44 2014
New Revision: 269390
URL: http://svnweb.freebsd.org/changeset/base/269390

Log:
  Fix unwind-info errors in our hand-written arm assembler code.

  We have functions nested within functions, and places where we start a
  function then never end it, we just jump to the middle of something else.
  We tried to express this with nested ENTRY()/END() macros (which result
  in .fnstart and .fnend directives), but it turns out there's no way to
  express that nesting in ARM EHABI unwind info, and newer tools treat
  multiple .fnstart directives without an intervening .fnend as an error.

  These changes introduce two new macros, EENTRY() and EEND().  EENTRY()
  creates a global label you can call/jump to just like ENTRY(), but it
  doesn't emit a .fnstart.  EEND() is a no-op that just documents the
  conceptual endpoint that matches up with the same-named EENTRY().

  This is based on patches submitted by Stepan Dyatkovskiy, but I made
  some changes and added the EEND() stuff, so blame any problems on me.

  Submitted by:	Stepan Dyatkovskiy <stpworld@narod.ru>

Modified:
  head/sys/arm/arm/cpufunc_asm_arm10.S
  head/sys/arm/arm/cpufunc_asm_arm9.S
  head/sys/arm/arm/cpufunc_asm_armv5.S
  head/sys/arm/arm/cpufunc_asm_armv6.S
  head/sys/arm/arm/cpufunc_asm_armv7.S
  head/sys/arm/arm/cpufunc_asm_xscale.S
  head/sys/arm/arm/cpufunc_asm_xscale_c3.S
  head/sys/arm/arm/exception.S
  head/sys/arm/arm/fusu.S
  head/sys/arm/arm/locore.S
  head/sys/arm/arm/setstack.s
  head/sys/arm/arm/support.S
  head/sys/arm/include/asm.h
  head/sys/libkern/arm/divsi3.S

Modified: head/sys/arm/arm/cpufunc_asm_arm10.S
==============================================================================
--- head/sys/arm/arm/cpufunc_asm_arm10.S	Fri Aug  1 17:24:36 2014	(r269389)
+++ head/sys/arm/arm/cpufunc_asm_arm10.S	Fri Aug  1 18:24:44 2014	(r269390)
@@ -209,7 +209,7 @@ ENTRY_NP(arm10_idcache_wbinv_all)
 	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
 	/* Fall through to purge Dcache. */
 
-ENTRY(arm10_dcache_wbinv_all)
+EENTRY(arm10_dcache_wbinv_all)
 .Larm10_dcache_wbinv_all:
 	ldr	ip, .Larm10_cache_data
 	ldmia	ip, {s_max, i_max, s_inc, i_inc}
@@ -223,8 +223,8 @@ ENTRY(arm10_dcache_wbinv_all)
 	bhs	.Lnext_set_inv		/* Next set */
 	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
 	bx	lr
+EEND(arm10_dcache_wbinv_all)
 END(arm10_idcache_wbinv_all)
-END(arm10_dcache_wbinv_all)
 
 .Larm10_cache_data:
 	.word	_C_LABEL(arm10_dcache_sets_max)

Modified: head/sys/arm/arm/cpufunc_asm_arm9.S
==============================================================================
--- head/sys/arm/arm/cpufunc_asm_arm9.S	Fri Aug  1 17:24:36 2014	(r269389)
+++ head/sys/arm/arm/cpufunc_asm_arm9.S	Fri Aug  1 18:24:44 2014	(r269390)
@@ -197,7 +197,7 @@ ENTRY_NP(arm9_idcache_wbinv_all)
 	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
 	/* Fall through */
 
-ENTRY(arm9_dcache_wbinv_all)
+EENTRY(arm9_dcache_wbinv_all)
 .Larm9_dcache_wbinv_all:
 	ldr	ip, .Larm9_cache_data
 	ldmia	ip, {s_max, i_max, s_inc, i_inc}
@@ -210,8 +210,8 @@ ENTRY(arm9_dcache_wbinv_all)
 	subs	s_max, s_max, s_inc
 	bhs	.Lnext_set_inv		/* Next set */
 	mov	pc, lr
+EEND(arm9_dcache_wbinv_all)
 END(arm9_idcache_wbinv_all)
-END(arm9_dcache_wbinv_all)
 
 .Larm9_cache_data:
 	.word	_C_LABEL(arm9_dcache_sets_max)

Modified: head/sys/arm/arm/cpufunc_asm_armv5.S
==============================================================================
--- head/sys/arm/arm/cpufunc_asm_armv5.S	Fri Aug  1 17:24:36 2014	(r269389)
+++ head/sys/arm/arm/cpufunc_asm_armv5.S	Fri Aug  1 18:24:44 2014	(r269390)
@@ -194,6 +194,7 @@ ENTRY(armv5_idcache_wbinv_range)
 END(armv5_idcache_wbinv_range)
 
 ENTRY_NP(armv5_idcache_wbinv_all)
+armv5_idcache_wbinv_all:
 .Larmv5_idcache_wbinv_all:
 	/*
 	 * We assume that the code here can never be out of sync with the
@@ -203,7 +204,7 @@ ENTRY_NP(armv5_idcache_wbinv_all)
 	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
 	/* Fall through to purge Dcache. */
 
-ENTRY(armv5_dcache_wbinv_all)
+EENTRY(armv5_dcache_wbinv_all)
 .Larmv5_dcache_wbinv_all:
 	ldr	ip, .Larmv5_cache_data
 	ldmia	ip, {s_max, i_max, s_inc, i_inc}
@@ -219,8 +220,8 @@ ENTRY(armv5_dcache_wbinv_all)
 	bpl	1b			/* Next set */
 	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
 	RET
+EEND(armv5_dcache_wbinv_all)
 END(armv5_idcache_wbinv_all)
-END(armv5_dcache_wbinv_all)
 
 .Larmv5_cache_data:
 	.word	_C_LABEL(armv5_dcache_sets_max)

Modified: head/sys/arm/arm/cpufunc_asm_armv6.S
==============================================================================
--- head/sys/arm/arm/cpufunc_asm_armv6.S	Fri Aug  1 17:24:36 2014	(r269389)
+++ head/sys/arm/arm/cpufunc_asm_armv6.S	Fri Aug  1 18:24:44 2014	(r269390)
@@ -137,12 +137,12 @@ ENTRY_NP(armv6_idcache_wbinv_all)
 	/* Fall through to purge Dcache. */
 
 /* LINTSTUB: void armv6_dcache_wbinv_all(void); */
-ENTRY(armv6_dcache_wbinv_all)
+EENTRY(armv6_dcache_wbinv_all)
 	mcr	p15, 0, r0, c7, c14, 0	/* clean & invalidate D cache */
 	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
 	RET
+EEND(armv6_dcache_wbinv_all)
 END(armv6_idcache_wbinv_all)
-END(armv6_dcache_wbinv_all)
 
 ENTRY(armv6_idcache_inv_all)
 	mov	r0, #0

Modified: head/sys/arm/arm/cpufunc_asm_armv7.S
==============================================================================
--- head/sys/arm/arm/cpufunc_asm_armv7.S	Fri Aug  1 17:24:36 2014	(r269389)
+++ head/sys/arm/arm/cpufunc_asm_armv7.S	Fri Aug  1 18:24:44 2014	(r269390)
@@ -358,7 +358,7 @@ ENTRY(armv7_idcache_inv_all)
 	mcr	p15, 0, r0, c7, c5, 0	@ invalidate instruction+branch cache
 	isb				@ instruction sync barrier
 	bx	lr			@ return
-END(armv7_l1cache_inv_all)
+END(armv7_idcache_inv_all)
 
 ENTRY_NP(armv7_sleep)
 	dsb

Modified: head/sys/arm/arm/cpufunc_asm_xscale.S
==============================================================================
--- head/sys/arm/arm/cpufunc_asm_xscale.S	Fri Aug  1 17:24:36 2014	(r269389)
+++ head/sys/arm/arm/cpufunc_asm_xscale.S	Fri Aug  1 18:24:44 2014	(r269390)
@@ -306,11 +306,12 @@ _C_LABEL(xscale_minidata_clean_size):
 	XSCALE_CACHE_CLEAN_UNBLOCK
 
 ENTRY_NP(xscale_cache_syncI)
-ENTRY_NP(xscale_cache_purgeID)
+
+EENTRY_NP(xscale_cache_purgeID)
 	mcr	p15, 0, r0, c7, c5, 0	/* flush I cache (D cleaned below) */
-ENTRY_NP(xscale_cache_cleanID)
-ENTRY_NP(xscale_cache_purgeD)
-ENTRY(xscale_cache_cleanD)
+EENTRY_NP(xscale_cache_cleanID)
+EENTRY_NP(xscale_cache_purgeD)
+EENTRY(xscale_cache_cleanD)
 	XSCALE_CACHE_CLEAN_PROLOGUE
 
 1:	subs	r0, r0, #32
@@ -326,11 +327,11 @@ ENTRY(xscale_cache_cleanD)
 
 	XSCALE_CACHE_CLEAN_EPILOGUE
 	RET
+EEND(xscale_cache_cleanD)
+EEND(xscale_cache_purgeD)
+EEND(xscale_cache_cleanID)
+EEND(xscale_cache_purgeID)
 END(xscale_cache_syncI)
-END(xscale_cache_purgeID)
-END(xscale_cache_cleanID)
-END(xscale_cache_purgeD)
-END(xscale_cache_cleanD)
 
 /*
  * Clean the mini-data cache.
@@ -374,7 +375,7 @@ END(xscale_cache_purgeD_E)
  */
 
 /* xscale_cache_syncI is identical to xscale_cache_purgeID */
-ENTRY(xscale_cache_cleanID_rng)
+EENTRY(xscale_cache_cleanID_rng)
 ENTRY(xscale_cache_cleanD_rng)
 	cmp	r1, #0x4000
 	bcs	_C_LABEL(xscale_cache_cleanID)
@@ -393,7 +394,7 @@ ENTRY(xscale_cache_cleanD_rng)
 	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
 	CPWAIT_AND_RETURN(r0)
 
-END(xscale_cache_cleanID_rng)
+/*END(xscale_cache_cleanID_rng)*/
 END(xscale_cache_cleanD_rng)
 
 ENTRY(xscale_cache_purgeID_rng)

Modified: head/sys/arm/arm/cpufunc_asm_xscale_c3.S
==============================================================================
--- head/sys/arm/arm/cpufunc_asm_xscale_c3.S	Fri Aug  1 17:24:36 2014	(r269389)
+++ head/sys/arm/arm/cpufunc_asm_xscale_c3.S	Fri Aug  1 18:24:44 2014	(r269390)
@@ -143,11 +143,12 @@ __FBSDID("$FreeBSD$");
 
 ENTRY_NP(xscalec3_cache_syncI)
-ENTRY_NP(xscalec3_cache_purgeID)
+xscalec3_cache_purgeID:
+EENTRY_NP(xscalec3_cache_purgeID)
 	mcr	p15, 0, r0, c7, c5, 0	/* flush I cache (D cleaned below) */
-ENTRY_NP(xscalec3_cache_cleanID)
-ENTRY_NP(xscalec3_cache_purgeD)
-ENTRY(xscalec3_cache_cleanD)
+EENTRY_NP(xscalec3_cache_cleanID)
+EENTRY_NP(xscalec3_cache_purgeD)
+EENTRY(xscalec3_cache_cleanD)
 
 	XSCALE_CACHE_CLEAN_BLOCK
 	mov	r0, #0
@@ -168,11 +169,11 @@ ENTRY(xscalec3_cache_cleanD)
 
 	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
 	RET
+EEND(xscalec3_cache_purgeID)
+EEND(xscalec3_cache_cleanID)
+EEND(xscalec3_cache_purgeD)
+EEND(xscalec3_cache_cleanD)
 END(xscalec3_cache_syncI)
-END(xscalec3_cache_purgeID)
-END(xscalec3_cache_cleanID)
-END(xscalec3_cache_purgeD)
-END(xscalec3_cache_cleanD)
 
 ENTRY(xscalec3_cache_purgeID_rng)
@@ -238,7 +239,7 @@ ENTRY(xscalec3_cache_purgeD_rng)
 END(xscalec3_cache_purgeD_rng)
 
 ENTRY(xscalec3_cache_cleanID_rng)
-ENTRY(xscalec3_cache_cleanD_rng)
+EENTRY(xscalec3_cache_cleanD_rng)
 	cmp	r1, #0x4000
 	bcs	_C_LABEL(xscalec3_cache_cleanID)
@@ -257,8 +258,8 @@ ENTRY(xscalec3_cache_cleanD_rng)
 	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
 	CPWAIT_AND_RETURN(r0)
+EEND(xscalec3_cache_cleanD_rng)
 END(xscalec3_cache_cleanID_rng)
-END(xscalec3_cache_cleanD_rng)
 
 ENTRY(xscalec3_l2cache_purge)
 	/* Clean-up the L2 cache */

Modified: head/sys/arm/arm/exception.S
==============================================================================
--- head/sys/arm/arm/exception.S	Fri Aug  1 17:24:36 2014	(r269389)
+++ head/sys/arm/arm/exception.S	Fri Aug  1 18:24:44 2014	(r269390)
@@ -280,12 +280,12 @@ ASENTRY_NP(swi_entry)
  * that a newly created thread appears to return from a SWI just like
  * the parent thread that created it.
  */
-ASENTRY_NP(swi_exit)
+ASEENTRY_NP(swi_exit)
 	DO_AST				/* Handle pending signals. */
 	PULLFRAME			/* Deallocate trapframe. */
 	movs	pc, lr			/* Return to userland. */
 	STOP_UNWINDING			/* Don't unwind into user mode. */
-END(swi_exit)
+EEND(swi_exit)
 END(swi_entry)
 
 /*

Modified: head/sys/arm/arm/fusu.S
==============================================================================
--- head/sys/arm/arm/fusu.S	Fri Aug  1 17:24:36 2014	(r269389)
+++ head/sys/arm/arm/fusu.S	Fri Aug  1 18:24:44 2014	(r269390)
@@ -54,8 +54,8 @@ __FBSDID("$FreeBSD$");
  * Fetch an int from the user's address space.
  */
 
-ENTRY_NP(casuword32)
 ENTRY(casuword)
+EENTRY_NP(casuword32)
 	GET_PCB(r3)
 	ldr	r3, [r3]
 
@@ -91,7 +91,7 @@ ENTRY(casuword)
 	mov	r1, #0x00000000
 	str	r1, [r3, #PCB_ONFAULT]
 	RET
-END(casuword32)
+EEND(casuword32)
 END(casuword)
 
 /*
@@ -110,8 +110,8 @@ END(casuword)
  * Fetch an int from the user's address space.
  */
 
-ENTRY_NP(fuword32)
 ENTRY(fuword)
+EENTRY_NP(fuword32)
 	GET_PCB(r2)
 	ldr	r2, [r2]
 
@@ -277,8 +277,8 @@ fusupcbfaulttext:
  * Store an int in the user's address space.
  */
 
-ENTRY_NP(suword32)
 ENTRY(suword)
+EENTRY_NP(suword32)
 	GET_PCB(r2)
 	ldr	r2, [r2]
 
@@ -390,4 +390,3 @@ ENTRY(subyte)
 	str	r0, [r2, #PCB_ONFAULT]
 	RET
 END(subyte)
-

Modified: head/sys/arm/arm/locore.S
==============================================================================
--- head/sys/arm/arm/locore.S	Fri Aug  1 17:24:36 2014	(r269389)
+++ head/sys/arm/arm/locore.S	Fri Aug  1 18:24:44 2014	(r269390)
@@ -75,7 +75,8 @@ __FBSDID("$FreeBSD$");
 * For both types of boot we gather up the args, put them in a struct arm_boot_params
 * structure and pass that to initarm.
 */
-ENTRY_NP(btext)
+	.globl	btext
+btext:
 ASENTRY_NP(_start)
 	STOP_UNWINDING		/* Can't unwind into the bootloader! */
 
@@ -285,7 +286,6 @@ virt_done:
 	adr	r0, .Lmainreturned
 	b	_C_LABEL(panic)
 	/* NOTREACHED */
-END(btext)
 END(_start)
 
 /*
@@ -548,7 +548,7 @@ ENTRY_NP(sigcode)
 	/* Branch back to retry SYS_sigreturn */
 	b	. - 16
-
+END(sigcode)
 	.word	SYS_sigreturn
 	.word	SYS_exit
 
@@ -560,5 +560,5 @@ ENTRY_NP(sigcode)
 	.global	szsigcode
 szsigcode:
 	.long	esigcode-sigcode
-END(sigcode)
+
 /* End of locore.S */

Modified: head/sys/arm/arm/setstack.s
==============================================================================
--- head/sys/arm/arm/setstack.s	Fri Aug  1 17:24:36 2014	(r269389)
+++ head/sys/arm/arm/setstack.s	Fri Aug  1 18:24:44 2014	(r269390)
@@ -71,7 +71,7 @@ ENTRY(set_stackptr)
 	msr	cpsr_fsxc, r3		/* Restore the old mode */
 	mov	pc, lr			/* Exit */
 
-
+END(set_stackptr)
 /* To get the stack pointer for a particular mode we must switch
  * to that mode copy the banked r13 and then switch back.
  * This routine provides an easy way of doing this for any mode
@@ -90,5 +90,5 @@ ENTRY(get_stackptr)
 	msr	cpsr_fsxc, r3		/* Restore the old mode */
 	mov	pc, lr			/* Exit */
 
-
+END(get_stackptr)
 /* End of setstack.S */

Modified: head/sys/arm/arm/support.S
==============================================================================
--- head/sys/arm/arm/support.S	Fri Aug  1 17:24:36 2014	(r269389)
+++ head/sys/arm/arm/support.S	Fri Aug  1 18:24:44 2014	(r269390)
@@ -130,7 +130,7 @@ ENTRY(bzero)
 .Lnormal0:
 	mov	r3, #0x00
 	b	do_memset
-
+EEND(bzero)
 /* LINTSTUB: Func: void *memset(void *, int, size_t) */
 ENTRY(memset)
 	and	r3, r1, #0xff		/* We deal with bytes */
@@ -276,7 +276,6 @@ do_memset:
 	strgeb	r3, [ip], #0x01		/* Set another byte */
 	strgtb	r3, [ip]		/* and a third */
 	RET				/* Exit */
-END(bzero)
 END(memset)
 
 ENTRY(bcmp)
@@ -394,7 +393,7 @@ ENTRY(bcopy)
 	eor	r0, r1, r0
 	eor	r1, r0, r1
 	eor	r0, r1, r0
-ENTRY(memmove)
+EENTRY(memmove)
 	/* Do the buffers overlap? */
 	cmp	r0, r1
 	RETeq			/* Bail now if src/dst are the same */
@@ -931,8 +930,8 @@ ENTRY(memmove)
 .Lmemmove_bsrcul1l4:
 	add	r1, r1, #1
 	b	.Lmemmove_bl4
+EEND(memmove)
 END(bcopy)
-END(memmove)
 
 #if !defined(_ARM_ARCH_5E)
 ENTRY(memcpy)
@@ -2945,13 +2944,17 @@ END(memcpy)
 
 ENTRY(user)
 	nop
+END(user)
 ENTRY(btrap)
 	nop
+END(btrap)
 ENTRY(etrap)
 	nop
+END(etrap)
 ENTRY(bintr)
 	nop
+END(bintr)
 ENTRY(eintr)
 	nop
-
+END(eintr)
 #endif

Modified: head/sys/arm/include/asm.h
==============================================================================
--- head/sys/arm/include/asm.h	Fri Aug  1 17:24:36 2014	(r269389)
+++ head/sys/arm/include/asm.h	Fri Aug  1 18:24:44 2014	(r269390)
@@ -74,9 +74,20 @@
 #define	GLOBAL(X) .globl x
 
 #define	_ENTRY(x) \
 	.text; _ALIGN_TEXT; .globl x; .type x,_ASM_TYPE_FUNCTION; x: _FNSTART
-
 #define	_END(x)	.size x, . - x; _FNEND
+/*
+ * EENTRY()/EEND() mark "extra" entry/exit points from a function.
+ * The unwind info cannot handle the concept of a nested function, or a function
+ * with multiple .fnstart directives, but some of our assembler code is written
+ * with multiple labels to allow entry at several points.  The EENTRY() macro
+ * defines such an extra entry point without a new .fnstart, so that it's
+ * basically just a label that you can jump to.  The EEND() macro does nothing
+ * at all, except document the exit point associated with the same-named entry.
+ */
+#define	_EENTRY(x)	.globl x; .type x,_ASM_TYPE_FUNCTION; x:
+#define	_EEND(x)	/* nothing */
+
 #ifdef GPROF
 # define _PROF_PROLOGUE	\
 	mov ip, lr; bl __mcount
@@ -85,11 +96,17 @@
 #endif
 
 #define	ENTRY(y)	_ENTRY(_C_LABEL(y)); _PROF_PROLOGUE
+#define	EENTRY(y)	_EENTRY(_C_LABEL(y)); _PROF_PROLOGUE
 #define	ENTRY_NP(y)	_ENTRY(_C_LABEL(y))
+#define	EENTRY_NP(y)	_EENTRY(_C_LABEL(y))
 #define	END(y)		_END(_C_LABEL(y))
+#define	EEND(y)
 #define	ASENTRY(y)	_ENTRY(_ASM_LABEL(y)); _PROF_PROLOGUE
+#define	ASEENTRY(y)	_EENTRY(_ASM_LABEL(y)); _PROF_PROLOGUE
 #define	ASENTRY_NP(y)	_ENTRY(_ASM_LABEL(y))
+#define	ASEENTRY_NP(y)	_EENTRY(_ASM_LABEL(y))
 #define	ASEND(y)	_END(_ASM_LABEL(y))
+#define	ASEEND(y)
 
 #define	ASMSTR		.asciz

Modified: head/sys/libkern/arm/divsi3.S
==============================================================================
--- head/sys/libkern/arm/divsi3.S	Fri Aug  1 17:24:36 2014	(r269389)
+++ head/sys/libkern/arm/divsi3.S	Fri Aug  1 18:24:44 2014	(r269390)
@@ -51,11 +51,11 @@ ENTRY_NP(__modsi3)
 	RET
 END(__modsi3)
 
+ENTRY_NP(__udivsi3)
 #ifdef __ARM_EABI__
-ENTRY_NP(__aeabi_uidiv)
-ENTRY_NP(__aeabi_uidivmod)
+EENTRY_NP(__aeabi_uidiv)
+EENTRY_NP(__aeabi_uidivmod)
 #endif
-ENTRY_NP(__udivsi3)
 .L_udivide:	/* r0 = r0 / r1; r1 = r0 % r1 */
 	eor	r0, r1, r0
 	eor	r1, r0, r1
@@ -77,16 +77,16 @@ ENTRY_NP(__udivsi3)
 	mov	r1, #0
 	RET
 #ifdef __ARM_EABI__
-END(__aeabi_uidiv)
-END(__aeabi_uidivmod)
+EEND(__aeabi_uidiv)
+EEND(__aeabi_uidivmod)
 #endif
 END(__udivsi3)
 
+ENTRY_NP(__divsi3)
 #ifdef __ARM_EABI__
-ENTRY_NP(__aeabi_idiv)
-ENTRY_NP(__aeabi_idivmod)
+EENTRY_NP(__aeabi_idiv)
+EENTRY_NP(__aeabi_idivmod)
 #endif
-ENTRY_NP(__divsi3)
 .L_divide:	/* r0 = r0 / r1; r1 = r0 % r1 */
 	eor	r0, r1, r0
 	eor	r1, r0, r1
@@ -401,8 +401,8 @@ ENTRY_NP(__divsi3)
 	mov	r0, r3
 	RET
 #ifdef __ARM_EABI__
-END(__aeabi_idiv)
-END(__aeabi_idivmod)
+EEND(__aeabi_idiv)
+EEND(__aeabi_idivmod)
#endif
 END(__divsi3)
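
For readers following along, here is a minimal sketch of the fall-through pattern the new
macros express (the foo_* function names are hypothetical; the macros are the ones added to
sys/arm/include/asm.h above).  The outer function owns the only .fnstart/.fnend pair, and the
extra entry point is just a label:

#include <machine/asm.h>

ENTRY_NP(foo_idcache_wbinv_all)
	mcr	p15, 0, r0, c7, c5, 0	/* flush I cache, then fall through */
EENTRY(foo_dcache_wbinv_all)		/* extra entry point, no new .fnstart */
	mcr	p15, 0, r0, c7, c14, 0	/* clean & invalidate D cache */
	RET
EEND(foo_dcache_wbinv_all)		/* documents the extra exit; expands to nothing */
END(foo_idcache_wbinv_all)		/* single .fnend closes the whole region */

An unwinder entering through either label then sees one contiguous .fnstart/.fnend region
instead of an illegally nested pair.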