From owner-p4-projects@FreeBSD.ORG Mon Aug 2 19:04:37 2004
Return-Path:
Delivered-To: p4-projects@freebsd.org
Received: by hub.freebsd.org (Postfix, from userid 32767)
	id 92C6C16A4D0; Mon, 2 Aug 2004 19:04:37 +0000 (GMT)
Delivered-To: perforce@freebsd.org
Received: from mx1.FreeBSD.org (mx1.freebsd.org [216.136.204.125])
	by hub.freebsd.org (Postfix) with ESMTP id 6D42316A4CE
	for ; Mon, 2 Aug 2004 19:04:37 +0000 (GMT)
Received: from repoman.freebsd.org (repoman.freebsd.org [216.136.204.115])
	by mx1.FreeBSD.org (Postfix) with ESMTP id 3EC6A43D75
	for ; Mon, 2 Aug 2004 19:04:37 +0000 (GMT)
	(envelope-from dfr@freebsd.org)
Received: from repoman.freebsd.org (localhost [127.0.0.1])
	by repoman.freebsd.org (8.12.11/8.12.11) with ESMTP id i72J4ba2032298
	for ; Mon, 2 Aug 2004 19:04:37 GMT
	(envelope-from dfr@freebsd.org)
Received: (from perforce@localhost)
	by repoman.freebsd.org (8.12.11/8.12.11/Submit) id i72J4aJl032295
	for perforce@freebsd.org; Mon, 2 Aug 2004 19:04:36 GMT
	(envelope-from dfr@freebsd.org)
Date: Mon, 2 Aug 2004 19:04:36 GMT
Message-Id: <200408021904.i72J4aJl032295@repoman.freebsd.org>
X-Authentication-Warning: repoman.freebsd.org: perforce set sender to dfr@freebsd.org using -f
From: Doug Rabson
To: Perforce Change Reviews
Subject: PERFORCE change 58720 for review
X-BeenThere: p4-projects@freebsd.org
X-Mailman-Version: 2.1.1
Precedence: list
List-Id: p4 projects tree changes
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
X-List-Received-Date: Mon, 02 Aug 2004 19:04:38 -0000

http://perforce.freebsd.org/chv.cgi?CH=58720

Change 58720 by dfr@dfr_home on 2004/08/02 19:04:28

	Factor out allocate_tls() and free_tls() since there are only
	two implementations - Variant I and Variant II which can be
	shared between the various cpu arches.  Fix all the tier one
	and tier two platforms so that they compile.  Of these, amd64
	and ia64 might work (i.e. the code is written but untested).
	Code needs to be added to alpha and sparc64 to support the TLS
	relocations.
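For reference, the two layouts differ roughly as sketched below; this
standalone program is only an illustration, not code from this change,
and the sizes and the single fake module are invented (the real code
sizes everything from the Obj_Entry list and rtld's tls_* globals).

/*
 * Rough illustration of the two static TLS layouts that the shared
 * allocate_tls() in this change implements.  Invented sizes; one fake
 * module.  Not part of the change itself.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define TLS_SIZE 128			/* fake static TLS space */

int
main(void)
{
	uintptr_t *dtv = calloc(3, sizeof(uintptr_t));	/* gen, len, module 1 */

	/*
	 * Variant I (ia64, alpha): the thread pointer is the start of
	 * the block, word 0 holds the dtv, and module data lives at
	 * positive offsets from the thread pointer.
	 */
	char *tls1 = calloc(1, TLS_SIZE);
	*(uintptr_t **)tls1 = dtv;
	printf("Variant I:  tp=%p dtv=%p module data at tp+16 = %p\n",
	    (void *)tls1, (void *)dtv, (void *)(tls1 + 16));

	/*
	 * Variant II (i386, amd64, sparc64): the thread pointer is the
	 * top of the block (the TCB), word 0 is a self pointer, word 1
	 * the dtv, and module data sits at negative offsets.
	 */
	char *tls2 = calloc(1, TLS_SIZE + 2 * sizeof(uintptr_t));
	uintptr_t *tcb = (uintptr_t *)(tls2 + TLS_SIZE);
	tcb[0] = (uintptr_t)tcb;		/* self pointer */
	tcb[1] = (uintptr_t)dtv;
	printf("Variant II: tp=%p dtv=%p module data at tp-16 = %p\n",
	    (void *)tcb, (void *)dtv, (void *)((char *)tcb - 16));

	free(tls1);
	free(tls2);
	free(dtv);
	return 0;
}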
Affected files ...

.. //depot/projects/kse/libexec/rtld-elf/alpha/reloc.c#3 edit
.. //depot/projects/kse/libexec/rtld-elf/amd64/reloc.c#4 edit
.. //depot/projects/kse/libexec/rtld-elf/i386/reloc.c#10 edit
.. //depot/projects/kse/libexec/rtld-elf/ia64/reloc.c#6 edit
.. //depot/projects/kse/libexec/rtld-elf/rtld.c#13 edit
.. //depot/projects/kse/libexec/rtld-elf/rtld.h#8 edit
.. //depot/projects/kse/libexec/rtld-elf/sparc64/reloc.c#4 edit

Differences ...

==== //depot/projects/kse/libexec/rtld-elf/alpha/reloc.c#3 (text+ko) ====

@@ -509,36 +509,15 @@
 void
 allocate_initial_tls(Obj_Entry *list)
 {
-    Obj_Entry *obj;
-    size_t size;
-    char *tls;
-    Elf_Addr *dtv;
-    union descriptor ldt;
-    Elf_Addr segbase;
-    int sel;
+    void *tls;
 
-    size = 0;
-    for (obj = list; obj; obj = obj->next) {
-        if (obj->tlsoffset + obj->tlssize > size)
-            size = obj->tlsoffset + obj->tlssize;
-    }
-
-    tls = malloc(size);
-    dtv = malloc((tls_max_index + 2) * sizeof(Elf_Addr));
-
-    *(Elf_Addr**) tls = dtv;
-
-    dtv[0] = tls_dtv_generation;
-    dtv[1] = tls_max_index;
-    for (obj = list; obj; obj = obj->next) {
-        Elf_Addr addr = tls + obj->tlsoffset;
-        memset((void*) (addr + obj->tlsinitsize),
-               0, obj->tlssize - obj->tlsinitsize);
-        if (obj->tlsinit)
-            memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize);
-        dtv[obj->tlsindex] = addr;
-    }
-
+    /*
+     * Fix the size of the static TLS block by using the maximum
+     * offset allocated so far and adding a bit for dynamic modules to
+     * use.
+     */
+    tls_static_space = tls_last_offset + tls_last_size + RTLD_STATIC_TLS_EXTRA;
+    tls = allocate_tls(list, 0, 16, 16);
     alpha_pal_wrunique((u_int64_t) tls);
 }
 
@@ -546,5 +525,5 @@
 {
     Elf_Addr** tp = (Elf_Addr**) alpha_pal_rdunique();
 
-    return tls_get_addr_common(&tp[0], ti->ti_module, ti->ti_offset);
+    return tls_get_addr_common(tp, ti->ti_module, ti->ti_offset);
 }

==== //depot/projects/kse/libexec/rtld-elf/amd64/reloc.c#4 (text+ko) ====

@@ -33,7 +33,7 @@
 #include
 #include
-#include
+#include
 #include
 #include

@@ -214,7 +214,7 @@
     const Elf_Sym *def;
     const Obj_Entry *defobj;
 
-    def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj,
+    def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                       false, cache);
     if (def == NULL)
         goto done;

@@ -235,7 +235,8 @@
         }
     }
 
-    *where = (Elf_Addr) (def->st_value - defobj->tlsoffset);
+    *where = (Elf_Addr) (def->st_value - defobj->tlsoffset +
+                         rela->r_addend);
 }
 break;

@@ -244,7 +245,7 @@
     const Elf_Sym *def;
     const Obj_Entry *defobj;
 
-    def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj,
+    def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                       false, cache);
     if (def == NULL)
         goto done;

@@ -265,7 +266,9 @@
         }
     }
 
-    *where32 = (Elf32_Addr) (def->st_value - defobj->tlsoffset);
+    *where32 = (Elf32_Addr) (def->st_value -
+                             defobj->tlsoffset +
+                             rela->r_addend);
 }
 break;

@@ -274,7 +277,7 @@
     const Elf_Sym *def;
     const Obj_Entry *defobj;
 
-    def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj,
+    def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                       false, cache);
     if (def == NULL)
         goto done;

@@ -288,12 +291,12 @@
     const Elf_Sym *def;
     const Obj_Entry *defobj;
 
-    def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj,
+    def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                       false, cache);
     if (def == NULL)
         goto done;
 
-    *where += (Elf_Addr) def->st_value;
+    *where += (Elf_Addr) (def->st_value + rela->r_addend);
 }
 break;

@@ -302,12 +305,12 @@
     const Elf_Sym *def;
     const Obj_Entry *defobj;
 
-    def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj,
+    def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
                       false, cache);
     if (def == NULL)
         goto done;
 
-    *where32 += (Elf32_Addr) def->st_value;
+    *where32 += (Elf32_Addr) (def->st_value + rela->r_addend);
 }
 break;

@@ -378,82 +381,17 @@
     return 0;
 }
 
-void *
-allocate_tls(Objlist* list, size_t tcbsize, size_t tcbalign)
-{
-    Objlist_Entry *entry;
-    Obj_Entry *obj;
-    size_t size;
-    char *tls;
-    Elf_Addr *dtv;
-    Elf_Addr segbase, addr;
-
-    size = round(tls_static_space, tcbalign);
-
-    assert(tcbsize >= 2*sizeof(Elf_Addr));
-    tls = malloc(size + tcbsize);
-    dtv = malloc((tls_max_index + 2) * sizeof(Elf_Addr));
-
-    segbase = (Elf_Addr)(tls + size);
-    ((Elf_Addr*)segbase)[0] = segbase;
-    ((Elf_Addr*)segbase)[1] = (Elf_Addr) dtv;
-
-    dtv[0] = tls_dtv_generation;
-    dtv[1] = tls_max_index;
-    STAILQ_FOREACH(entry, list, link) {
-        obj = entry->obj;
-        if (obj->tlsoffset) {
-            addr = segbase - obj->tlsoffset;
-            memset((void*) (addr + obj->tlsinitsize),
-                   0, obj->tlssize - obj->tlsinitsize);
-            if (obj->tlsinit)
-                memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize);
-            dtv[obj->tlsindex + 1] = addr;
-        } else if (obj->tlsindex) {
-            dtv[obj->tlsindex + 1] = 0;
-        }
-    }
-
-    return (void*) segbase;
-}
-
 void
-free_tls(Objlist *list, void *tls, size_t tcbsize, size_t tcbalign)
+allocate_initial_tls(Obj_Entry *objs)
 {
-    size_t size;
-    Elf_Addr* dtv;
-    int dtvsize, i;
-    Elf_Addr tlsstart, tlsend;
-
-    /*
-     * Figure out the size of the initial TLS block so that we can
-     * find stuff which ___tls_get_addr() allocated dynamically.
-     */
-    size = round(tls_static_space, tcbalign);
-
-    dtv = ((Elf_Addr**)tls)[1];
-    dtvsize = dtv[1];
-    tlsend = (Elf_Addr) tls;
-    tlsstart = tlsend - size;
-    for (i = 0; i < dtvsize; i++) {
-        if (dtv[i+2] < tlsstart || dtv[i+2] > tlsend) {
-            free((void*) dtv[i+2]);
-        }
-    }
-
-    free((void*) tlsstart);
-}
-
-void
-allocate_initial_tls(Obj_Entry *list)
-{
-    /*
     * Fix the size of the static TLS block by using the maximum
     * offset allocated so far and adding a bit for dynamic modules to
     * use.
     */
-    tls_static_space = tls_last_offset + 64;
-    amd64_set_fsbase(allocate_tls(list, 2*sizeof(Elf_Addr), 4));
+    tls_static_space = tls_last_offset + RTLD_STATIC_TLS_EXTRA;
+    amd64_set_fsbase(allocate_tls(objs, 0,
+                                  2*sizeof(Elf_Addr), sizeof(Elf_Addr)));
 }

@@ -461,7 +399,7 @@
     Elf_Addr** segbase;
     Elf_Addr* dtv;
 
-    __asm __volatile("movl %%fs:0, %0" : "=r" (segbase));
+    __asm __volatile("movq %%fs:0, %0" : "=r" (segbase));
     dtv = segbase[1];
 
     return tls_get_addr_common(&segbase[1], ti->ti_module, ti->ti_offset);
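The movl to movq change in __tls_get_addr() above matters because the
value at %fs:0 is a full 64-bit pointer on amd64: with Variant II the
first word of the TCB is a self pointer, so a 32-bit move would
truncate it.  A tiny standalone check along these lines (only an
illustration; it uses GCC-style inline asm as the rtld code does, and
assumes the C runtime has already installed a TCB for the initial
thread) prints the thread pointer:

#include <stdio.h>

int
main(void)
{
	void *tcb;

	/* Same access pattern as the fixed __tls_get_addr() above. */
	__asm __volatile("movq %%fs:0, %0" : "=r" (tcb));
	printf("TCB self pointer: %p\n", tcb);
	return 0;
}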
==== //depot/projects/kse/libexec/rtld-elf/i386/reloc.c#10 (text+ko) ====

@@ -348,101 +348,7 @@
     return 0;
 }
 
-void *
-allocate_tls(Obj_Entry *objs, void *oldtls, size_t tcbsize, size_t tcbalign)
-{
-    Obj_Entry *obj;
-    size_t size;
-    char *tls;
-    Elf_Addr *dtv, *olddtv;
-    Elf_Addr segbase, oldsegbase, addr;
-    int i;
-
-    size = round(tls_static_space, tcbalign);
-
-    assert(tcbsize >= 2*sizeof(Elf_Addr));
-    tls = malloc(size + tcbsize);
-    dtv = malloc((tls_max_index + 2) * sizeof(Elf_Addr));
-
-    segbase = (Elf_Addr)(tls + size);
-    ((Elf_Addr*)segbase)[0] = segbase;
-    ((Elf_Addr*)segbase)[1] = (Elf_Addr) dtv;
-
-    dtv[0] = tls_dtv_generation;
-    dtv[1] = tls_max_index;
-
-    if (oldtls) {
-        /*
-         * Copy the static TLS block over whole.
-         */
-        oldsegbase = (Elf_Addr) oldtls;
-        memcpy((void *)(segbase - tls_static_space),
-               (const void *)(oldsegbase - tls_static_space),
-               tls_static_space);
-
-        /*
-         * If any dynamic TLS blocks have been created tls_get_addr(),
-         * move them over.
-         */
-        olddtv = ((Elf_Addr**)oldsegbase)[1];
-        for (i = 0; i < olddtv[1]; i++) {
-            if (olddtv[i+2] < oldsegbase - size || olddtv[i+2] > oldsegbase) {
-                dtv[i+2] = olddtv[i+2];
-                olddtv[i+2] = 0;
-            }
-        }
-
-        /*
-         * We assume that this block was the one we created below with
-         * allocate_initial_tls().
-         */
-        free_tls(oldtls, 2*sizeof(Elf_Addr), 4);
-    } else {
-        for (obj = objs; obj; obj = obj->next) {
-            if (obj->tlsoffset) {
-                addr = segbase - obj->tlsoffset;
-                memset((void*) (addr + obj->tlsinitsize),
-                       0, obj->tlssize - obj->tlsinitsize);
-                if (obj->tlsinit)
-                    memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize);
-                dtv[obj->tlsindex + 1] = addr;
-            } else if (obj->tlsindex) {
-                dtv[obj->tlsindex + 1] = 0;
-            }
-        }
-    }
-
-    return (void*) segbase;
-}
-
 void
-free_tls(void *tls, size_t tcbsize, size_t tcbalign)
-{
-    size_t size;
-    Elf_Addr* dtv;
-    int dtvsize, i;
-    Elf_Addr tlsstart, tlsend;
-
-    /*
-     * Figure out the size of the initial TLS block so that we can
-     * find stuff which ___tls_get_addr() allocated dynamically.
-     */
-    size = round(tls_static_space, tcbalign);
-
-    dtv = ((Elf_Addr**)tls)[1];
-    dtvsize = dtv[1];
-    tlsend = (Elf_Addr) tls;
-    tlsstart = tlsend - size;
-    for (i = 0; i < dtvsize; i++) {
-        if (dtv[i+2] < tlsstart || dtv[i+2] > tlsend) {
-            free((void*) dtv[i+2]);
-        }
-    }
-
-    free((void*) tlsstart);
-}
-
-void
 allocate_initial_tls(Obj_Entry *objs)
 {
     void* tls;

@@ -454,8 +360,8 @@
     * offset allocated so far and adding a bit for dynamic modules to
     * use.
    */
-    tls_static_space = tls_last_offset + 64;
-    tls = allocate_tls(objs, NULL, 2*sizeof(Elf_Addr), 4);
+    tls_static_space = tls_last_offset + RTLD_STATIC_TLS_EXTRA;
+    tls = allocate_tls(objs, NULL, 2*sizeof(Elf_Addr), sizeof(Elf_Addr));
 
     memset(&ldt, 0, sizeof(ldt));
     ldt.sd.sd_lolimit = 0xffff;    /* 4G limit */

==== //depot/projects/kse/libexec/rtld-elf/ia64/reloc.c#6 (text+ko) ====

@@ -284,7 +284,6 @@
     case R_IA64_DTPMOD64LSB: {
         const Elf_Sym *def;
         const Obj_Entry *defobj;
-        Elf_Addr target;
 
         def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
             false, cache);

@@ -298,7 +297,6 @@
     case R_IA64_DTPREL64LSB: {
         const Elf_Sym *def;
         const Obj_Entry *defobj;
-        Elf_Addr target;
 
         def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
             false, cache);

@@ -312,7 +310,6 @@
     case R_IA64_TPREL64LSB: {
         const Elf_Sym *def;
         const Obj_Entry *defobj;
-        Elf_Addr target;
 
         def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
             false, cache);

@@ -617,91 +614,23 @@
 }
 
 void
-allocate_tls(Objlist *list, size_t tcbsize, size_t tcbalign)
-{
-    Objlist_Entry *entry;
-    Obj_Entry *obj;
-    size_t size;
-    char *tls;
-    Elf_Addr *dtv;
-    union descriptor ldt;
-    Elf_Addr segbase, addr;
-    int sel;
-
-    assert(tcbsize == 16);
-    assert(tcbalign == 16);
-
-    size = tls_static_space;
-
-    tls = malloc(size);
-    dtv = malloc((tls_max_index + 2) * sizeof(Elf_Addr));
-
-    *(Elf_Addr**) tls = dtv;
-
-    dtv[0] = tls_dtv_generation;
-    dtv[1] = tls_max_index;
-    STAILQ_FOREACH(entry, list, link) {
-        obj = entry->obj;
-        if (obj->tlsoffset) {
-            addr = tls + obj->tlsoffset;
-            memset((void*) (addr + obj->tlsinitsize),
-                   0, obj->tlssize - obj->tlsinitsize);
-            if (obj->tlsinit)
-                memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize);
-            dtv[obj->tlsindex + 1] = addr;
-        } else if (obj->tlsindex) {
-            dtv[obj->tlsindex + 1] = 0;
-        }
-    }
-
-    return tls;
-}
-
-void
-free_tls(Objlist *list, void *tls, size_t tcbsize, size_t tcbalign)
-{
-    size_t size;
-    Elf_Addr* dtv;
-    int dtvsize, i;
-    Elf_Addr tlsstart, tlsend;
-
-    /*
-     * Figure out the size of the initial TLS block so that we can
-     * find stuff which __tls_get_addr() allocated dynamically.
-     */
-    size = tls_static_space;
-
-    dtv = ((Elf_Addr**)tls)[0];
-    dtvsize = dtv[1];
-    tlsstart = (Elf_Addr) tls;
-    tlsend = tlsstart + size;
-    for (i = 0; i < dtvsize; i++) {
-        if (dtv[i+2] < tlsstart || dtv[i+2] > tlsend) {
-            free((void*) dtv[i+2]);
-        }
-    }
-
-    free((void*) tlsstart);
-}
-
-void
 allocate_initial_tls(Obj_Entry *list)
 {
-    register struct Elf_Addr** tp __asm__("r13");
+    register Elf_Addr** tp __asm__("r13");
 
     /*
      * Fix the size of the static TLS block by using the maximum
      * offset allocated so far and adding a bit for dynamic modules to
      * use.
      */
-    tls_static_space = tls_last_offset + tls_last_size + 64;
+    tls_static_space = tls_last_offset + tls_last_size + RTLD_STATIC_TLS_EXTRA;
 
-    tp = allocate_tls(list, 16, 16);
+    tp = allocate_tls(list, 0, 16, 16);
 }
 
-void *__tls_get_addr(unsigned int module, unsigned int offset)
+void *__tls_get_addr(unsigned long module, unsigned long offset)
 {
-    register struct Elf_Addr** tp __asm__("r13");
+    register Elf_Addr** tp __asm__("r13");
 
-    return tls_get_addr_common(&tp[0], module, offset);
+    return tls_get_addr_common(tp, module, offset);
 }

==== //depot/projects/kse/libexec/rtld-elf/rtld.c#13 (text+ko) ====

@@ -2508,6 +2508,209 @@
     return (void*) (dtv[index + 1] + offset);
 }
 
+/* XXX not sure what variants to use for arm and powerpc. */
+
+#if defined(__ia64__) || defined(__alpha__)
+
+/*
+ * Allocate Static TLS using the Variant I method.
+ */
+void *
+allocate_tls(Obj_Entry *objs, void *oldtls, size_t tcbsize, size_t tcbalign)
+{
+    Obj_Entry *obj;
+    size_t size;
+    char *tls;
+    Elf_Addr *dtv, *olddtv;
+    Elf_Addr addr;
+    int i;
+
+    assert(tcbsize == 16);
+    assert(tcbalign == 16);
+
+    size = tls_static_space;
+
+    tls = malloc(size);
+    dtv = malloc((tls_max_index + 2) * sizeof(Elf_Addr));
+
+    *(Elf_Addr**) tls = dtv;
+
+    dtv[0] = tls_dtv_generation;
+    dtv[1] = tls_max_index;
+
+    if (oldtls) {
+        /*
+         * Copy the static TLS block over whole.
+         */
+        memcpy(tls + tcbsize, oldtls + tcbsize, tls_static_space - tcbsize);
+
+        /*
+         * If any dynamic TLS blocks have been created tls_get_addr(),
+         * move them over.
+         */
+        olddtv = *(Elf_Addr**) oldtls;
+        for (i = 0; i < olddtv[1]; i++) {
+            if (olddtv[i+2] < (Elf_Addr)oldtls ||
+                olddtv[i+2] > (Elf_Addr)oldtls + tls_static_space) {
+                dtv[i+2] = olddtv[i+2];
+                olddtv[i+2] = 0;
+            }
+        }
+
+        /*
+         * We assume that all tls blocks are allocated with the same
+         * size and alignment.
+         */
+        free_tls(oldtls, tcbsize, tcbalign);
+    } else {
+        for (obj = objs; obj; obj = obj->next) {
+            if (obj->tlsoffset) {
+                addr = (Elf_Addr)tls + obj->tlsoffset;
+                memset((void*) (addr + obj->tlsinitsize),
+                       0, obj->tlssize - obj->tlsinitsize);
+                if (obj->tlsinit)
+                    memcpy((void*) addr, obj->tlsinit,
+                           obj->tlsinitsize);
+                dtv[obj->tlsindex + 1] = addr;
+            } else if (obj->tlsindex) {
+                dtv[obj->tlsindex + 1] = 0;
+            }
+        }
+    }
+
+    return tls;
+}
+
+void
+free_tls(void *tls, size_t tcbsize, size_t tcbalign)
+{
+    size_t size;
+    Elf_Addr* dtv;
+    int dtvsize, i;
+    Elf_Addr tlsstart, tlsend;
+
+    /*
+     * Figure out the size of the initial TLS block so that we can
+     * find stuff which __tls_get_addr() allocated dynamically.
+     */
+    size = tls_static_space;
+
+    dtv = ((Elf_Addr**)tls)[0];
+    dtvsize = dtv[1];
+    tlsstart = (Elf_Addr) tls;
+    tlsend = tlsstart + size;
+    for (i = 0; i < dtvsize; i++) {
+        if (dtv[i+2] < tlsstart || dtv[i+2] > tlsend) {
+            free((void*) dtv[i+2]);
+        }
+    }
+
+    free((void*) tlsstart);
+}
+
+#endif
+
+#if defined(__i386__) || defined(__amd64__) || defined(__sparc64__)
+
+/*
+ * Allocate Static TLS using the Variant II method.
+ */
+void *
+allocate_tls(Obj_Entry *objs, void *oldtls, size_t tcbsize, size_t tcbalign)
+{
+    Obj_Entry *obj;
+    size_t size;
+    char *tls;
+    Elf_Addr *dtv, *olddtv;
+    Elf_Addr segbase, oldsegbase, addr;
+    int i;
+
+    size = round(tls_static_space, tcbalign);
+
+    assert(tcbsize >= 2*sizeof(Elf_Addr));
+    tls = malloc(size + tcbsize);
+    dtv = malloc((tls_max_index + 2) * sizeof(Elf_Addr));
+
+    segbase = (Elf_Addr)(tls + size);
+    ((Elf_Addr*)segbase)[0] = segbase;
+    ((Elf_Addr*)segbase)[1] = (Elf_Addr) dtv;
+
+    dtv[0] = tls_dtv_generation;
+    dtv[1] = tls_max_index;
+
+    if (oldtls) {
+        /*
+         * Copy the static TLS block over whole.
+         */
+        oldsegbase = (Elf_Addr) oldtls;
+        memcpy((void *)(segbase - tls_static_space),
+               (const void *)(oldsegbase - tls_static_space),
+               tls_static_space);
+
+        /*
+         * If any dynamic TLS blocks have been created tls_get_addr(),
+         * move them over.
+         */
+        olddtv = ((Elf_Addr**)oldsegbase)[1];
+        for (i = 0; i < olddtv[1]; i++) {
+            if (olddtv[i+2] < oldsegbase - size || olddtv[i+2] > oldsegbase) {
+                dtv[i+2] = olddtv[i+2];
+                olddtv[i+2] = 0;
+            }
+        }
+
+        /*
+         * We assume that this block was the one we created with
+         * allocate_initial_tls().
+         */
+        free_tls(oldtls, 2*sizeof(Elf_Addr), sizeof(Elf_Addr));
+    } else {
+        for (obj = objs; obj; obj = obj->next) {
+            if (obj->tlsoffset) {
+                addr = segbase - obj->tlsoffset;
+                memset((void*) (addr + obj->tlsinitsize),
+                       0, obj->tlssize - obj->tlsinitsize);
+                if (obj->tlsinit)
+                    memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize);
+                dtv[obj->tlsindex + 1] = addr;
+            } else if (obj->tlsindex) {
+                dtv[obj->tlsindex + 1] = 0;
+            }
+        }
+    }
+
+    return (void*) segbase;
+}
+
+void
+free_tls(void *tls, size_t tcbsize, size_t tcbalign)
+{
+    size_t size;
+    Elf_Addr* dtv;
+    int dtvsize, i;
+    Elf_Addr tlsstart, tlsend;
+
+    /*
+     * Figure out the size of the initial TLS block so that we can
+     * find stuff which ___tls_get_addr() allocated dynamically.
+     */
+    size = round(tls_static_space, tcbalign);
+
+    dtv = ((Elf_Addr**)tls)[1];
+    dtvsize = dtv[1];
+    tlsend = (Elf_Addr) tls;
+    tlsstart = tlsend - size;
+    for (i = 0; i < dtvsize; i++) {
+        if (dtv[i+2] < tlsstart || dtv[i+2] > tlsend) {
+            free((void*) dtv[i+2]);
+        }
+    }
+
+    free((void*) tlsstart);
+}
+
+#endif
+
 /*
  * Allocate TLS block for module with given index.
  */
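Both variants share the dtv layout that tls_get_addr_common() consumes
above: dtv[0] is the generation count, dtv[1] the largest module index,
and dtv[index + 1] the base of that module's block.  A rough standalone
sketch of just the lookup step (only an illustration; the real function
also grows the dtv and allocates missing blocks on demand, which is
skipped here):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static void *
lookup(uintptr_t *dtv, int index, size_t offset)
{
	/* Real code would allocate the block here instead of bailing. */
	if (index > (int)dtv[1] || dtv[index + 1] == 0)
		return NULL;
	return (void *)(dtv[index + 1] + offset);
}

int
main(void)
{
	uintptr_t dtv[3] = { 1, 1, 0 };	/* generation 1, one module */
	char block[64] = "hello";

	dtv[2] = (uintptr_t)block;	/* module 1's TLS block */
	printf("%s\n", (char *)lookup(dtv, 1, 0));
	return 0;
}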
==== //depot/projects/kse/libexec/rtld-elf/rtld.h#8 (text+ko) ====

@@ -197,6 +197,8 @@
 #define RTLD_MAGIC	0xd550b87a
 #define RTLD_VERSION	1
 
+#define RTLD_STATIC_TLS_EXTRA	64
+
 /*
  * Symbol cache entry used during relocation to avoid multiple lookups
  * of the same symbol.
@@ -232,6 +234,8 @@
 const Elf_Sym *symlook_obj(const char *, unsigned long, const Obj_Entry *, bool);
 void *tls_get_addr_common(Elf_Addr** dtvp, int index, size_t offset);
 
+void *allocate_tls(Obj_Entry *, void *, size_t, size_t);
+void free_tls(void *, size_t, size_t);
 void *allocate_module_tls(int index);
 bool allocate_tls_offset(Obj_Entry *obj);
@@ -242,8 +246,6 @@
 int reloc_non_plt(Obj_Entry *, Obj_Entry *);
 int reloc_plt(Obj_Entry *);
 int reloc_jmpslots(Obj_Entry *);
-void *allocate_tls(Obj_Entry *, void *, size_t, size_t);
-void free_tls(void *, size_t, size_t);
 void allocate_initial_tls(Obj_Entry *);
 
 #endif /* } */
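RTLD_STATIC_TLS_EXTRA is the slack added on top of the offsets handed
out to the objects present at startup, so that objects dlopen()ed later
which expect static TLS can still be squeezed into the block instead of
falling back to dynamic allocation through the dtv.  A rough standalone
sketch of that bookkeeping (only an illustration; the sizes and the
rounding helper are invented for the example, and the real logic lives
in allocate_tls_offset() and the tls_last_* globals):

#include <stdio.h>
#include <stddef.h>

#define STATIC_TLS_EXTRA 64	/* mirrors RTLD_STATIC_TLS_EXTRA */

static size_t tls_last_offset;	/* high-water mark, like rtld's global */

static size_t
reserve(size_t size, size_t align)
{
	/* Variant II style: offsets measured down from the thread pointer. */
	size_t off = (tls_last_offset + size + align - 1) & ~(align - 1);

	tls_last_offset = off;
	return off;
}

int
main(void)
{
	printf("libA (24 bytes): offset %zu\n", reserve(24, 8));
	printf("libB (40 bytes): offset %zu\n", reserve(40, 16));

	size_t static_space = tls_last_offset + STATIC_TLS_EXTRA;
	printf("static TLS block: %zu bytes (%d spare for dlopen)\n",
	    static_space, STATIC_TLS_EXTRA);
	return 0;
}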
==== //depot/projects/kse/libexec/rtld-elf/sparc64/reloc.c#4 (text+ko) ====

@@ -719,47 +719,22 @@
 }
 
 void
-allocate_initial_tls(Obj_Entry *list)
+allocate_initial_tls(Obj_Entry *objs)
 {
-    register struct Elf_Addr tp __asm__("%g7");
-    Obj_Entry *obj;
-    size_t size;
-    char *tls;
-    Elf_Addr *dtv;
-    union descriptor ldt;
-    Elf_Addr segbase;
-    int sel;
+    register Elf_Addr** tp __asm__("%g7");
 
-    size = 0;
-    for (obj = list; obj; obj = obj->next) {
-        if (obj->tlsoffset > size)
-            size = obj->tlsoffset;
-    }
-
-    tls = malloc(size + 2*sizeof(Elf_Addr));
-    dtv = malloc((tls_max_index + 2) * sizeof(Elf_Addr));
-
-    segbase = (Elf_Addr)(tls + size);
-    ((Elf_Addr*)segbase)[0] = segbase;
-    ((Elf_Addr*)segbase)[1] = (Elf_Addr) dtv;
-
-    dtv[0] = tls_dtv_generation;
-    dtv[1] = tls_max_index;
-    for (obj = list; obj; obj = obj->next) {
-        Elf_Addr addr = segbase - obj->tlsoffset;
-        memset((void*) (addr + obj->tlsinitsize),
-               0, obj->tlssize - obj->tlsinitsize);
-        if (obj->tlsinit)
-            memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize);
-        dtv[obj->tlsindex] = addr;
-    }
-
-    tp = segbase;
+    /*
+     * Fix the size of the static TLS block by using the maximum
+     * offset allocated so far and adding a bit for dynamic modules to
+     * use.
+     */
+    tls_static_space = tls_last_offset + RTLD_STATIC_TLS_EXTRA;
+    tp = allocate_tls(objs, NULL, 2*sizeof(Elf_Addr), sizeof(Elf_Addr));
 }
 
 void *__tls_get_addr(tls_index *ti)
 {
-    register struct Elf_Addr** tp __asm__("%g7");
+    register Elf_Addr** tp __asm__("%g7");
 
-    return tls_get_addr_common(&tp[0], ti->ti_module, ti->ti_offset);
+    return tls_get_addr_common(tp, ti->ti_module, ti->ti_offset);
 }