From: Alan Cox <alc@FreeBSD.org>
Date: Tue, 17 Jan 2012 00:27:32 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Message-Id: <201201170027.q0H0RWRU059016@svn.freebsd.org>
Subject: svn commit: r230246 - head/sys/kern

Author: alc
Date: Tue Jan 17 00:27:32 2012
New Revision: 230246
URL: http://svn.freebsd.org/changeset/base/230246

Log:
  Improve abstraction.  Eliminate direct access by elf*_load_section()
  to an OBJT_VNODE-specific field of the vm object.  The same
  information can be just as easily obtained from the struct vattr that
  is in struct image_params if the latter is passed to
  elf*_load_section().  Moreover, by replacing the vmspace and vm
  object parameters to elf*_load_section() with a struct image_params
  parameter, we actually reduce the size of the object code.
  In collaboration with:	kib

Modified:
  head/sys/kern/imgact_elf.c

Modified: head/sys/kern/imgact_elf.c
==============================================================================
--- head/sys/kern/imgact_elf.c	Tue Jan 17 00:02:45 2012	(r230245)
+++ head/sys/kern/imgact_elf.c	Tue Jan 17 00:27:32 2012	(r230246)
@@ -86,9 +86,9 @@ static Elf_Brandinfo *__elfN(get_brandin
     const char *interp, int32_t *osrel);
 static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
     u_long *entry, size_t pagesize);
-static int __elfN(load_section)(struct vmspace *vmspace, vm_object_t object,
-    vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
-    vm_prot_t prot, size_t pagesize);
+static int __elfN(load_section)(struct image_params *imgp, vm_offset_t offset,
+    caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
+    size_t pagesize);
 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
 static boolean_t __elfN(freebsd_trans_osrel)(const Elf_Note *note,
     int32_t *osrel);
@@ -445,13 +445,14 @@ __elfN(map_insert)(vm_map_t map, vm_obje
 }
 
 static int
-__elfN(load_section)(struct vmspace *vmspace,
-	vm_object_t object, vm_offset_t offset,
-	caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
-	size_t pagesize)
+__elfN(load_section)(struct image_params *imgp, vm_offset_t offset,
+    caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
+    size_t pagesize)
 {
 	struct sf_buf *sf;
 	size_t map_len;
+	vm_map_t map;
+	vm_object_t object;
 	vm_offset_t map_addr;
 	int error, rv, cow;
 	size_t copy_len;
@@ -466,12 +467,13 @@ __elfN(load_section)(struct vmspace *vms
 	 * While I'm here, might as well check for something else that
 	 * is invalid: filsz cannot be greater than memsz.
 	 */
-	if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
-	    filsz > memsz) {
+	if ((off_t)filsz + offset > imgp->attr->va_size || filsz > memsz) {
 		uprintf("elf_load_section: truncated ELF file\n");
 		return (ENOEXEC);
 	}
 
+	object = imgp->object;
+	map = &imgp->proc->p_vmspace->vm_map;
 	map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
 	file_addr = trunc_page_ps(offset, pagesize);
 
@@ -491,7 +493,7 @@ __elfN(load_section)(struct vmspace *vms
 		cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
 		    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
 
-		rv = __elfN(map_insert)(&vmspace->vm_map,
+		rv = __elfN(map_insert)(map,
 				      object,
 				      file_addr,	/* file offset */
 				      map_addr,		/* virtual start */
@@ -521,8 +523,8 @@ __elfN(load_section)(struct vmspace *vms
 
 	/* This had damn well better be true! */
 	if (map_len != 0) {
-		rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
-		    map_addr + map_len, VM_PROT_ALL, 0);
+		rv = __elfN(map_insert)(map, NULL, 0, map_addr, map_addr +
+		    map_len, VM_PROT_ALL, 0);
 		if (rv != KERN_SUCCESS) {
 			return (EINVAL);
 		}
@@ -550,8 +552,8 @@ __elfN(load_section)(struct vmspace *vms
 	 * set it to the specified protection.
 	 * XXX had better undo the damage from pasting over the cracks here!
 	 */
-	vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
-	    round_page(map_addr + map_len), prot, FALSE);
+	vm_map_protect(map, trunc_page(map_addr), round_page(map_addr +
+	    map_len), prot, FALSE);
 
 	return (0);
 }
@@ -580,7 +582,6 @@ __elfN(load_file)(struct proc *p, const
 	const Elf_Ehdr *hdr = NULL;
 	const Elf_Phdr *phdr = NULL;
 	struct nameidata *nd;
-	struct vmspace *vmspace = p->p_vmspace;
 	struct vattr *attr;
 	struct image_params *imgp;
 	vm_prot_t prot;
@@ -672,11 +673,10 @@ __elfN(load_file)(struct proc *p, const
 		if (phdr[i].p_type == PT_LOAD && phdr[i].p_memsz != 0) {
 			/* Loadable segment */
 			prot = __elfN(trans_prot)(phdr[i].p_flags);
-			if ((error = __elfN(load_section)(vmspace,
-			    imgp->object, phdr[i].p_offset,
+			error = __elfN(load_section)(imgp, phdr[i].p_offset,
 			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
-			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
-			    pagesize)) != 0)
+			    phdr[i].p_memsz, phdr[i].p_filesz, prot, pagesize);
+			if (error != 0)
 				goto fail;
 			/*
 			 * Establish the base address if this is the
@@ -810,8 +810,6 @@ __CONCAT(exec_, __elfN(imgact))(struct i
 	if (error)
 		return (error);
 
-	vmspace = imgp->proc->p_vmspace;
-
 	for (i = 0; i < hdr->e_phnum; i++) {
 		switch (phdr[i].p_type) {
 		case PT_LOAD:	/* Loadable segment */
@@ -828,11 +826,11 @@ __CONCAT(exec_, __elfN(imgact))(struct i
 				prot |= VM_PROT_EXECUTE;
 #endif
 
-			if ((error = __elfN(load_section)(vmspace,
-			    imgp->object, phdr[i].p_offset,
+			error = __elfN(load_section)(imgp, phdr[i].p_offset,
 			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + et_dyn_addr,
 			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
-			    sv->sv_pagesize)) != 0)
+			    sv->sv_pagesize);
+			if (error != 0)
 				return (error);
 
 			/*
@@ -901,6 +899,7 @@ __CONCAT(exec_, __elfN(imgact))(struct i
 		return (ENOMEM);
 	}
 
+	vmspace = imgp->proc->p_vmspace;
 	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
 	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
 	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
@@ -912,8 +911,8 @@ __CONCAT(exec_, __elfN(imgact))(struct i
 	 * calculation is that it leaves room for the heap to grow to
 	 * its maximum allowed size.
 	 */
-	addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
-	    lim_max(imgp->proc, RLIMIT_DATA));
+	addr = round_page((vm_offset_t)vmspace->vm_daddr + lim_max(imgp->proc,
+	    RLIMIT_DATA));
 	PROC_UNLOCK(imgp->proc);
 
 	imgp->entry_addr = entry;
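
[Editor's illustration, not part of the commit mail.]  A minimal userland
sketch of the pattern the log describes: instead of handing the loader a
vm map plus a pager-specific field of the backing object, the caller
passes one image_params-style context and the callee derives what it
needs, checking the segment against a generic file-size attribute.  The
types and names below (img_attr, img_params, load_section) are simplified
stand-ins invented for this sketch; the real kernel fields touched by the
change (imgp->attr->va_size, imgp->object, imgp->proc->p_vmspace) appear
in the diff above.

#include <sys/types.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for struct vattr and struct image_params. */
struct img_attr {
	off_t	va_size;	/* file size, as a vattr would carry it */
};

struct img_params {
	struct img_attr	*attr;	/* filled in before loading begins */
	void		*object;	/* backing object; opaque to callers */
	void		*map;		/* target address space */
};

/*
 * New-style entry point: a single context parameter.  The bounds check
 * uses the generic attribute (va_size) rather than a pager-specific
 * field, which is the abstraction improvement the commit log describes.
 */
static int
load_section(struct img_params *imgp, off_t offset, size_t memsz,
    size_t filsz)
{
	if ((off_t)filsz + offset > imgp->attr->va_size || filsz > memsz) {
		fprintf(stderr, "load_section: truncated file\n");
		return (-1);
	}
	/* ... mapping would proceed here using imgp->map and imgp->object ... */
	return (0);
}

int
main(void)
{
	struct img_attr attr = { .va_size = 4096 };
	struct img_params imgp = { .attr = &attr };

	printf("%d\n", load_section(&imgp, 0, 8192, 4096));	/* 0: fits */
	printf("%d\n", load_section(&imgp, 1024, 8192, 4096));	/* -1: past EOF */
	return (0);
}

As in the committed change, the callee needs nothing beyond the context
struct, so call sites shrink, which is consistent with the log's note
that the object code actually gets smaller.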