From: John Baldwin <jhb@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r295331 - in head/sys: kern sys
Date: Fri, 5 Feb 2016 20:38:09 +0000 (UTC)

Author: jhb
Date: Fri Feb  5 20:38:09 2016
New Revision: 295331
URL: https://svnweb.freebsd.org/changeset/base/295331

Log:
  Rename aiocblist to kaiocb and use consistent variable names.

  In FreeBSD, 'list' is typically used for a structure that holds a list
  head, not for the members of a list. Rename 'struct aiocblist' to
  'struct kaiocb' (the kernel version of 'struct aiocb') accordingly.
  While here, use more consistent variable names for AIO control blocks:
  - Use 'job' instead of 'aiocbe', 'cb', 'cbe', or 'iocb' for kernel job
    objects.
  - Use 'jobn' instead of 'cbn' for use with TAILQ_FOREACH_SAFE().
  - Use 'sjob' and 'sjobn' instead of 'scb' and 'scbn' for fsync jobs.
  - Use 'ujob' instead of 'aiocbp', 'job', 'uaiocb', or 'uuaiocb' to hold
    a user pointer to a 'struct aiocb'.
  - Use 'ujobp' instead of 'aiocbp' for a user pointer to a
    'struct aiocb *'.

  Reviewed by:	kib
  Sponsored by:	Chelsio Communications
  Differential Revision:	https://reviews.freebsd.org/D5125

Modified:
  head/sys/kern/vfs_aio.c
  head/sys/sys/event.h
  head/sys/sys/socketvar.h
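For readers tracking the rename, the convention the log settles on is: 'struct kaiocb' is the kernel-side control block and is always named 'job', while a pointer into user memory is always named 'ujob'. A minimal kernel-style sketch of that pattern (illustrative only, not code from this commit; 'example_copyin_cb' is a hypothetical helper):

    /*
     * Hypothetical helper showing the job/ujob convention:
     * 'ujob' addresses user memory, 'job' is the kernel kaiocb.
     */
    static int
    example_copyin_cb(struct kaiocb *job, struct aiocb *ujob)
    {

            /* Copy the user's control block into the kernel job. */
            return (copyin(ujob, &job->uaiocb, sizeof(job->uaiocb)));
    }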
Modified: head/sys/kern/vfs_aio.c
==============================================================================
--- head/sys/kern/vfs_aio.c	Fri Feb  5 19:35:53 2016	(r295330)
+++ head/sys/kern/vfs_aio.c	Fri Feb  5 20:38:09 2016	(r295331)
@@ -196,7 +196,7 @@ typedef struct oaiocb {
 } oaiocb_t;
 
 /*
- * Below is a key of locks used to protect each member of struct aiocblist
+ * Below is a key of locks used to protect each member of struct kaiocb
  * aioliojob and kaioinfo and any backends.
 *
 * * - need not protected
@@ -219,10 +219,10 @@ typedef struct oaiocb {
 *	daemons.
 */
 
-struct aiocblist {
-	TAILQ_ENTRY(aiocblist) list;	/* (b) internal list of for backend */
-	TAILQ_ENTRY(aiocblist) plist;	/* (a) list of jobs for each backend */
-	TAILQ_ENTRY(aiocblist) allist;	/* (a) list of all jobs in proc */
+struct kaiocb {
+	TAILQ_ENTRY(kaiocb) list;	/* (b) internal list of for backend */
+	TAILQ_ENTRY(kaiocb) plist;	/* (a) list of jobs for each backend */
+	TAILQ_ENTRY(kaiocb) allist;	/* (a) list of all jobs in proc */
 	int	jobflags;		/* (a) job flags */
 	int	jobstate;		/* (b) job state */
 	int	inputcharge;		/* (*) input blockes */
@@ -235,7 +235,7 @@ struct aiocblist {
 	struct	ucred *cred;		/* (*) active credential when created */
 	struct	file *fd_file;		/* (*) pointer to file structure */
 	struct	aioliojob *lio;		/* (*) optional lio job */
-	struct	aiocb *uuaiocb;		/* (*) pointer in userspace of aiocb */
+	struct	aiocb *ujob;		/* (*) pointer in userspace of aiocb */
 	struct	knlist klist;		/* (a) list of knotes */
 	struct	aiocb uaiocb;		/* (*) kernel I/O control block */
 	ksiginfo_t ksi;			/* (a) realtime signal info */
@@ -244,10 +244,10 @@ struct aiocblist {
 };
 
 /* jobflags */
-#define	AIOCBLIST_DONE		0x01
-#define	AIOCBLIST_BUFDONE	0x02
-#define	AIOCBLIST_RUNDOWN	0x04
-#define	AIOCBLIST_CHECKSYNC	0x08
+#define	KAIOCB_DONE		0x01
+#define	KAIOCB_BUFDONE		0x02
+#define	KAIOCB_RUNDOWN		0x04
+#define	KAIOCB_CHECKSYNC	0x08
 
 /*
  * AIO process info
@@ -289,12 +289,12 @@ struct kaioinfo {
 	int	kaio_count;		/* (a) size of AIO queue */
 	int	kaio_ballowed_count;	/* (*) maximum number of buffers */
 	int	kaio_buffer_count;	/* (a) number of physio buffers */
-	TAILQ_HEAD(,aiocblist) kaio_all;	/* (a) all AIOs in a process */
-	TAILQ_HEAD(,aiocblist) kaio_done;	/* (a) done queue for process */
+	TAILQ_HEAD(,kaiocb) kaio_all;	/* (a) all AIOs in a process */
+	TAILQ_HEAD(,kaiocb) kaio_done;	/* (a) done queue for process */
 	TAILQ_HEAD(,aioliojob) kaio_liojoblist; /* (a) list of lio jobs */
-	TAILQ_HEAD(,aiocblist) kaio_jobqueue;	/* (a) job queue for process */
-	TAILQ_HEAD(,aiocblist) kaio_bufqueue;	/* (a) buffer job queue */
-	TAILQ_HEAD(,aiocblist) kaio_syncqueue;	/* (a) queue for aio_fsync */
+	TAILQ_HEAD(,kaiocb) kaio_jobqueue;	/* (a) job queue for process */
+	TAILQ_HEAD(,kaiocb) kaio_bufqueue;	/* (a) buffer job queue */
+	TAILQ_HEAD(,kaiocb) kaio_syncqueue;	/* (a) queue for aio_fsync */
 	struct	task kaio_task;		/* (*) task to kick aio processes */
 };
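The log's point about 'list' is the queue(3) distinction between a list head and a linkage member: TAILQ_HEAD declares the structure that holds the head (as in struct kaioinfo above), while TAILQ_ENTRY declares the per-element link (as in struct kaiocb). A minimal sketch of that pairing, illustrative rather than taken from the diff:

    #include <sys/queue.h>

    struct item {
            int value;
            TAILQ_ENTRY(item) link;         /* per-element linkage */
    };

    TAILQ_HEAD(item_head, item);            /* structure holding the head */

    static void
    example_insert(struct item_head *head, struct item *it)
    {

            TAILQ_INSERT_TAIL(head, it, link);
    }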
@@ -323,28 +323,28 @@ struct aiocb_ops {
 static TAILQ_HEAD(,aioproc) aio_freeproc;	/* (c) Idle daemons */
 static struct sema aio_newproc_sem;
 static struct mtx aio_job_mtx;
-static TAILQ_HEAD(,aiocblist) aio_jobs;	/* (c) Async job list */
+static TAILQ_HEAD(,kaiocb) aio_jobs;		/* (c) Async job list */
 static struct unrhdr *aiod_unr;
 
 void		aio_init_aioinfo(struct proc *p);
 static int	aio_onceonly(void);
-static int	aio_free_entry(struct aiocblist *aiocbe);
-static void	aio_process_rw(struct aiocblist *aiocbe);
-static void	aio_process_sync(struct aiocblist *aiocbe);
-static void	aio_process_mlock(struct aiocblist *aiocbe);
+static int	aio_free_entry(struct kaiocb *job);
+static void	aio_process_rw(struct kaiocb *job);
+static void	aio_process_sync(struct kaiocb *job);
+static void	aio_process_mlock(struct kaiocb *job);
 static int	aio_newproc(int *);
-int		aio_aqueue(struct thread *td, struct aiocb *job,
+int		aio_aqueue(struct thread *td, struct aiocb *ujob,
 		    struct aioliojob *lio, int type, struct aiocb_ops *ops);
 static void	aio_physwakeup(struct bio *bp);
 static void	aio_proc_rundown(void *arg, struct proc *p);
 static void	aio_proc_rundown_exec(void *arg, struct proc *p,
 		    struct image_params *imgp);
-static int	aio_qphysio(struct proc *p, struct aiocblist *iocb);
+static int	aio_qphysio(struct proc *p, struct kaiocb *job);
 static void	aio_daemon(void *param);
 static void	aio_swake_cb(struct socket *, struct sockbuf *);
 static int	aio_unload(void);
-static void	aio_bio_done_notify(struct proc *userp,
-		    struct aiocblist *aiocbe, int type);
+static void	aio_bio_done_notify(struct proc *userp, struct kaiocb *job,
+		    int type);
 #define DONE_BUF	1
 #define DONE_QUEUE	2
 static int	aio_kick(struct proc *userp);
@@ -488,7 +488,7 @@ aio_onceonly(void)
 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
 	aiop_zone = uma_zcreate("AIOP", sizeof(struct aioproc), NULL,
 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
-	aiocb_zone = uma_zcreate("AIOCB", sizeof(struct aiocblist), NULL, NULL,
+	aiocb_zone = uma_zcreate("AIOCB", sizeof(struct kaiocb), NULL, NULL,
 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
 	aiol_zone = uma_zcreate("AIOL", AIO_LISTIO_MAX*sizeof(intptr_t) , NULL,
 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
@@ -625,29 +625,29 @@ aio_sendsig(struct proc *p, struct sigev
  * restart the queue scan.
 */
 static int
-aio_free_entry(struct aiocblist *aiocbe)
+aio_free_entry(struct kaiocb *job)
 {
 	struct kaioinfo *ki;
 	struct aioliojob *lj;
 	struct proc *p;
 
-	p = aiocbe->userproc;
+	p = job->userproc;
 	MPASS(curproc == p);
 	ki = p->p_aioinfo;
 	MPASS(ki != NULL);
 
 	AIO_LOCK_ASSERT(ki, MA_OWNED);
-	MPASS(aiocbe->jobstate == JOBST_JOBFINISHED);
+	MPASS(job->jobstate == JOBST_JOBFINISHED);
 
 	atomic_subtract_int(&num_queue_count, 1);
 
 	ki->kaio_count--;
 	MPASS(ki->kaio_count >= 0);
 
-	TAILQ_REMOVE(&ki->kaio_done, aiocbe, plist);
-	TAILQ_REMOVE(&ki->kaio_all, aiocbe, allist);
+	TAILQ_REMOVE(&ki->kaio_done, job, plist);
+	TAILQ_REMOVE(&ki->kaio_all, job, allist);
 
-	lj = aiocbe->lio;
+	lj = job->lio;
 	if (lj) {
 		lj->lioj_count--;
 		lj->lioj_finished_count--;
@@ -663,14 +663,14 @@ aio_free_entry(struct aiocblist *aiocbe)
 		}
 	}
 
-	/* aiocbe is going away, we need to destroy any knotes */
-	knlist_delete(&aiocbe->klist, curthread, 1);
+	/* job is going away, we need to destroy any knotes */
+	knlist_delete(&job->klist, curthread, 1);
 	PROC_LOCK(p);
-	sigqueue_take(&aiocbe->ksi);
+	sigqueue_take(&job->ksi);
 	PROC_UNLOCK(p);
 
-	MPASS(aiocbe->bp == NULL);
-	aiocbe->jobstate = JOBST_NULL;
+	MPASS(job->bp == NULL);
+	job->jobstate = JOBST_NULL;
 	AIO_UNLOCK(ki);
 
 	/*
@@ -682,7 +682,7 @@ aio_free_entry(struct aiocblist *aiocbe)
 	 * another process.
 	 *
 	 * Currently, all the callers of this function call it to remove
-	 * an aiocblist from the current process' job list either via a
+	 * a kaiocb from the current process' job list either via a
 	 * syscall or due to the current process calling exit() or
 	 * execve().  Thus, we know that p == curproc.  We also know that
 	 * curthread can't exit since we are curthread.
@@ -693,10 +693,10 @@ aio_free_entry(struct aiocblist *aiocbe)
 	 * at open time, but this is already true of file descriptors in
 	 * a multithreaded process.
 	 */
-	if (aiocbe->fd_file)
-		fdrop(aiocbe->fd_file, curthread);
-	crfree(aiocbe->cred);
-	uma_zfree(aiocb_zone, aiocbe);
+	if (job->fd_file)
+		fdrop(job->fd_file, curthread);
+	crfree(job->cred);
+	uma_zfree(aiocb_zone, job);
 	AIO_LOCK(ki);
 
 	return (0);
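The aiocb_zone above is a uma(9) zone sized for the renamed structure; aio_aqueue() allocates jobs from it and aio_free_entry() returns them. A reduced sketch of that lifecycle (hypothetical zone name, illustrative only):

    static uma_zone_t example_zone;

    static void
    example_zone_init(void)
    {

            /* Mirrors the AIOCB zone created in aio_onceonly(). */
            example_zone = uma_zcreate("EXAMPLE", sizeof(struct kaiocb),
                NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
    }

    static struct kaiocb *
    example_job_alloc(void)
    {

            /* M_WAITOK | M_ZERO matches the allocation in aio_aqueue(). */
            return (uma_zalloc(example_zone, M_WAITOK | M_ZERO));
    }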
@@ -717,7 +717,7 @@ aio_proc_rundown(void *arg, struct proc
 {
 	struct kaioinfo *ki;
 	struct aioliojob *lj;
-	struct aiocblist *cbe, *cbn;
+	struct kaiocb *job, *jobn;
 	struct file *fp;
 	struct socket *so;
 	int remove;
@@ -737,30 +737,30 @@ restart:
 	 * Try to cancel all pending requests.  This code simulates
 	 * aio_cancel on all pending I/O requests.
 	 */
-	TAILQ_FOREACH_SAFE(cbe, &ki->kaio_jobqueue, plist, cbn) {
+	TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) {
 		remove = 0;
 		mtx_lock(&aio_job_mtx);
-		if (cbe->jobstate == JOBST_JOBQGLOBAL) {
-			TAILQ_REMOVE(&aio_jobs, cbe, list);
+		if (job->jobstate == JOBST_JOBQGLOBAL) {
+			TAILQ_REMOVE(&aio_jobs, job, list);
 			remove = 1;
-		} else if (cbe->jobstate == JOBST_JOBQSOCK) {
-			fp = cbe->fd_file;
+		} else if (job->jobstate == JOBST_JOBQSOCK) {
+			fp = job->fd_file;
 			MPASS(fp->f_type == DTYPE_SOCKET);
 			so = fp->f_data;
-			TAILQ_REMOVE(&so->so_aiojobq, cbe, list);
+			TAILQ_REMOVE(&so->so_aiojobq, job, list);
 			remove = 1;
-		} else if (cbe->jobstate == JOBST_JOBQSYNC) {
-			TAILQ_REMOVE(&ki->kaio_syncqueue, cbe, list);
+		} else if (job->jobstate == JOBST_JOBQSYNC) {
+			TAILQ_REMOVE(&ki->kaio_syncqueue, job, list);
 			remove = 1;
 		}
 		mtx_unlock(&aio_job_mtx);
 
 		if (remove) {
-			cbe->jobstate = JOBST_JOBFINISHED;
-			cbe->uaiocb._aiocb_private.status = -1;
-			cbe->uaiocb._aiocb_private.error = ECANCELED;
-			TAILQ_REMOVE(&ki->kaio_jobqueue, cbe, plist);
-			aio_bio_done_notify(p, cbe, DONE_QUEUE);
+			job->jobstate = JOBST_JOBFINISHED;
+			job->uaiocb._aiocb_private.status = -1;
+			job->uaiocb._aiocb_private.error = ECANCELED;
+			TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
+			aio_bio_done_notify(p, job, DONE_QUEUE);
 		}
 	}
 
@@ -773,8 +773,8 @@ restart:
 	}
 
 	/* Free all completed I/O requests. */
-	while ((cbe = TAILQ_FIRST(&ki->kaio_done)) != NULL)
-		aio_free_entry(cbe);
+	while ((job = TAILQ_FIRST(&ki->kaio_done)) != NULL)
+		aio_free_entry(job);
 
 	while ((lj = TAILQ_FIRST(&ki->kaio_liojoblist)) != NULL) {
 		if (lj->lioj_count == 0) {
@@ -799,27 +799,27 @@ restart:
 /*
  * Select a job to run (called by an AIO daemon).
 */
-static struct aiocblist *
+static struct kaiocb *
 aio_selectjob(struct aioproc *aiop)
 {
-	struct aiocblist *aiocbe;
+	struct kaiocb *job;
 	struct kaioinfo *ki;
 	struct proc *userp;
 
 	mtx_assert(&aio_job_mtx, MA_OWNED);
-	TAILQ_FOREACH(aiocbe, &aio_jobs, list) {
-		userp = aiocbe->userproc;
+	TAILQ_FOREACH(job, &aio_jobs, list) {
+		userp = job->userproc;
 		ki = userp->p_aioinfo;
 
 		if (ki->kaio_active_count < ki->kaio_maxactive_count) {
-			TAILQ_REMOVE(&aio_jobs, aiocbe, list);
+			TAILQ_REMOVE(&aio_jobs, job, list);
 			/* Account for currently active jobs. */
 			ki->kaio_active_count++;
-			aiocbe->jobstate = JOBST_JOBRUNNING;
+			job->jobstate = JOBST_JOBRUNNING;
 			break;
 		}
 	}
-	return (aiocbe);
+	return (job);
 }
 
 /*
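aio_selectjob() above is a small fairness throttle: daemons scan the global FIFO but only claim a job whose owning process is still below its active-job cap (kaio_maxactive_count). The shape of the policy, sketched over a hypothetical 'struct work' so it stands alone:

    #include <sys/queue.h>

    struct work {
            int owner_active;               /* like kaio_active_count */
            int owner_limit;                /* like kaio_maxactive_count */
            TAILQ_ENTRY(work) link;
    };
    TAILQ_HEAD(work_head, work);

    static struct work *
    select_work(struct work_head *queue)
    {
            struct work *w;

            /* FIFO scan, skipping owners already at their cap. */
            TAILQ_FOREACH(w, queue, link) {
                    if (w->owner_active < w->owner_limit) {
                            TAILQ_REMOVE(queue, w, link);
                            w->owner_active++;
                            return (w);
                    }
            }
            return (NULL);
    }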
@@ -857,7 +857,7 @@ drop:
  * XXX I don't think it works well for socket, pipe, and fifo.
 */
 static void
-aio_process_rw(struct aiocblist *aiocbe)
+aio_process_rw(struct kaiocb *job)
 {
 	struct ucred *td_savedcred;
 	struct thread *td;
@@ -871,15 +871,15 @@ aio_process_rw(struct aiocblist *aiocbe)
 	int oublock_st, oublock_end;
 	int inblock_st, inblock_end;
 
-	KASSERT(aiocbe->uaiocb.aio_lio_opcode == LIO_READ ||
-	    aiocbe->uaiocb.aio_lio_opcode == LIO_WRITE,
-	    ("%s: opcode %d", __func__, aiocbe->uaiocb.aio_lio_opcode));
+	KASSERT(job->uaiocb.aio_lio_opcode == LIO_READ ||
+	    job->uaiocb.aio_lio_opcode == LIO_WRITE,
+	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
 
 	td = curthread;
 	td_savedcred = td->td_ucred;
-	td->td_ucred = aiocbe->cred;
-	cb = &aiocbe->uaiocb;
-	fp = aiocbe->fd_file;
+	td->td_ucred = job->cred;
+	cb = &job->uaiocb;
+	fp = job->fd_file;
 
 	aiov.iov_base = (void *)(uintptr_t)cb->aio_buf;
 	aiov.iov_len = cb->aio_nbytes;
@@ -913,8 +913,8 @@ aio_process_rw(struct aiocblist *aiocbe)
 	inblock_end = td->td_ru.ru_inblock;
 	oublock_end = td->td_ru.ru_oublock;
 
-	aiocbe->inputcharge = inblock_end - inblock_st;
-	aiocbe->outputcharge = oublock_end - oublock_st;
+	job->inputcharge = inblock_end - inblock_st;
+	job->outputcharge = oublock_end - oublock_st;
 
 	if ((error) && (auio.uio_resid != cnt)) {
 		if (error == ERESTART || error == EINTR || error == EWOULDBLOCK)
@@ -927,9 +927,9 @@ aio_process_rw(struct aiocblist *aiocbe)
 				sigpipe = 0;
 		}
 		if (sigpipe) {
-			PROC_LOCK(aiocbe->userproc);
-			kern_psignal(aiocbe->userproc, SIGPIPE);
-			PROC_UNLOCK(aiocbe->userproc);
+			PROC_LOCK(job->userproc);
+			kern_psignal(job->userproc, SIGPIPE);
+			PROC_UNLOCK(job->userproc);
 		}
 	}
@@ -941,18 +941,18 @@ aio_process_rw(struct aiocblist *aiocbe)
 }
 
 static void
-aio_process_sync(struct aiocblist *aiocbe)
+aio_process_sync(struct kaiocb *job)
 {
 	struct thread *td = curthread;
 	struct ucred *td_savedcred = td->td_ucred;
-	struct aiocb *cb = &aiocbe->uaiocb;
-	struct file *fp = aiocbe->fd_file;
+	struct aiocb *cb = &job->uaiocb;
+	struct file *fp = job->fd_file;
 	int error = 0;
 
-	KASSERT(aiocbe->uaiocb.aio_lio_opcode == LIO_SYNC,
-	    ("%s: opcode %d", __func__, aiocbe->uaiocb.aio_lio_opcode));
+	KASSERT(job->uaiocb.aio_lio_opcode == LIO_SYNC,
+	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
 
-	td->td_ucred = aiocbe->cred;
+	td->td_ucred = job->cred;
 	if (fp->f_vnode != NULL)
 		error = aio_fsync_vnode(td, fp->f_vnode);
 	cb->_aiocb_private.error = error;
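aio_process_rw() and aio_process_sync() above share a credential dance: the daemon thread temporarily adopts the credential stashed in the job at submission time, so the I/O is checked against the submitter's rights rather than the daemon's, then restores its own. A minimal sketch of the pattern (illustrative; 'do_io' is a hypothetical callback):

    /* Sketch of the td_ucred swap in aio_process_rw()/aio_process_sync(). */
    static void
    example_process(struct kaiocb *job, int (*do_io)(struct thread *))
    {
            struct thread *td = curthread;
            struct ucred *saved = td->td_ucred;

            td->td_ucred = job->cred;       /* act as the submitter */
            (void)do_io(td);
            td->td_ucred = saved;           /* restore daemon credential */
    }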
@@ -961,31 +961,31 @@ aio_process_sync(struct aiocb
 }
 
 static void
-aio_process_mlock(struct aiocblist *aiocbe)
+aio_process_mlock(struct kaiocb *job)
 {
-	struct aiocb *cb = &aiocbe->uaiocb;
+	struct aiocb *cb = &job->uaiocb;
 	int error;
 
-	KASSERT(aiocbe->uaiocb.aio_lio_opcode == LIO_MLOCK,
-	    ("%s: opcode %d", __func__, aiocbe->uaiocb.aio_lio_opcode));
+	KASSERT(job->uaiocb.aio_lio_opcode == LIO_MLOCK,
+	    ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode));
 
-	error = vm_mlock(aiocbe->userproc, aiocbe->cred,
+	error = vm_mlock(job->userproc, job->cred,
 	    __DEVOLATILE(void *, cb->aio_buf), cb->aio_nbytes);
 	cb->_aiocb_private.error = error;
 	cb->_aiocb_private.status = 0;
 }
 
 static void
-aio_bio_done_notify(struct proc *userp, struct aiocblist *aiocbe, int type)
+aio_bio_done_notify(struct proc *userp, struct kaiocb *job, int type)
 {
 	struct aioliojob *lj;
 	struct kaioinfo *ki;
-	struct aiocblist *scb, *scbn;
+	struct kaiocb *sjob, *sjobn;
 	int lj_done;
 
 	ki = userp->p_aioinfo;
 	AIO_LOCK_ASSERT(ki, MA_OWNED);
-	lj = aiocbe->lio;
+	lj = job->lio;
 	lj_done = 0;
 	if (lj) {
 		lj->lioj_finished_count++;
@@ -993,21 +993,21 @@ aio_bio_done_notify(struct proc *userp,
 			lj_done = 1;
 	}
 	if (type == DONE_QUEUE) {
-		aiocbe->jobflags |= AIOCBLIST_DONE;
+		job->jobflags |= KAIOCB_DONE;
 	} else {
-		aiocbe->jobflags |= AIOCBLIST_BUFDONE;
+		job->jobflags |= KAIOCB_BUFDONE;
 	}
-	TAILQ_INSERT_TAIL(&ki->kaio_done, aiocbe, plist);
-	aiocbe->jobstate = JOBST_JOBFINISHED;
+	TAILQ_INSERT_TAIL(&ki->kaio_done, job, plist);
+	job->jobstate = JOBST_JOBFINISHED;
 
 	if (ki->kaio_flags & KAIO_RUNDOWN)
 		goto notification_done;
 
-	if (aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
-	    aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID)
-		aio_sendsig(userp, &aiocbe->uaiocb.aio_sigevent, &aiocbe->ksi);
+	if (job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
+	    job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID)
+		aio_sendsig(userp, &job->uaiocb.aio_sigevent, &job->ksi);
 
-	KNOTE_LOCKED(&aiocbe->klist, 1);
+	KNOTE_LOCKED(&job->klist, 1);
 
 	if (lj_done) {
 		if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) {
@@ -1024,16 +1024,16 @@ aio_bio_done_notify(struct proc *userp,
 	}
 
 notification_done:
-	if (aiocbe->jobflags & AIOCBLIST_CHECKSYNC) {
-		TAILQ_FOREACH_SAFE(scb, &ki->kaio_syncqueue, list, scbn) {
-			if (aiocbe->fd_file == scb->fd_file &&
-			    aiocbe->seqno < scb->seqno) {
-				if (--scb->pending == 0) {
+	if (job->jobflags & KAIOCB_CHECKSYNC) {
+		TAILQ_FOREACH_SAFE(sjob, &ki->kaio_syncqueue, list, sjobn) {
+			if (job->fd_file == sjob->fd_file &&
+			    job->seqno < sjob->seqno) {
+				if (--sjob->pending == 0) {
 					mtx_lock(&aio_job_mtx);
-					scb->jobstate = JOBST_JOBQGLOBAL;
-					TAILQ_REMOVE(&ki->kaio_syncqueue, scb,
+					sjob->jobstate = JOBST_JOBQGLOBAL;
+					TAILQ_REMOVE(&ki->kaio_syncqueue, sjob,
 					    list);
-					TAILQ_INSERT_TAIL(&aio_jobs, scb, list);
+					TAILQ_INSERT_TAIL(&aio_jobs, sjob, list);
 					aio_kick_nowait(userp);
 					mtx_unlock(&aio_job_mtx);
 				}
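The notification_done block above is the completion half of the aio_fsync() ordering scheme: every finishing job decrements the 'pending' count of each later fsync queued against the same file, and an fsync whose count reaches zero is promoted to the global run queue. Reduced to its core (a sketch; the real code also flips jobstate and kicks a daemon under aio_job_mtx):

    static void
    example_release_syncs(struct kaiocb *done, struct kaioinfo *ki)
    {
            struct kaiocb *sjob, *sjobn;

            TAILQ_FOREACH_SAFE(sjob, &ki->kaio_syncqueue, list, sjobn) {
                    /* Only fsyncs issued after 'done' on the same file. */
                    if (done->fd_file == sjob->fd_file &&
                        done->seqno < sjob->seqno && --sjob->pending == 0) {
                            TAILQ_REMOVE(&ki->kaio_syncqueue, sjob, list);
                            /* ...then insert sjob on aio_jobs, as above. */
                    }
            }
    }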
@@ -1047,10 +1047,10 @@ notification_done:
 }
 
 static void
-aio_switch_vmspace(struct aiocblist *aiocbe)
+aio_switch_vmspace(struct kaiocb *job)
 {
 
-	vmspace_switch_aio(aiocbe->userproc->p_vmspace);
+	vmspace_switch_aio(job->userproc->p_vmspace);
 }
 
 /*
@@ -1060,7 +1060,7 @@ aio_switch_vmspace(struct aiocblist *aio
 static void
 aio_daemon(void *_id)
 {
-	struct aiocblist *aiocbe;
+	struct kaiocb *job;
 	struct aioproc *aiop;
 	struct kaioinfo *ki;
 	struct proc *p, *userp;
@@ -1105,28 +1105,28 @@ aio_daemon(void *_id)
 		/*
 		 * Check for jobs.
 		 */
-		while ((aiocbe = aio_selectjob(aiop)) != NULL) {
+		while ((job = aio_selectjob(aiop)) != NULL) {
 			mtx_unlock(&aio_job_mtx);
-			userp = aiocbe->userproc;
+			userp = job->userproc;
 
 			/*
 			 * Connect to process address space for user program.
 			 */
-			aio_switch_vmspace(aiocbe);
+			aio_switch_vmspace(job);
 
 			ki = userp->p_aioinfo;
 
 			/* Do the I/O function. */
-			switch(aiocbe->uaiocb.aio_lio_opcode) {
+			switch(job->uaiocb.aio_lio_opcode) {
 			case LIO_READ:
 			case LIO_WRITE:
-				aio_process_rw(aiocbe);
+				aio_process_rw(job);
 				break;
 			case LIO_SYNC:
-				aio_process_sync(aiocbe);
+				aio_process_sync(job);
 				break;
 			case LIO_MLOCK:
-				aio_process_mlock(aiocbe);
+				aio_process_mlock(job);
 				break;
 			}
 
@@ -1136,8 +1136,8 @@ aio_daemon(void *_id)
 			mtx_unlock(&aio_job_mtx);
 
 			AIO_LOCK(ki);
-			TAILQ_REMOVE(&ki->kaio_jobqueue, aiocbe, plist);
-			aio_bio_done_notify(userp, aiocbe, DONE_QUEUE);
+			TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist);
+			aio_bio_done_notify(userp, job, DONE_QUEUE);
 			AIO_UNLOCK(ki);
 
 			mtx_lock(&aio_job_mtx);
@@ -1226,7 +1226,7 @@ aio_newproc(int *start)
 * duration of this call.
 */
 static int
-aio_qphysio(struct proc *p, struct aiocblist *aiocbe)
+aio_qphysio(struct proc *p, struct kaiocb *job)
 {
 	struct aiocb *cb;
 	struct file *fp;
@@ -1240,8 +1240,8 @@ aio_qphysio(struct proc *p, struct aiocb
 	int error, ref, unmap, poff;
 	vm_prot_t prot;
 
-	cb = &aiocbe->uaiocb;
-	fp = aiocbe->fd_file;
+	cb = &job->uaiocb;
+	fp = job->fd_file;
 
 	if (fp == NULL || fp->f_type != DTYPE_VNODE)
 		return (-1);
@@ -1286,9 +1286,9 @@ aio_qphysio(struct proc *p, struct aiocb
 			goto unref;
 		}
 	}
-	aiocbe->bp = bp = g_alloc_bio();
+	job->bp = bp = g_alloc_bio();
 	if (!unmap) {
-		aiocbe->pbuf = pbuf = (struct buf *)getpbuf(NULL);
+		job->pbuf = pbuf = (struct buf *)getpbuf(NULL);
 		BUF_KERNPROC(pbuf);
 	}
 
@@ -1296,12 +1296,12 @@ aio_qphysio(struct proc *p, struct aiocb
 	ki->kaio_count++;
 	if (!unmap)
 		ki->kaio_buffer_count++;
-	lj = aiocbe->lio;
+	lj = job->lio;
 	if (lj)
 		lj->lioj_count++;
-	TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, aiocbe, plist);
-	TAILQ_INSERT_TAIL(&ki->kaio_all, aiocbe, allist);
-	aiocbe->jobstate = JOBST_JOBQBUF;
+	TAILQ_INSERT_TAIL(&ki->kaio_bufqueue, job, plist);
+	TAILQ_INSERT_TAIL(&ki->kaio_all, job, allist);
+	job->jobstate = JOBST_JOBQBUF;
 	cb->_aiocb_private.status = cb->aio_nbytes;
 	AIO_UNLOCK(ki);
 
@@ -1312,25 +1312,25 @@ aio_qphysio(struct proc *p, struct aiocb
 	bp->bio_offset = cb->aio_offset;
 	bp->bio_cmd = cb->aio_lio_opcode == LIO_WRITE ? BIO_WRITE : BIO_READ;
 	bp->bio_dev = dev;
-	bp->bio_caller1 = (void *)aiocbe;
+	bp->bio_caller1 = (void *)job;
 
 	prot = VM_PROT_READ;
 	if (cb->aio_lio_opcode == LIO_READ)
 		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
-	if ((aiocbe->npages = vm_fault_quick_hold_pages(
+	if ((job->npages = vm_fault_quick_hold_pages(
 	    &curproc->p_vmspace->vm_map,
-	    (vm_offset_t)bp->bio_data, bp->bio_length, prot, aiocbe->pages,
-	    sizeof(aiocbe->pages)/sizeof(aiocbe->pages[0]))) < 0) {
+	    (vm_offset_t)bp->bio_data, bp->bio_length, prot, job->pages,
+	    sizeof(job->pages)/sizeof(job->pages[0]))) < 0) {
 		error = EFAULT;
 		goto doerror;
 	}
 	if (!unmap) {
 		pmap_qenter((vm_offset_t)pbuf->b_data,
-		    aiocbe->pages, aiocbe->npages);
+		    job->pages, job->npages);
 		bp->bio_data = pbuf->b_data + poff;
 	} else {
-		bp->bio_ma = aiocbe->pages;
-		bp->bio_ma_n = aiocbe->npages;
+		bp->bio_ma = job->pages;
+		bp->bio_ma_n = job->npages;
 		bp->bio_ma_offset = poff;
 		bp->bio_data = unmapped_buf;
 		bp->bio_flags |= BIO_UNMAPPED;
@@ -1347,9 +1347,9 @@ aio_qphysio(struct proc *p, struct aiocb
 
 doerror:
 	AIO_LOCK(ki);
-	aiocbe->jobstate = JOBST_NULL;
-	TAILQ_REMOVE(&ki->kaio_bufqueue, aiocbe, plist);
-	TAILQ_REMOVE(&ki->kaio_all, aiocbe, allist);
+	job->jobstate = JOBST_NULL;
+	TAILQ_REMOVE(&ki->kaio_bufqueue, job, plist);
+	TAILQ_REMOVE(&ki->kaio_all, job, allist);
 	ki->kaio_count--;
 	if (!unmap)
 		ki->kaio_buffer_count--;
@@ -1358,10 +1358,10 @@ doerror:
 	AIO_UNLOCK(ki);
 	if (pbuf) {
 		relpbuf(pbuf, NULL);
-		aiocbe->pbuf = NULL;
+		job->pbuf = NULL;
 	}
 	g_destroy_bio(bp);
-	aiocbe->bp = NULL;
+	job->bp = NULL;
 unref:
 	dev_relthread(dev, ref);
 	return (error);
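aio_qphysio() above wires the user buffer into physical memory with vm_fault_quick_hold_pages() and then either maps the pages into a pbuf or hands the page array to the bio unmapped. A reduced sketch of the hold step (illustrative; assumes nitems() from sys/param.h and elides the unhold on the error path):

    static int
    example_hold(struct kaiocb *job, void *base, size_t len, vm_prot_t prot)
    {

            /* A negative count means the range could not be wired. */
            job->npages = vm_fault_quick_hold_pages(
                &curproc->p_vmspace->vm_map, (vm_offset_t)base, len, prot,
                job->pages, nitems(job->pages));
            return (job->npages < 0 ? EFAULT : 0);
    }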
@@ -1373,7 +1373,7 @@ unref:
 static void
 aio_swake_cb(struct socket *so, struct sockbuf *sb)
 {
-	struct aiocblist *cb, *cbn;
+	struct kaiocb *job, *jobn;
 	int opcode;
 
 	SOCKBUF_LOCK_ASSERT(sb);
@@ -1384,18 +1384,18 @@ aio_swake_cb(struct socket *so, struct s
 
 	sb->sb_flags &= ~SB_AIO;
 	mtx_lock(&aio_job_mtx);
-	TAILQ_FOREACH_SAFE(cb, &so->so_aiojobq, list, cbn) {
-		if (opcode == cb->uaiocb.aio_lio_opcode) {
-			if (cb->jobstate != JOBST_JOBQSOCK)
+	TAILQ_FOREACH_SAFE(job, &so->so_aiojobq, list, jobn) {
+		if (opcode == job->uaiocb.aio_lio_opcode) {
+			if (job->jobstate != JOBST_JOBQSOCK)
 				panic("invalid queue value");
 			/* XXX
 			 * We don't have actual sockets backend yet,
 			 * so we simply move the requests to the generic
 			 * file I/O backend.
 			 */
-			TAILQ_REMOVE(&so->so_aiojobq, cb, list);
-			TAILQ_INSERT_TAIL(&aio_jobs, cb, list);
-			aio_kick_nowait(cb->userproc);
+			TAILQ_REMOVE(&so->so_aiojobq, job, list);
+			TAILQ_INSERT_TAIL(&aio_jobs, job, list);
+			aio_kick_nowait(job->userproc);
 		}
 	}
 	mtx_unlock(&aio_job_mtx);
@@ -1515,14 +1515,14 @@ static struct aiocb_ops aiocb_ops_osigev
 * technique is done in this code.
 */
 int
-aio_aqueue(struct thread *td, struct aiocb *job, struct aioliojob *lj,
+aio_aqueue(struct thread *td, struct aiocb *ujob, struct aioliojob *lj,
     int type, struct aiocb_ops *ops)
 {
 	struct proc *p = td->td_proc;
 	cap_rights_t rights;
 	struct file *fp;
 	struct socket *so;
-	struct aiocblist *aiocbe, *cb;
+	struct kaiocb *job, *job2;
 	struct kaioinfo *ki;
 	struct kevent kev;
 	struct sockbuf *sb;
@@ -1537,57 +1537,57 @@ aio_aqueue(struct thread *td, struct aio
 
 	ki = p->p_aioinfo;
 
-	ops->store_status(job, -1);
-	ops->store_error(job, 0);
-	ops->store_kernelinfo(job, -1);
+	ops->store_status(ujob, -1);
+	ops->store_error(ujob, 0);
+	ops->store_kernelinfo(ujob, -1);
 
 	if (num_queue_count >= max_queue_count ||
 	    ki->kaio_count >= ki->kaio_qallowed_count) {
-		ops->store_error(job, EAGAIN);
+		ops->store_error(ujob, EAGAIN);
 		return (EAGAIN);
 	}
 
-	aiocbe = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO);
-	knlist_init_mtx(&aiocbe->klist, AIO_MTX(ki));
+	job = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO);
+	knlist_init_mtx(&job->klist, AIO_MTX(ki));
 
-	error = ops->copyin(job, &aiocbe->uaiocb);
+	error = ops->copyin(ujob, &job->uaiocb);
 	if (error) {
-		ops->store_error(job, error);
-		uma_zfree(aiocb_zone, aiocbe);
+		ops->store_error(ujob, error);
+		uma_zfree(aiocb_zone, job);
 		return (error);
 	}
 
 	/* XXX: aio_nbytes is later casted to signed types. */
-	if (aiocbe->uaiocb.aio_nbytes > INT_MAX) {
-		uma_zfree(aiocb_zone, aiocbe);
+	if (job->uaiocb.aio_nbytes > INT_MAX) {
+		uma_zfree(aiocb_zone, job);
 		return (EINVAL);
 	}
 
-	if (aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT &&
-	    aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL &&
-	    aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID &&
-	    aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) {
-		ops->store_error(job, EINVAL);
-		uma_zfree(aiocb_zone, aiocbe);
+	if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT &&
+	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL &&
+	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID &&
+	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) {
+		ops->store_error(ujob, EINVAL);
+		uma_zfree(aiocb_zone, job);
 		return (EINVAL);
 	}
 
-	if ((aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
-	     aiocbe->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) &&
-	    !_SIG_VALID(aiocbe->uaiocb.aio_sigevent.sigev_signo)) {
-		uma_zfree(aiocb_zone, aiocbe);
+	if ((job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
+	     job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) &&
+	    !_SIG_VALID(job->uaiocb.aio_sigevent.sigev_signo)) {
+		uma_zfree(aiocb_zone, job);
 		return (EINVAL);
 	}
 
-	ksiginfo_init(&aiocbe->ksi);
+	ksiginfo_init(&job->ksi);
 
 	/* Save userspace address of the job info. */
-	aiocbe->uuaiocb = job;
+	job->ujob = ujob;
 
 	/* Get the opcode. */
 	if (type != LIO_NOP)
-		aiocbe->uaiocb.aio_lio_opcode = type;
-	opcode = aiocbe->uaiocb.aio_lio_opcode;
+		job->uaiocb.aio_lio_opcode = type;
+	opcode = job->uaiocb.aio_lio_opcode;
 
 	/*
 	 * Validate the opcode and fetch the file object for the specified
@@ -1597,7 +1597,7 @@ aio_aqueue(struct thread *td, struct aio
 	 * retrieve a file descriptor without knowing what the capabiltity
 	 * should be.
 	 */
-	fd = aiocbe->uaiocb.aio_fildes;
+	fd = job->uaiocb.aio_fildes;
 	switch (opcode) {
 	case LIO_WRITE:
 		error = fget_write(td, fd,
@@ -1620,8 +1620,8 @@ aio_aqueue(struct thread *td, struct aio
 		error = EINVAL;
 	}
 	if (error) {
-		uma_zfree(aiocb_zone, aiocbe);
-		ops->store_error(job, error);
+		uma_zfree(aiocb_zone, job);
+		ops->store_error(ujob, error);
 		return (error);
 	}
 
@@ -1630,60 +1630,60 @@ aio_aqueue(struct thread *td, struct aio
 		goto aqueue_fail;
 	}
 
-	if (opcode != LIO_SYNC && aiocbe->uaiocb.aio_offset == -1LL) {
+	if (opcode != LIO_SYNC && job->uaiocb.aio_offset == -1LL) {
 		error = EINVAL;
 		goto aqueue_fail;
 	}
 
-	aiocbe->fd_file = fp;
+	job->fd_file = fp;
 
 	mtx_lock(&aio_job_mtx);
 	jid = jobrefid++;
-	aiocbe->seqno = jobseqno++;
+	job->seqno = jobseqno++;
 	mtx_unlock(&aio_job_mtx);
-	error = ops->store_kernelinfo(job, jid);
+	error = ops->store_kernelinfo(ujob, jid);
 	if (error) {
 		error = EINVAL;
 		goto aqueue_fail;
 	}
-	aiocbe->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jid;
+	job->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jid;
 
 	if (opcode == LIO_NOP) {
 		fdrop(fp, td);
-		uma_zfree(aiocb_zone, aiocbe);
+		uma_zfree(aiocb_zone, job);
 		return (0);
 	}
 
-	if (aiocbe->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT)
+	if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT)
 		goto no_kqueue;
-	evflags = aiocbe->uaiocb.aio_sigevent.sigev_notify_kevent_flags;
+	evflags = job->uaiocb.aio_sigevent.sigev_notify_kevent_flags;
 	if ((evflags & ~(EV_CLEAR | EV_DISPATCH | EV_ONESHOT)) != 0) {
 		error = EINVAL;
 		goto aqueue_fail;
 	}
-	kqfd = aiocbe->uaiocb.aio_sigevent.sigev_notify_kqueue;
-	kev.ident = (uintptr_t)aiocbe->uuaiocb;
+	kqfd = job->uaiocb.aio_sigevent.sigev_notify_kqueue;
+	kev.ident = (uintptr_t)job->ujob;
 	kev.filter = EVFILT_AIO;
 	kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1 | evflags;
-	kev.data = (intptr_t)aiocbe;
-	kev.udata = aiocbe->uaiocb.aio_sigevent.sigev_value.sival_ptr;
+	kev.data = (intptr_t)job;
+	kev.udata = job->uaiocb.aio_sigevent.sigev_value.sival_ptr;
 	error = kqfd_register(kqfd, &kev, td, 1);
 aqueue_fail:
 	if (error) {
 		if (fp)
 			fdrop(fp, td);
-		uma_zfree(aiocb_zone, aiocbe);
-		ops->store_error(job, error);
+		uma_zfree(aiocb_zone, job);
+		ops->store_error(ujob, error);
 		goto done;
 	}
no_kqueue:
-	ops->store_error(job, EINPROGRESS);
-	aiocbe->uaiocb._aiocb_private.error = EINPROGRESS;
-	aiocbe->userproc = p;
-	aiocbe->cred = crhold(td->td_ucred);
-	aiocbe->jobflags = 0;
-	aiocbe->lio = lj;
+	ops->store_error(ujob, EINPROGRESS);
+	job->uaiocb._aiocb_private.error = EINPROGRESS;
+	job->userproc = p;
+	job->cred = crhold(td->td_ucred);
+	job->jobflags = 0;
+	job->lio = lj;
 
 	if (opcode == LIO_SYNC)
 		goto queueit;
@@ -1695,7 +1695,7 @@ no_kqueue:
 	 * socket is ready to be read or written (based on the requested
 	 * operation).
 	 *
-	 * If it is not ready for io, then queue the aiocbe on the
+	 * If it is not ready for io, then queue the job on the
 	 * socket, and set the flags so we get a call when sbnotify()
 	 * happens.
 	 *
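The kqueue registration above is what backs SIGEV_KEVENT completions: the delivered event's ident is the userspace aiocb pointer (now job->ujob) and its udata is sigev_value. From userland the round trip looks roughly like this (a sketch; error checking trimmed):

    #include <sys/types.h>
    #include <sys/event.h>
    #include <aio.h>
    #include <string.h>

    static char buf[512];

    int
    example_aio_read(int fd)
    {
            struct aiocb acb;
            struct kevent ev;
            int kq = kqueue();

            memset(&acb, 0, sizeof(acb));
            acb.aio_fildes = fd;
            acb.aio_buf = buf;
            acb.aio_nbytes = sizeof(buf);
            acb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
            acb.aio_sigevent.sigev_notify_kqueue = kq;
            aio_read(&acb);

            /* On completion, ev.ident is the user pointer to 'acb'. */
            kevent(kq, NULL, 0, &ev, 1, NULL);
            return (aio_return((struct aiocb *)ev.ident));
    }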
@@ -1710,13 +1710,13 @@ no_kqueue:
 			sb->sb_flags |= SB_AIO;
 
 			mtx_lock(&aio_job_mtx);
-			TAILQ_INSERT_TAIL(&so->so_aiojobq, aiocbe, list);
+			TAILQ_INSERT_TAIL(&so->so_aiojobq, job, list);
 			mtx_unlock(&aio_job_mtx);
 
 			AIO_LOCK(ki);
-			TAILQ_INSERT_TAIL(&ki->kaio_all, aiocbe, allist);
-			TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
-			aiocbe->jobstate = JOBST_JOBQSOCK;
+			TAILQ_INSERT_TAIL(&ki->kaio_all, job, allist);
+			TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, job, plist);
+			job->jobstate = JOBST_JOBQSOCK;
 			ki->kaio_count++;
 			if (lj)
 				lj->lioj_count++;
@@ -1729,12 +1729,12 @@ no_kqueue:
 		SOCKBUF_UNLOCK(sb);
 	}
 
-	if ((error = aio_qphysio(p, aiocbe)) == 0)
+	if ((error = aio_qphysio(p, job)) == 0)
 		goto done;
 #if 0
 	if (error > 0) {
-		aiocbe->uaiocb._aiocb_private.error = error;
-		ops->store_error(job, error);
+		job->uaiocb._aiocb_private.error = error;
+		ops->store_error(ujob, error);
 		goto done;
 	}
 #endif
@@ -1745,35 +1745,35 @@ queueit:
 	ki->kaio_count++;
 	if (lj)
 		lj->lioj_count++;
-	TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, aiocbe, plist);
-	TAILQ_INSERT_TAIL(&ki->kaio_all, aiocbe, allist);
+	TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, job, plist);
+	TAILQ_INSERT_TAIL(&ki->kaio_all, job, allist);
 	if (opcode == LIO_SYNC) {
-		TAILQ_FOREACH(cb, &ki->kaio_jobqueue, plist) {
-			if (cb->fd_file == aiocbe->fd_file &&
-			    cb->uaiocb.aio_lio_opcode != LIO_SYNC &&
-			    cb->seqno < aiocbe->seqno) {
-				cb->jobflags |= AIOCBLIST_CHECKSYNC;
-				aiocbe->pending++;
+		TAILQ_FOREACH(job2, &ki->kaio_jobqueue, plist) {
+			if (job2->fd_file == job->fd_file &&
+			    job2->uaiocb.aio_lio_opcode != LIO_SYNC &&
+			    job2->seqno < job->seqno) {
+				job2->jobflags |= KAIOCB_CHECKSYNC;
+				job->pending++;
 			}
 		}
-		TAILQ_FOREACH(cb, &ki->kaio_bufqueue, plist) {
-			if (cb->fd_file == aiocbe->fd_file &&
-			    cb->uaiocb.aio_lio_opcode != LIO_SYNC &&
-			    cb->seqno < aiocbe->seqno) {
-				cb->jobflags |= AIOCBLIST_CHECKSYNC;
-				aiocbe->pending++;
+		TAILQ_FOREACH(job2, &ki->kaio_bufqueue, plist) {
+			if (job2->fd_file == job->fd_file &&
+			    job2->uaiocb.aio_lio_opcode != LIO_SYNC &&
+			    job2->seqno < job->seqno) {
+				job2->jobflags |= KAIOCB_CHECKSYNC;
+				job->pending++;
 			}
 		}
-		if (aiocbe->pending != 0) {
-			TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, aiocbe, list);
-			aiocbe->jobstate = JOBST_JOBQSYNC;
+		if (job->pending != 0) {
+			TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, job, list);
+			job->jobstate = JOBST_JOBQSYNC;
 			AIO_UNLOCK(ki);
 			goto done;
 		}
 	}
 	mtx_lock(&aio_job_mtx);
-	TAILQ_INSERT_TAIL(&aio_jobs, aiocbe, list);
-	aiocbe->jobstate = JOBST_JOBQGLOBAL;
+	TAILQ_INSERT_TAIL(&aio_jobs, job, list);
+	job->jobstate = JOBST_JOBQGLOBAL;
 	aio_kick_nowait(p);
 	mtx_unlock(&aio_job_mtx);
 	AIO_UNLOCK(ki);
@@ -1848,10 +1848,10 @@ aio_kick_helper(void *context, int pendi
 * released.
 */
 static int
-kern_aio_return(struct thread *td, struct aiocb *uaiocb, struct aiocb_ops *ops)
+kern_aio_return(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops)
 {
 	struct proc *p = td->td_proc;
-	struct aiocblist *cb;
+	struct kaiocb *job;
 	struct kaioinfo *ki;
 	int status, error;
 
@@ -1859,26 +1859,26 @@ kern_aio_return(struct thread *td, struc
 	if (ki == NULL)
 		return (EINVAL);
 	AIO_LOCK(ki);
-	TAILQ_FOREACH(cb, &ki->kaio_done, plist) {
-		if (cb->uuaiocb == uaiocb)
+	TAILQ_FOREACH(job, &ki->kaio_done, plist) {
+		if (job->ujob == ujob)

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***