Date: Fri, 5 Jun 2009 17:03:45 GMT
From: Edward Tomasz Napierala <trasz@FreeBSD.org>
To: Perforce Change Reviews <perforce@freebsd.org>
Subject: PERFORCE change 163587 for review
Message-ID: <200906051703.n55H3jYI029064@repoman.freebsd.org>
http://perforce.freebsd.org/chv.cgi?CH=163587

Change 163587 by trasz@trasz_victim on 2009/06/05 17:03:09

	Use rbtree to store HRL limits.  Still not quite right, but less
	obviously wrong than before.  Remove limits for exiting processes.

Affected files ...

.. //depot/projects/soc2009/trasz_limits/sys/kern/kern_hrl.c#4 edit

Differences ...

==== //depot/projects/soc2009/trasz_limits/sys/kern/kern_hrl.c#4 (text+ko) ====

@@ -30,6 +30,7 @@
 #include <sys/hrl.h>
 #include <sys/param.h>
 #include <sys/malloc.h>
+#include <sys/queue.h>
 #include <sys/kernel.h>
 #include <sys/priv.h>
 #include <sys/proc.h>
@@ -40,19 +41,64 @@
 #include <sys/eventhandler.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
+#include <sys/tree.h>
+#include <vm/uma.h>
+
+struct hrl_node {
+	struct hrl_limit	hn_limit;
+	RB_ENTRY(hrl_node)	hn_next;
+};
+
+/*
+ * XXX: This is silly.  Some better way of organising these
+ *      will be required.
+ */
+static int
+hn_compare(const struct hrl_node *a, const struct hrl_node *b)
+{
+	if (a->hn_limit.hl_subject < b->hn_limit.hl_subject)
+		return (-1);
+	else if (a->hn_limit.hl_subject > b->hn_limit.hl_subject)
+		return (1);
+
+	if (a->hn_limit.hl_subject_id < b->hn_limit.hl_subject_id)
+		return (-1);
+	else if (a->hn_limit.hl_subject_id > b->hn_limit.hl_subject_id)
+		return (1);
 
-MALLOC_DEFINE(M_HRL, "hrl", "Hierarchical Resource Limits");
+	if (a->hn_limit.hl_per < b->hn_limit.hl_per)
+		return (-1);
+	else if (a->hn_limit.hl_per > b->hn_limit.hl_per)
+		return (1);
+
+	if (a->hn_limit.hl_object < b->hn_limit.hl_object)
+		return (-1);
+	else if (a->hn_limit.hl_object > b->hn_limit.hl_object)
+		return (1);
+
+	if (a->hn_limit.hl_action < b->hn_limit.hl_action)
+		return (-1);
+	else if (a->hn_limit.hl_action > b->hn_limit.hl_action)
+		return (1);
+
+	return (0);
+}
 
 /*
- * XXX: Need a better way to store stuff; rbtree?
+ * hrl_lock must be held during all operations on hrls.
  */
-static struct hrl_limit *limits = NULL;
-static int nlimits = 0;
-static struct mtx hrllock;
+RB_HEAD(hrl_tree, hrl_node)	hrls = RB_INITIALIZER(hrls);
+RB_GENERATE_STATIC(hrl_tree, hrl_node, hn_next, hn_compare);
 
 static void hrl_init(void);
 SYSINIT(hrl, SI_SUB_RUN_SCHEDULER, SI_ORDER_SECOND, hrl_init, NULL);
 
+static int nhrls = 0;
+static uma_zone_t hrl_zone;
+static struct mtx hrl_lock;
+
+MALLOC_DEFINE(M_HRL, "hrl", "Hierarchical Resource Limits");
+
 int
 hrl_alloc(int object, uint64_t amount)
 {
@@ -72,34 +118,54 @@
 
 void
 hrl_adjust(int subject, id_t subject_id, int per, int object, int action, int64_t amount)
 {
+	struct hrl_node searched, *node, *existing;
+
+	searched.hn_limit.hl_subject = subject;
+	searched.hn_limit.hl_subject_id = subject_id;
+	searched.hn_limit.hl_per = per;
+	searched.hn_limit.hl_object = object;
+	searched.hn_limit.hl_action = action;
+
 	/*
-	 * Limit removal?
+	 * Removing a limit.
 	 */
-	if (amount == 0)
+	if (amount == 0) {
+		mtx_lock(&hrl_lock);
+		node = RB_FIND(hrl_tree, &hrls, &searched);
+		if (node != NULL) {
+			node = RB_REMOVE(hrl_tree, &hrls, node);
+			KASSERT(node != NULL, ("node removal failed"));
+			nhrls--;
+		}
+		mtx_unlock(&hrl_lock);
+		if (node != NULL)
+			uma_zfree(hrl_zone, node);
 		return;
+	}
 
-	mtx_lock(&hrllock);
-
-	nlimits++;
 	/*
-	 * XXX: Make it possible to remove and overwrite limits, not just add them.
+	 * Adding a new limit or changing existing one.
 	 */
-	limits = realloc(limits, sizeof(struct hrl_limit) * nlimits, M_HRL, M_WAITOK);
-
-	limits[nlimits - 1].hl_subject = subject;
-	limits[nlimits - 1].hl_subject_id = subject_id;
-	limits[nlimits - 1].hl_per = per;
-	limits[nlimits - 1].hl_object = object;
-	limits[nlimits - 1].hl_action = action;
-	limits[nlimits - 1].hl_amount = amount;
-
-	mtx_unlock(&hrllock);
+	node = uma_zalloc(hrl_zone, M_WAITOK);
+	*node = searched;
+	mtx_lock(&hrl_lock);
+	existing = RB_INSERT(hrl_tree, &hrls, node);
+	if (existing != NULL) {
+		existing->hn_limit.hl_amount = amount;
+	} else {
+		node->hn_limit.hl_amount = amount;
+		nhrls++;
+	}
+	mtx_unlock(&hrl_lock);
+	if (existing != NULL)
+		uma_zfree(hrl_zone, node);
 }
 
 /*
  * System calls.
  */
+#if 0
 static int
 hrl_check(struct hrl_limit *limits, int nlimits)
 {
@@ -120,10 +186,12 @@
 
 	return (0);
 }
+#endif
 
 int
 hrl_set(struct thread *td, struct hrl_set_args *uap)
 {
+#if 0
 	int error;
 	size_t buflen;
 	struct hrl_limit *newlimits;
@@ -143,11 +211,11 @@
 	 * Removing all the limits?
 	 */
 	if (uap->nentries == 0) {
-		mtx_lock(&hrllock);
+		mtx_lock(&hrl_lock);
 		free(limits, M_HRL);
 		limits = NULL;
-		nlimits = 0;
-		mtx_unlock(&hrllock);
+		nhrls = 0;
+		mtx_unlock(&hrl_lock);
 
 		return (0);
 	}
@@ -163,79 +231,125 @@
 
 	if (error)
 		goto out;
 
-	mtx_lock(&hrllock);
+	mtx_lock(&hrl_lock);
 
 	if (limits != NULL)
 		free(limits, M_HRL);
 	limits = newlimits;
-	nlimits = uap->nentries;
+	nhrls = uap->nentries;
 
-	mtx_unlock(&hrllock);
+	mtx_unlock(&hrl_lock);
 
 	return (0);
 out:
 	free(newlimits, M_HRL);
 	return (error);
+#else
+	return (ENOSYS);
+#endif
 }
 
 int
 hrl_get(struct thread *td, struct hrl_get_args *uap)
 {
-	int error;
+	int error = 0, copied = 0;
 	size_t buflen;
+	struct hrl_node *node;
+	struct hrl_limit *buf;
+
+	if (uap->nentries == 0 && uap->bufp == NULL) {
+		mtx_lock(&hrl_lock);
+		error = suword(uap->required, nhrls);
+		mtx_unlock(&hrl_lock);
 
-	error = priv_check(td, PRIV_HRL_SET);
-	if (error)
 		return (error);
+	}
+
+	if (uap->nentries < 0 || uap->nentries >= HRL_MAX_LIMITS)
+		return (EINVAL);
+
+	buflen = sizeof(struct hrl_limit) * uap->nentries;
+	buf = malloc(buflen, M_HRL, M_WAITOK);
+
 	/*
-	 * XXX: Check for being in jail?
+	 * Copy the limits to the temporary buffer.  We cannot
+	 * copy it directly to the userland because of the mutex.
	 */
-
-	mtx_lock(&hrllock);
-
-	if (suword(uap->required, nlimits) != 0) {
-		error = EINVAL;
-		goto out;
+	mtx_lock(&hrl_lock);
+	RB_FOREACH(node, hrl_tree, &hrls) {
+		/*
+		 * XXX: Do not show everything to the client; just the
+		 *      nodes that affect him.
+		 */
+		if (copied >= uap->nentries) {
+			error = EFBIG;
+			break;
+		}
+		*(buf + copied) = node->hn_limit;
+		copied++;
 	}
-
-	if (uap->nentries == 0 && uap->bufp == NULL) {
-		error = 0;
+	mtx_unlock(&hrl_lock);
+	if (error)
 		goto out;
-	}
 
-	if (uap->nentries < nlimits) {
-		error = EFBIG;
+	error = copyout(buf, uap->bufp, sizeof(struct hrl_limit) * copied);
+	if (error)
 		goto out;
-	}
 
-	if (nlimits == 0) {
-		error = 0;
+	if (suword(uap->required, copied) != 0) {
+		error = EINVAL;
 		goto out;
 	}
 
-	buflen = sizeof(struct hrl_limit) * nlimits;
-	error = copyout(limits, uap->bufp, buflen);
-
 out:
-	mtx_unlock(&hrllock);
-
+	free(buf, M_HRL);
+
 	return (error);
 }
 
 static void
 hrl_proc_exit(void *arg __unused, struct proc *p)
 {
+	struct hrl_node *node, *next;
+
 	/*
-	 * XXX: Remove per-process limits here.
+	 * Go through all the limits, looking for the ones with subject
+	 * equal to the exiting process, and remove them.
+	 *
+	 * XXX: What are we gonna do - insert a HRL entry for every process
+	 *      that inherits a limit set with setrlimit(2), or do some
+	 *      magic here, moving limits from the parent process that's
+	 *      exiting to its children?
	 */
+restart:
+	mtx_lock(&hrl_lock);
+	for (node = RB_MIN(hrl_tree, &hrls); node != NULL; node = next) {
+		next = RB_NEXT(hrl_tree, &hrls, node);
+
+		if (node->hn_limit.hl_subject != HRL_SUBJECT_PROCESS)
+			continue;
+		if (node->hn_limit.hl_subject_id != p->p_pid)
+			continue;
+
+		node = RB_REMOVE(hrl_tree, &hrls, node);
+		KASSERT(node != NULL, ("node removal failed"));
+		nhrls--;
+
+		mtx_unlock(&hrl_lock);
+		uma_zfree(hrl_zone, node);
+		goto restart;
+	}
+	mtx_unlock(&hrl_lock);
 }
 
 static void
 hrl_init(void)
 {
-	mtx_init(&hrllock, "hrl lock", NULL, MTX_DEF);
+
+	hrl_zone = uma_zcreate("hrl", sizeof(struct hrl_node), NULL, NULL,
+	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
+	mtx_init(&hrl_lock, "hrl lock", NULL, MTX_DEF);
 	EVENTHANDLER_REGISTER(process_exit, hrl_proc_exit, NULL, EVENTHANDLER_PRI_ANY);
 }
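
For readers who have not used the <sys/tree.h> macros the patch relies on: RB_HEAD and
RB_GENERATE_STATIC declare the tree type and generate its operations, RB_INSERT returns
the colliding node instead of inserting when an equal key is already present, RB_FIND
looks up a fully-filled key structure, and RB_FOREACH/RB_REMOVE handle traversal and
removal.  Below is a minimal, self-contained userland sketch of that same pattern; the
demo_* names are made up for the example and are not part of the patch or of the HRL code.

/*
 * Minimal userland sketch of the <sys/tree.h> red-black tree pattern
 * used in the patch.  All demo_* names are illustrative only.
 */
#include <sys/tree.h>

#include <stdio.h>
#include <stdlib.h>

struct demo_node {
	int			dn_key;
	RB_ENTRY(demo_node)	dn_entry;
};

static int
demo_compare(const struct demo_node *a, const struct demo_node *b)
{

	if (a->dn_key < b->dn_key)
		return (-1);
	else if (a->dn_key > b->dn_key)
		return (1);
	return (0);
}

static RB_HEAD(demo_tree, demo_node) demo_head = RB_INITIALIZER(demo_head);
RB_GENERATE_STATIC(demo_tree, demo_node, dn_entry, demo_compare);

int
main(void)
{
	struct demo_node *node, *existing, searched;
	int keys[] = { 42, 7, 7, 19 };
	size_t i;

	for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
		node = malloc(sizeof(*node));
		node->dn_key = keys[i];
		/* RB_INSERT returns NULL on success, or the colliding node. */
		existing = RB_INSERT(demo_tree, &demo_head, node);
		if (existing != NULL)
			free(node);	/* duplicate key; keep the existing node */
	}

	/* Look up by filling the key fields of a stack-allocated node. */
	searched.dn_key = 19;
	node = RB_FIND(demo_tree, &demo_head, &searched);
	printf("found 19: %s\n", node != NULL ? "yes" : "no");

	/* In-order traversal, as in the RB_FOREACH loop in hrl_get(). */
	RB_FOREACH(node, demo_tree, &demo_head)
		printf("%d\n", node->dn_key);

	return (0);
}

The hn_compare() routine in the patch orders nodes lexicographically on (subject,
subject_id, per, object, action), so filling those fields into a stack-allocated key and
calling RB_FIND, as hrl_adjust() does, locates exactly one rule; RB_INSERT with the same
ordering is what lets the new code overwrite or remove an existing limit instead of
blindly appending, which the old realloc-based array could not do.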