Date: Mon, 31 Jul 2017 01:18:21 +0000 (UTC) From: Ian Lepore <ian@FreeBSD.org> To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org Subject: svn commit: r321745 - in head/sys: kern sys Message-ID: <201707310118.v6V1ILjF048523@repo.freebsd.org>
next in thread | raw e-mail | index | archive | help
Author: ian Date: Mon Jul 31 01:18:21 2017 New Revision: 321745 URL: https://svnweb.freebsd.org/changeset/base/321745 Log: Add clock_schedule(), a feature that allows realtime clock drivers to request that their clock_settime() methods be called at a given offset from top-of-second. This adds a timeout_task to the rtc_instance so that each clock can be separately added to taskqueue_thread with the scheduling it prefers, instead of looping through all the clocks at once with a single task on taskqueue_thread. If a driver doesn't call clock_schedule() the default is the old behavior: clock_settime() is queued immediately. The motivation behind this is that I was on the path of adding identical code to a third RTC driver to figure out a delta to top-of-second and sleep for that amount of time because writing the RTC registers resets the hardware's concept of top-of-second. (Sometimes it's not top-of-second, some RTC clocks tick over a half second after you set their time registers.) Worst-case would be to sleep for almost a full second, which is a rude thing to do on a shared task queue thread. Modified: head/sys/kern/subr_rtc.c head/sys/sys/clock.h Modified: head/sys/kern/subr_rtc.c ============================================================================== --- head/sys/kern/subr_rtc.c Mon Jul 31 00:59:28 2017 (r321744) +++ head/sys/kern/subr_rtc.c Mon Jul 31 01:18:21 2017 (r321745) @@ -95,7 +95,10 @@ struct rtc_instance { device_t clockdev; int resolution; int flags; + u_int schedns; struct timespec resadj; + struct timeout_task + stask; LIST_ENTRY(rtc_instance) rtc_entries; }; @@ -104,7 +107,6 @@ struct rtc_instance { * Clocks are updated using a task running on taskqueue_thread. 
*/ static void settime_task_func(void *arg, int pending); -static struct task settime_task = TASK_INITIALIZER(0, settime_task_func, NULL); /* * Registered clocks are kept in a list which is sorted by resolution; the more @@ -116,9 +118,9 @@ static struct sx rtc_list_lock; SX_SYSINIT(rtc_list_lock_init, &rtc_list_lock, "rtc list"); /* - * On the task thread, invoke the clock_settime() method of each registered - * clock. Do so holding only an sxlock, so that clock drivers are free to do - * whatever kind of locking or sleeping they need to. + * On the task thread, invoke the clock_settime() method of the clock. Do so + * holding no locks, so that clock drivers are free to do whatever kind of + * locking or sleeping they need to. */ static void settime_task_func(void *arg, int pending) @@ -126,21 +128,18 @@ settime_task_func(void *arg, int pending) struct timespec ts; struct rtc_instance *rtc; - sx_xlock(&rtc_list_lock); - LIST_FOREACH(rtc, &rtc_list, rtc_entries) { - if (!(rtc->flags & CLOCKF_SETTIME_NO_TS)) { - getnanotime(&ts); - if (!(rtc->flags & CLOCKF_SETTIME_NO_ADJ)) { - ts.tv_sec -= utc_offset(); - timespecadd(&ts, &rtc->resadj); - } - } else { - ts.tv_sec = 0; - ts.tv_nsec = 0; + rtc = arg; + if (!(rtc->flags & CLOCKF_SETTIME_NO_TS)) { + getnanotime(&ts); + if (!(rtc->flags & CLOCKF_SETTIME_NO_ADJ)) { + ts.tv_sec -= utc_offset(); + timespecadd(&ts, &rtc->resadj); } - CLOCK_SETTIME(rtc->clockdev, &ts); + } else { + ts.tv_sec = 0; + ts.tv_nsec = 0; } - sx_xunlock(&rtc_list_lock); + CLOCK_SETTIME(rtc->clockdev, &ts); } void @@ -152,8 +151,11 @@ clock_register_flags(device_t clockdev, long resolutio newrtc->clockdev = clockdev; newrtc->resolution = (int)resolution; newrtc->flags = flags; + newrtc->schedns = 0; newrtc->resadj.tv_sec = newrtc->resolution / 2 / 1000000; newrtc->resadj.tv_nsec = newrtc->resolution / 2 % 1000000 * 1000; + TIMEOUT_TASK_INIT(taskqueue_thread, &newrtc->stask, 0, + settime_task_func, newrtc); sx_xlock(&rtc_list_lock); if 
(LIST_EMPTY(&rtc_list)) { @@ -192,12 +194,32 @@ clock_unregister(device_t clockdev) LIST_FOREACH_SAFE(rtc, &rtc_list, rtc_entries, tmp) { if (rtc->clockdev == clockdev) { LIST_REMOVE(rtc, rtc_entries); - free(rtc, M_DEVBUF); + break; } } sx_xunlock(&rtc_list_lock); + if (rtc != NULL) { + taskqueue_cancel_timeout(taskqueue_thread, &rtc->stask, NULL); + taskqueue_drain_timeout(taskqueue_thread, &rtc->stask); + free(rtc, M_DEVBUF); + } } +void +clock_schedule(device_t clockdev, u_int offsetns) +{ + struct rtc_instance *rtc; + + sx_xlock(&rtc_list_lock); + LIST_FOREACH(rtc, &rtc_list, rtc_entries) { + if (rtc->clockdev == clockdev) { + rtc->schedns = offsetns; + break; + } + } + sx_xunlock(&rtc_list_lock); +} + /* * Initialize the system time. Must be called from a context which does not * restrict any locking or sleeping that clock drivers may need to do. @@ -275,9 +297,26 @@ inittodr(time_t base) void resettodr(void) { + struct timespec now; + struct rtc_instance *rtc; + sbintime_t sbt; + long waitns; if (disable_rtc_set) return; - taskqueue_enqueue(taskqueue_thread, &settime_task); + sx_xlock(&rtc_list_lock); + LIST_FOREACH(rtc, &rtc_list, rtc_entries) { + if (rtc->schedns != 0) { + getnanotime(&now); + waitns = rtc->schedns - now.tv_nsec; + if (waitns < 0) + waitns += 1000000000; + sbt = nstosbt(waitns); + } else + sbt = 0; + taskqueue_enqueue_timeout_sbt(taskqueue_thread, + &rtc->stask, -sbt, 0, C_PREL(31)); + } + sx_xunlock(&rtc_list_lock); } Modified: head/sys/sys/clock.h ============================================================================== --- head/sys/sys/clock.h Mon Jul 31 00:59:28 2017 (r321744) +++ head/sys/sys/clock.h Mon Jul 31 01:18:21 2017 (r321745) @@ -77,10 +77,17 @@ int clock_ct_to_ts(struct clocktime *, struct timespec void clock_ts_to_ct(struct timespec *, struct clocktime *); /* - * Time-of-day clock register/unregister functions, and associated flags. These - * functions can sleep. 
Upon return from unregister, the clock's methods are - * not running and will not be called again. + * Time-of-day clock functions and flags. These functions might sleep. * + * clock_register and clock_unregister() do what they say. Upon return from + * unregister, the clock's methods are not running and will not be called again. + * + * clock_schedule() requests that a registered clock's clock_settime() calls + * happen at the given offset into the second. The default is 0, meaning no + * specific scheduling. To schedule the call as soon after top-of-second as + * possible, specify 1. Each clock has its own schedule, but taskqueue_thread + * is shared by many tasks; the timing of the call is not guaranteed. + * * Flags: * * CLOCKF_SETTIME_NO_TS @@ -102,6 +109,7 @@ void clock_ts_to_ct(struct timespec *, struct clocktim void clock_register(device_t _clockdev, long _resolution_us); void clock_register_flags(device_t _clockdev, long _resolution_us, int _flags); +void clock_schedule(device_t clockdev, u_int _offsetns); void clock_unregister(device_t _clockdev); /*
Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?201707310118.v6V1ILjF048523>