Date: Thu, 9 Aug 2018 12:11:50 +0000 (UTC)
From: Olivier Houchard <cognet@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r337533 - in head/sys/contrib/ck: include/gcc/ppc include/spinlock src
Message-ID: <201808091211.w79CBokk096121@repo.freebsd.org>
Author: cognet
Date: Thu Aug  9 12:11:49 2018
New Revision: 337533

URL: https://svnweb.freebsd.org/changeset/base/337533

Log:
  Import CK as of commit 08813496570879fbcc2adcdd9ddc0a054361bfde, mostly to
  avoid using lwsync on ppc32.

Modified:
  head/sys/contrib/ck/include/gcc/ppc/ck_pr.h
  head/sys/contrib/ck/include/spinlock/hclh.h
  head/sys/contrib/ck/src/ck_barrier_combining.c
Directory Properties:
  head/sys/contrib/ck/   (props changed)

Modified: head/sys/contrib/ck/include/gcc/ppc/ck_pr.h
==============================================================================
--- head/sys/contrib/ck/include/gcc/ppc/ck_pr.h    Thu Aug  9 12:09:35 2018    (r337532)
+++ head/sys/contrib/ck/include/gcc/ppc/ck_pr.h    Thu Aug  9 12:11:49 2018    (r337533)
@@ -67,21 +67,29 @@ ck_pr_stall(void)
         __asm__ __volatile__(I ::: "memory");  \
     }
 
-CK_PR_FENCE(atomic, "lwsync")
-CK_PR_FENCE(atomic_store, "lwsync")
+#ifdef CK_MD_PPC32_LWSYNC
+#define CK_PR_LWSYNCOP "lwsync"
+#else /* CK_MD_PPC32_LWSYNC_DISABLE */
+#define CK_PR_LWSYNCOP "sync"
+#endif
+
+CK_PR_FENCE(atomic, CK_PR_LWSYNCOP)
+CK_PR_FENCE(atomic_store, CK_PR_LWSYNCOP)
 CK_PR_FENCE(atomic_load, "sync")
-CK_PR_FENCE(store_atomic, "lwsync")
-CK_PR_FENCE(load_atomic, "lwsync")
-CK_PR_FENCE(store, "lwsync")
+CK_PR_FENCE(store_atomic, CK_PR_LWSYNCOP)
+CK_PR_FENCE(load_atomic, CK_PR_LWSYNCOP)
+CK_PR_FENCE(store, CK_PR_LWSYNCOP)
 CK_PR_FENCE(store_load, "sync")
-CK_PR_FENCE(load, "lwsync")
-CK_PR_FENCE(load_store, "lwsync")
+CK_PR_FENCE(load, CK_PR_LWSYNCOP)
+CK_PR_FENCE(load_store, CK_PR_LWSYNCOP)
 CK_PR_FENCE(memory, "sync")
-CK_PR_FENCE(acquire, "lwsync")
-CK_PR_FENCE(release, "lwsync")
-CK_PR_FENCE(acqrel, "lwsync")
-CK_PR_FENCE(lock, "lwsync")
-CK_PR_FENCE(unlock, "lwsync")
+CK_PR_FENCE(acquire, CK_PR_LWSYNCOP)
+CK_PR_FENCE(release, CK_PR_LWSYNCOP)
+CK_PR_FENCE(acqrel, CK_PR_LWSYNCOP)
+CK_PR_FENCE(lock, CK_PR_LWSYNCOP)
+CK_PR_FENCE(unlock, CK_PR_LWSYNCOP)
+
+#undef CK_PR_LWSYNCOP
 
 #undef CK_PR_FENCE
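The hunk above matters because lwsync is not implemented by every 32-bit
PowerPC core (on some it can raise an illegal-instruction exception), so the
import gates the cheaper barrier behind CK_MD_PPC32_LWSYNC and falls back to
the architecturally universal sync. A minimal sketch of the resulting
selection, assuming CK_MD_PPC32_LWSYNC comes from CK's generated ck_md.h;
PPC_FENCE_LIGHT and fence_release are illustrative names, not CK API, and the
inline asm only assembles on PowerPC targets:

#ifdef CK_MD_PPC32_LWSYNC
#define PPC_FENCE_LIGHT "lwsync"  /* orders all pairs except store->load */
#else
#define PPC_FENCE_LIGHT "sync"    /* heavier, but valid on every ppc32 core */
#endif

static inline void
fence_release(void)
{

        /* A release fence only needs load->store and store->store
         * ordering, which lwsync provides where it is available. */
        __asm__ __volatile__(PPC_FENCE_LIGHT ::: "memory");
}

Note that the fences which require store->load ordering (atomic_load,
store_load, memory) keep the full sync in both configurations, since lwsync
never orders a store against a later load.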
Modified: head/sys/contrib/ck/include/spinlock/hclh.h
==============================================================================
--- head/sys/contrib/ck/include/spinlock/hclh.h    Thu Aug  9 12:09:35 2018    (r337532)
+++ head/sys/contrib/ck/include/spinlock/hclh.h    Thu Aug  9 12:11:49 2018    (r337533)
@@ -81,6 +81,8 @@ ck_spinlock_hclh_lock(struct ck_spinlock_hclh **glob_q
     thread->wait = true;
     thread->splice = false;
     thread->cluster_id = (*local_queue)->cluster_id;
+    /* Make sure previous->previous doesn't appear to be NULL */
+    thread->previous = *local_queue;
 
     /* Serialize with respect to update of local queue. */
     ck_pr_fence_store_atomic();
@@ -91,13 +93,15 @@ ck_spinlock_hclh_lock(struct ck_spinlock_hclh **glob_q
 
     /* Wait until previous thread from the local queue is done with lock. */
     ck_pr_fence_load();
-    if (previous->previous != NULL &&
-        previous->cluster_id == thread->cluster_id) {
-        while (ck_pr_load_uint(&previous->wait) == true)
+    if (previous->previous != NULL) {
+        while (ck_pr_load_uint(&previous->wait) == true &&
+            ck_pr_load_int(&previous->cluster_id) == thread->cluster_id &&
+            ck_pr_load_uint(&previous->splice) == false)
             ck_pr_stall();
 
         /* We're head of the global queue, we're done */
-        if (ck_pr_load_uint(&previous->splice) == false)
+        if (ck_pr_load_int(&previous->cluster_id) == thread->cluster_id &&
+            ck_pr_load_uint(&previous->splice) == false)
             return;
     }
 

Modified: head/sys/contrib/ck/src/ck_barrier_combining.c
==============================================================================
--- head/sys/contrib/ck/src/ck_barrier_combining.c    Thu Aug  9 12:09:35 2018    (r337532)
+++ head/sys/contrib/ck/src/ck_barrier_combining.c    Thu Aug  9 12:11:49 2018    (r337533)
@@ -35,7 +35,7 @@ struct ck_barrier_combining_queue {
     struct ck_barrier_combining_group *tail;
 };
 
-CK_CC_INLINE static struct ck_barrier_combining_group *
+static struct ck_barrier_combining_group *
 ck_barrier_combining_queue_dequeue(struct ck_barrier_combining_queue *queue)
 {
     struct ck_barrier_combining_group *front = NULL;
@@ -48,7 +48,7 @@ ck_barrier_combining_queue_dequeue(struct ck_barrier_c
     return front;
 }
 
-CK_CC_INLINE static void
+static void
 ck_barrier_combining_insert(struct ck_barrier_combining_group *parent,
     struct ck_barrier_combining_group *tnode,
     struct ck_barrier_combining_group **child)
@@ -72,7 +72,7 @@ ck_barrier_combining_insert(struct ck_barrier_combinin
  * into the barrier's tree. We use a queue to implement this
  * traversal.
  */
-CK_CC_INLINE static void
+static void
 ck_barrier_combining_queue_enqueue(struct ck_barrier_combining_queue *queue,
     struct ck_barrier_combining_group *node_value)
 {
@@ -185,10 +185,10 @@ ck_barrier_combining_aux(struct ck_barrier_combining *
         ck_pr_fence_store();
         ck_pr_store_uint(&tnode->sense, ~tnode->sense);
     } else {
-        ck_pr_fence_memory();
         while (sense != ck_pr_load_uint(&tnode->sense))
             ck_pr_stall();
     }
+    ck_pr_fence_memory();
 
     return;
 }
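In the ck_barrier_combining.c change, dropping CK_CC_INLINE leaves the queue
helpers as plain static functions the compiler may inline at its discretion,
and the full memory fence moves out of the waiter branch: issued before the
spin loop it ordered nothing useful, whereas after the if/else both the
releasing thread and the spinning waiters are fenced once the barrier is
crossed. A minimal sketch of that sense-reversal pattern with the relocated
fence, built on CK's ck_pr primitives; the toy_barrier type and
toy_barrier_wait function are illustrative, not CK's barrier API:

#include <ck_pr.h>

struct toy_barrier {
        unsigned int count;     /* arrivals in the current round */
        unsigned int total;     /* number of participating threads */
        unsigned int sense;     /* flipped once per round */
};

static void
toy_barrier_wait(struct toy_barrier *b, unsigned int *local_sense)
{
        unsigned int sense = ~*local_sense;

        *local_sense = sense;
        if (ck_pr_faa_uint(&b->count, 1) == b->total - 1) {
                /* Last arrival: reset the counter, then release the
                 * waiters by publishing the new sense value. */
                ck_pr_store_uint(&b->count, 0);
                ck_pr_fence_store();
                ck_pr_store_uint(&b->sense, sense);
        } else {
                while (ck_pr_load_uint(&b->sense) != sense)
                        ck_pr_stall();
        }

        /* Full fence on both paths, as in the hunk above: a fence
         * placed before the spin loop cannot keep post-barrier
         * accesses from being reordered with the loads in the loop. */
        ck_pr_fence_memory();
}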