Skip site navigation (1)Skip section navigation (2)
Date:      Fri, 8 Jan 2016 21:40:15 -0500 (EST)
From:      Rick Macklem <rmacklem@uoguelph.ca>
To:        Jeff Darcy <jdarcy@redhat.com>
Cc:        Raghavendra G <raghavendra@gluster.com>, freebsd-fs <freebsd-fs@freebsd.org>, Hubbard Jordan <jkh@ixsystems.com>,  Xavier Hernandez <xhernandez@datalab.es>,  Gluster Devel <gluster-devel@gluster.org>
Subject:   Re: [Gluster-devel] FreeBSD port of GlusterFS racks up a lot of CPU usage
Message-ID:  <2013962695.154259810.1452307215168.JavaMail.zimbra@uoguelph.ca>
In-Reply-To: <1924941590.6473225.1452248249994.JavaMail.zimbra@redhat.com>
References:  <571237035.145690509.1451437960464.JavaMail.zimbra@uoguelph.ca> <20151230103152.GS13942@ndevos-x240.usersys.redhat.com> <2D8C2729-D556-479B-B4E2-66E1BB222F41@ixsystems.com> <1083933309.146084334.1451517977647.JavaMail.zimbra@uoguelph.ca> <CADRNtgStOg8UZfxNt-SzvvPf7d1J7CC_gi49ww3BbixU0Ey-rg@mail.gmail.com> <568F6D07.6070500@datalab.es> <CADRNtgRM17Eg3Z=LWifVNo=ai72dMiEVRKS3RwNfQ-dK7Pspew@mail.gmail.com> <1924941590.6473225.1452248249994.JavaMail.zimbra@redhat.com>

index | next in thread | previous in thread | raw e-mail

[-- Attachment #1 --]
Oops, I realized the last patch did a write(2) while holding a pthread_mutex.
I've never used pthread_mutexes, but I suspect this isn't allowed. The attached
updated patch delays the write() until after the pthread_mutex_unlock().

Sorry about the confusion, rick

----- Original Message -----
> > > I don't know anything about gluster's poll implementation so I may
> > > be totally wrong, but would it be possible to use an eventfd (or a
> > > pipe if eventfd is not supported) to signal the need to add more
> > > file descriptors to the poll call ?
> > >
> > >
> > > The poll call should listen on this new fd. When we need to change
> > > the fd list, we should simply write to the eventfd or pipe from
> > > another thread.  This will cause the poll call to return and we will
> > > be able to change the fd list without having a short timeout nor
> > > having to decide on any trade-off.
> > 
> >
> > That's a nice idea. Based on my understanding of why timeouts are being
> > used, this approach can work.
> 
> The own-thread code which preceded the current poll implementation did
> something similar, using a pipe fd to be woken up for new *outgoing*
> messages.  That code still exists, and might provide some insight into
> how to do this for the current poll code.
> _______________________________________________
> freebsd-fs@freebsd.org mailing list
> https://lists.freebsd.org/mailman/listinfo/freebsd-fs
> To unsubscribe, send any mail to "freebsd-fs-unsubscribe@freebsd.org"
> 

[-- Attachment #2 --]
--- glusterfs-3.7.6/libglusterfs/src/event-poll.c.sav	2016-01-06 15:58:03.522286000 -0500
+++ glusterfs-3.7.6/libglusterfs/src/event-poll.c	2016-01-08 18:14:57.658652000 -0500
@@ -180,6 +180,15 @@ event_pool_new_poll (int count, int even
         return event_pool;
 }
 
+static void
+event_pool_changed (struct event_pool *event_pool)
+{
+
+        /* Write a byte into the breaker pipe to wake up poll(). */
+        if (event_pool->breaker[1] >= 0)
+                write(event_pool->breaker[1], "X", 1);
+}
+
 
 static int
 event_register_poll (struct event_pool *event_pool, int fd,
@@ -187,6 +196,7 @@ event_register_poll (struct event_pool *
                      void *data, int poll_in, int poll_out)
 {
         int idx = -1;
+        int changed = 0;
 
         GF_VALIDATE_OR_GOTO ("event", event_pool, out);
 
@@ -245,10 +255,13 @@ event_register_poll (struct event_pool *
                 }
 
                 event_pool->changed = 1;
+                changed = 1;
 
         }
 unlock:
         pthread_mutex_unlock (&event_pool->mutex);
+        if (changed != 0)
+                event_pool_changed(event_pool);
 
 out:
         return idx;
@@ -259,6 +272,7 @@ static int
 event_unregister_poll (struct event_pool *event_pool, int fd, int idx_hint)
 {
         int idx = -1;
+        int changed = 0;
 
         GF_VALIDATE_OR_GOTO ("event", event_pool, out);
 
@@ -276,9 +290,12 @@ event_unregister_poll (struct event_pool
 
                 event_pool->reg[idx] =  event_pool->reg[--event_pool->used];
                 event_pool->changed = 1;
+                changed = 1;
         }
 unlock:
         pthread_mutex_unlock (&event_pool->mutex);
+        if (changed != 0)
+                event_pool_changed(event_pool);
 
 out:
         return idx;
@@ -304,6 +321,7 @@ event_select_on_poll (struct event_pool 
                       int poll_in, int poll_out)
 {
         int idx = -1;
+        int changed = 0;
 
         GF_VALIDATE_OR_GOTO ("event", event_pool, out);
 
@@ -349,11 +367,15 @@ event_select_on_poll (struct event_pool 
                         break;
                 }
 
-                if (poll_in + poll_out > -2)
+                if (poll_in + poll_out > -2) {
                         event_pool->changed = 1;
+                        changed = 1;
+                }
         }
 unlock:
         pthread_mutex_unlock (&event_pool->mutex);
+        if (changed != 0)
+                event_pool_changed(event_pool);
 
 out:
         return idx;
@@ -448,6 +470,7 @@ event_dispatch_poll (struct event_pool *
         int              size = 0;
         int              i = 0;
         int              ret = -1;
+        char             x;
 
         GF_VALIDATE_OR_GOTO ("event", event_pool, out);
 
@@ -472,7 +495,7 @@ event_dispatch_poll (struct event_pool *
                 size = event_dispatch_poll_resize (event_pool, ufds, size);
                 ufds = event_pool->evcache;
 
-                ret = poll (ufds, size, 1);
+                ret = poll (ufds, size, -1);
 
                 if (ret == 0)
                         /* timeout */
@@ -482,7 +505,13 @@ event_dispatch_poll (struct event_pool *
                         /* sys call */
                         continue;
 
-                for (i = 0; i < size; i++) {
+                if (ufds[0].revents != 0 && event_pool->breaker[0] >= 0) {
+                        /* Just read all the junk in the breaker pipe. */
+                        while (read(event_pool->breaker[0], &x, 1) > 0)
+                                ;
+                }
+
+                for (i = 1; i < size; i++) {
                         if (!ufds[i].revents)
                                 continue;
 
home | help

Want to link to this message? Use this
URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?2013962695.154259810.1452307215168.JavaMail.zimbra>