Date: Fri, 12 Jun 2015 07:50:35 +0000 (UTC)
From: Roger Pau Monné <royger@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r284296 - in head/sys: dev/xen/blkback dev/xen/blkfront xen xen/interface/io
Message-ID: <201506120750.t5C7oZ92025431@svn.freebsd.org>
Author: royger
Date: Fri Jun 12 07:50:34 2015
New Revision: 284296
URL: https://svnweb.freebsd.org/changeset/base/284296

Log:
  xen-blk{front/back}: remove broken FreeBSD extensions

  The FreeBSD extension adds a new request type, blkif_segment_block, which
  has a size of 112 bytes on both i386 and amd64. That is fine on amd64,
  where requests are also 112 bytes, but not on i386, where requests are
  only 108 bytes. On i386 we therefore overrun the ring slot whenever a
  request of type blkif_segment_block_t is queued, which is very bad.

  Remove this extension (including a cleanup of the public blkif.h header
  file) from blkfront and blkback. Two illustrative sketches (the size
  mismatch, and the new ring-size derivation) follow the diff below.

  Sponsored by: Citrix Systems R&D
  Tested-by:    cperciva

Modified:
  head/sys/dev/xen/blkback/blkback.c
  head/sys/dev/xen/blkfront/blkfront.c
  head/sys/dev/xen/blkfront/block.h
  head/sys/xen/blkif.h
  head/sys/xen/interface/io/blkif.h

Modified: head/sys/dev/xen/blkback/blkback.c
==============================================================================
--- head/sys/dev/xen/blkback/blkback.c  Fri Jun 12 07:23:55 2015  (r284295)
+++ head/sys/dev/xen/blkback/blkback.c  Fri Jun 12 07:50:34 2015  (r284296)
@@ -85,11 +85,19 @@ __FBSDID("$FreeBSD$");
 
 /*--------------------------- Compile-time Tunables --------------------------*/
 /**
+ * The maximum number of shared memory ring pages we will allow in a
+ * negotiated block-front/back communication channel. Allow enough
+ * ring space for all requests to be XBB_MAX_REQUEST_SIZE'd.
+ */
+#define XBB_MAX_RING_PAGES      32
+
+/**
  * The maximum number of outstanding request blocks (request headers plus
  * additional segment blocks) we will allow in a negotiated block-front/back
  * communication channel.
  */
-#define XBB_MAX_REQUESTS        256
+#define XBB_MAX_REQUESTS \
+        __CONST_RING_SIZE(blkif, PAGE_SIZE * XBB_MAX_RING_PAGES)
 
 /**
  * \brief Define to force all I/O to be performed on memory owned by the
@@ -148,14 +156,6 @@ static MALLOC_DEFINE(M_XENBLOCKBACK, "xb
                      (XBB_MAX_REQUEST_SIZE / PAGE_SIZE) + 1)))
 
 /**
- * The maximum number of shared memory ring pages we will allow in a
- * negotiated block-front/back communication channel. Allow enough
- * ring space for all requests to be XBB_MAX_REQUEST_SIZE'd.
- */
-#define XBB_MAX_RING_PAGES \
-        BLKIF_RING_PAGES(BLKIF_SEGS_TO_BLOCKS(XBB_MAX_SEGMENTS_PER_REQUEST) \
-                       * XBB_MAX_REQUESTS)
-/**
  * The maximum number of ring pages that we can allow per request list.
  * We limit this to the maximum number of segments per request, because
  * that is already a reasonable number of segments to aggregate.  This
@@ -1328,7 +1328,7 @@ xbb_queue_response(struct xbb_softc *xbb
        if (status != BLKIF_RSP_OKAY)
                xbb->reqs_completed_with_error++;
 
-       xbb->rings.common.rsp_prod_pvt += BLKIF_SEGS_TO_BLOCKS(req->nr_pages);
+       xbb->rings.common.rsp_prod_pvt++;
 
        xbb->reqs_queued_for_completion++;
 
@@ -1666,87 +1666,49 @@ xbb_dispatch_io(struct xbb_softc *xbb, s
                        goto send_response;
                }
 
-               block_segs = MIN(nreq->nr_pages,
-                   BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK);
+               block_segs = nseg;
                sg = ring_req->seg;
                last_block_sg = sg + block_segs;
-               while (1) {
-                       while (sg < last_block_sg) {
-                               KASSERT(seg_idx <
-                                       XBB_MAX_SEGMENTS_PER_REQLIST,
-                                       ("seg_idx %d is too large, max "
-                                       "segs %d\n", seg_idx,
-                                       XBB_MAX_SEGMENTS_PER_REQLIST));
-
-                               xbb_sg->first_sect = sg->first_sect;
-                               xbb_sg->last_sect = sg->last_sect;
-                               xbb_sg->nsect =
-                                   (int8_t)(sg->last_sect -
-                                   sg->first_sect + 1);
-
-                               if ((sg->last_sect >= (PAGE_SIZE >> 9))
-                                || (xbb_sg->nsect <= 0)) {
-                                       reqlist->status = BLKIF_RSP_ERROR;
-                                       goto send_response;
-                               }
-
-                               nr_sects += xbb_sg->nsect;
-                               map->host_addr = xbb_get_gntaddr(reqlist,
-                                   seg_idx, /*sector*/0);
-                               KASSERT(map->host_addr + PAGE_SIZE <=
-                                       xbb->ring_config.gnt_addr,
-                                       ("Host address %#jx len %d overlaps "
-                                        "ring address %#jx\n",
-                                       (uintmax_t)map->host_addr, PAGE_SIZE,
-                                       (uintmax_t)xbb->ring_config.gnt_addr));
-
-                               map->flags = GNTMAP_host_map;
-                               map->ref = sg->gref;
-                               map->dom = xbb->otherend_id;
-                               if (operation == BIO_WRITE)
-                                       map->flags |= GNTMAP_readonly;
-                               sg++;
-                               map++;
-                               xbb_sg++;
-                               seg_idx++;
-                               req_seg_idx++;
+               while (sg < last_block_sg) {
+                       KASSERT(seg_idx <
+                               XBB_MAX_SEGMENTS_PER_REQLIST,
+                               ("seg_idx %d is too large, max "
+                               "segs %d\n", seg_idx,
+                               XBB_MAX_SEGMENTS_PER_REQLIST));
+
+                       xbb_sg->first_sect = sg->first_sect;
+                       xbb_sg->last_sect = sg->last_sect;
+                       xbb_sg->nsect =
+                           (int8_t)(sg->last_sect -
+                           sg->first_sect + 1);
+
+                       if ((sg->last_sect >= (PAGE_SIZE >> 9))
+                        || (xbb_sg->nsect <= 0)) {
+                               reqlist->status = BLKIF_RSP_ERROR;
+                               goto send_response;
                        }
-                       block_segs = MIN(nseg - req_seg_idx,
-                           BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK);
-                       if (block_segs == 0)
-                               break;
-
-                       /*
-                        * Fetch the next request block full of SG elements.
-                        * For now, only the spacing between entries is
-                        * different in the different ABIs, not the sg entry
-                        * layout.
-                        */
-                       req_ring_idx++;
-                       switch (xbb->abi) {
-                       case BLKIF_PROTOCOL_NATIVE:
-                               sg = BLKRING_GET_SEG_BLOCK(&xbb->rings.native,
-                                   req_ring_idx);
-                               break;
-                       case BLKIF_PROTOCOL_X86_32:
-                       {
-                               sg = BLKRING_GET_SEG_BLOCK(&xbb->rings.x86_32,
-                                   req_ring_idx);
-                               break;
-                       }
-                       case BLKIF_PROTOCOL_X86_64:
-                       {
-                               sg = BLKRING_GET_SEG_BLOCK(&xbb->rings.x86_64,
-                                   req_ring_idx);
-                               break;
-                       }
-                       default:
-                               panic("Unexpected blkif protocol ABI.");
-                               /* NOTREACHED */
-                       }
-                       last_block_sg = sg + block_segs;
+                       nr_sects += xbb_sg->nsect;
+                       map->host_addr = xbb_get_gntaddr(reqlist,
+                           seg_idx, /*sector*/0);
+                       KASSERT(map->host_addr + PAGE_SIZE <=
+                               xbb->ring_config.gnt_addr,
+                               ("Host address %#jx len %d overlaps "
+                                "ring address %#jx\n",
+                               (uintmax_t)map->host_addr, PAGE_SIZE,
+                               (uintmax_t)xbb->ring_config.gnt_addr));
+
+                       map->flags = GNTMAP_host_map;
+                       map->ref = sg->gref;
+                       map->dom = xbb->otherend_id;
+                       if (operation == BIO_WRITE)
+                               map->flags |= GNTMAP_readonly;
+                       sg++;
+                       map++;
+                       xbb_sg++;
+                       seg_idx++;
+                       req_seg_idx++;
                }
 
                /* Convert to the disk's sector size */
@@ -2000,8 +1962,7 @@ xbb_run_queue(void *context, int pending
                         * response be generated before we make room in
                         * the queue for that response.
                         */
-                       xbb->rings.common.req_cons +=
-                           BLKIF_SEGS_TO_BLOCKS(ring_req->nr_segments);
+                       xbb->rings.common.req_cons++;
                        xbb->reqs_received++;
 
                        cur_size = xbb_count_sects(ring_req);
@@ -3091,7 +3052,7 @@ xbb_collect_frontend_info(struct xbb_sof
         * Protocol defaults valid even if all negotiation fails.
         */
        xbb->ring_config.ring_pages = 1;
-       xbb->max_request_segments = BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK;
+       xbb->max_request_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
        xbb->max_request_size = xbb->max_request_segments * PAGE_SIZE;
 
        /*
@@ -3122,60 +3083,23 @@ xbb_collect_frontend_info(struct xbb_sof
         * fields.
         */
        ring_page_order = 0;
+       xbb->max_requests = 32;
+
        (void)xs_scanf(XST_NIL, otherend_path,
                       "ring-page-order", NULL, "%u",
                       &ring_page_order);
        xbb->ring_config.ring_pages = 1 << ring_page_order;
-       (void)xs_scanf(XST_NIL, otherend_path,
-                      "num-ring-pages", NULL, "%u",
-                      &xbb->ring_config.ring_pages);
        ring_size = PAGE_SIZE * xbb->ring_config.ring_pages;
        xbb->max_requests = BLKIF_MAX_RING_REQUESTS(ring_size);
 
-       (void)xs_scanf(XST_NIL, otherend_path,
-                      "max-requests", NULL, "%u",
-                      &xbb->max_requests);
-
-       (void)xs_scanf(XST_NIL, otherend_path,
-                      "max-request-segments", NULL, "%u",
-                      &xbb->max_request_segments);
-
-       (void)xs_scanf(XST_NIL, otherend_path,
-                      "max-request-size", NULL, "%u",
-                      &xbb->max_request_size);
-
        if (xbb->ring_config.ring_pages > XBB_MAX_RING_PAGES) {
                xenbus_dev_fatal(xbb->dev, EINVAL,
                                 "Front-end specified ring-pages of %u "
-                                "exceeds backend limit of %zu. "
+                                "exceeds backend limit of %u. "
                                 "Unable to connect.",
                                 xbb->ring_config.ring_pages,
                                 XBB_MAX_RING_PAGES);
                return (EINVAL);
-       } else if (xbb->max_requests > XBB_MAX_REQUESTS) {
-               xenbus_dev_fatal(xbb->dev, EINVAL,
-                                "Front-end specified max_requests of %u "
-                                "exceeds backend limit of %u. "
-                                "Unable to connect.",
-                                xbb->max_requests,
-                                XBB_MAX_REQUESTS);
-               return (EINVAL);
-       } else if (xbb->max_request_segments > XBB_MAX_SEGMENTS_PER_REQUEST) {
-               xenbus_dev_fatal(xbb->dev, EINVAL,
-                                "Front-end specified max_requests_segments "
-                                "of %u exceeds backend limit of %u. "
-                                "Unable to connect.",
-                                xbb->max_request_segments,
-                                XBB_MAX_SEGMENTS_PER_REQUEST);
-               return (EINVAL);
-       } else if (xbb->max_request_size > XBB_MAX_REQUEST_SIZE) {
-               xenbus_dev_fatal(xbb->dev, EINVAL,
-                                "Front-end specified max_request_size "
-                                "of %u exceeds backend limit of %u. "
-                                "Unable to connect.",
-                                xbb->max_request_size,
-                                XBB_MAX_REQUEST_SIZE);
-               return (EINVAL);
        }
 
        if (xbb->ring_config.ring_pages == 1) {
@@ -3723,18 +3647,6 @@ xbb_attach(device_t dev)
                return (error);
        }
 
-       /*
-        * Amazon EC2 client compatility. They refer to max-ring-pages
-        * instead of to max-ring-page-order.
-        */
-       error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
-                         "max-ring-pages", "%zu", XBB_MAX_RING_PAGES);
-       if (error) {
-               xbb_attach_failed(xbb, error, "writing %s/max-ring-pages",
-                                 xenbus_get_node(xbb->dev));
-               return (error);
-       }
-
        max_ring_page_order = flsl(XBB_MAX_RING_PAGES) - 1;
        error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
                          "max-ring-page-order", "%u", max_ring_page_order);
@@ -3744,32 +3656,6 @@
                return (error);
        }
 
-       error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
-                         "max-requests", "%u", XBB_MAX_REQUESTS);
-       if (error) {
-               xbb_attach_failed(xbb, error, "writing %s/max-requests",
-                                 xenbus_get_node(xbb->dev));
-               return (error);
-       }
-
-       error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
-                         "max-request-segments", "%u",
-                         XBB_MAX_SEGMENTS_PER_REQUEST);
-       if (error) {
-               xbb_attach_failed(xbb, error, "writing %s/max-request-segments",
-                                 xenbus_get_node(xbb->dev));
-               return (error);
-       }
-
-       error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
-                         "max-request-size", "%u",
-                         XBB_MAX_REQUEST_SIZE);
-       if (error) {
-               xbb_attach_failed(xbb, error, "writing %s/max-request-size",
-                                 xenbus_get_node(xbb->dev));
-               return (error);
-       }
-
        /* Collect physical device information. */
        error = xs_gather(XST_NIL, xenbus_get_otherend_path(xbb->dev),
                          "device-type", NULL, &xbb->dev_type,

Modified: head/sys/dev/xen/blkfront/blkfront.c
==============================================================================
--- head/sys/dev/xen/blkfront/blkfront.c  Fri Jun 12 07:23:55 2015  (r284295)
+++ head/sys/dev/xen/blkfront/blkfront.c  Fri Jun 12 07:50:34 2015  (r284296)
@@ -174,7 +174,6 @@ xbd_queue_cb(void *arg, bus_dma_segment_
        sc = cm->cm_sc;
 
        if (error) {
-               printf("error %d in xbd_queue_cb\n", error);
                cm->cm_bp->bio_error = EIO;
                biodone(cm->cm_bp);
                xbd_free_command(cm);
@@ -191,55 +190,44 @@ xbd_queue_cb(void *arg, bus_dma_segment_
        ring_req->nr_segments = nsegs;
        cm->cm_nseg = nsegs;
 
-       block_segs = MIN(nsegs, BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK);
+       block_segs = MIN(nsegs, BLKIF_MAX_SEGMENTS_PER_REQUEST);
        sg = ring_req->seg;
        last_block_sg = sg + block_segs;
        sg_ref = cm->cm_sg_refs;
 
-       while (1) {
+       while (sg < last_block_sg) {
+               buffer_ma = segs->ds_addr;
+               fsect = (buffer_ma & PAGE_MASK) >> XBD_SECTOR_SHFT;
+               lsect = fsect + (segs->ds_len >> XBD_SECTOR_SHFT) - 1;
 
-               while (sg < last_block_sg) {
-                       buffer_ma = segs->ds_addr;
-                       fsect = (buffer_ma & PAGE_MASK) >> XBD_SECTOR_SHFT;
-                       lsect = fsect + (segs->ds_len >> XBD_SECTOR_SHFT) - 1;
+               KASSERT(lsect <= 7, ("XEN disk driver data cannot "
+                   "cross a page boundary"));
 
-                       KASSERT(lsect <= 7, ("XEN disk driver data cannot "
-                           "cross a page boundary"));
+               /* install a grant reference. */
+               ref = gnttab_claim_grant_reference(&cm->cm_gref_head);
 
-                       /* install a grant reference. */
-                       ref = gnttab_claim_grant_reference(&cm->cm_gref_head);
-
-                       /*
-                        * GNTTAB_LIST_END == 0xffffffff, but it is private
-                        * to gnttab.c.
-                        */
-                       KASSERT(ref != ~0, ("grant_reference failed"));
-
-                       gnttab_grant_foreign_access_ref(
-                               ref,
-                               xenbus_get_otherend_id(sc->xbd_dev),
-                               buffer_ma >> PAGE_SHIFT,
-                               ring_req->operation == BLKIF_OP_WRITE);
-
-                       *sg_ref = ref;
-                       *sg = (struct blkif_request_segment) {
-                               .gref = ref,
-                               .first_sect = fsect,
-                               .last_sect = lsect
-                       };
-                       sg++;
-                       sg_ref++;
-                       segs++;
-                       nsegs--;
-               }
-               block_segs = MIN(nsegs, BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK);
-               if (block_segs == 0)
-                       break;
+               /*
+                * GNTTAB_LIST_END == 0xffffffff, but it is private
+                * to gnttab.c.
+                */
+               KASSERT(ref != ~0, ("grant_reference failed"));
 
-               sg = BLKRING_GET_SEG_BLOCK(&sc->xbd_ring,
-                   sc->xbd_ring.req_prod_pvt);
-               sc->xbd_ring.req_prod_pvt++;
-               last_block_sg = sg + block_segs;
+               gnttab_grant_foreign_access_ref(
+                       ref,
+                       xenbus_get_otherend_id(sc->xbd_dev),
+                       buffer_ma >> PAGE_SHIFT,
+                       ring_req->operation == BLKIF_OP_WRITE);
+
+               *sg_ref = ref;
+               *sg = (struct blkif_request_segment) {
+                       .gref = ref,
+                       .first_sect = fsect,
+                       .last_sect = lsect
+               };
+               sg++;
+               sg_ref++;
+               segs++;
+               nsegs--;
        }
 
        if (cm->cm_operation == BLKIF_OP_READ)
@@ -396,8 +384,8 @@ xbd_startio(struct xbd_softc *sc)
        if (sc->xbd_state != XBD_STATE_CONNECTED)
                return;
 
-       while (RING_FREE_REQUESTS(&sc->xbd_ring) >=
-              sc->xbd_max_request_blocks) {
+       while (!RING_FULL(&sc->xbd_ring)) {
+
                if (sc->xbd_qfrozen_cnt != 0)
                        break;
 
@@ -450,13 +438,6 @@ xbd_bio_complete(struct xbd_softc *sc, s
        biodone(bp);
 }
 
-static int
-xbd_completion(struct xbd_command *cm)
-{
-       gnttab_end_foreign_access_references(cm->cm_nseg, cm->cm_sg_refs);
-       return (BLKIF_SEGS_TO_BLOCKS(cm->cm_nseg));
-}
-
 static void
 xbd_int(void *xsc)
 {
@@ -482,7 +463,9 @@ xbd_int(void *xsc)
                cm = &sc->xbd_shadow[bret->id];
                xbd_remove_cm(cm, XBD_Q_BUSY);
 
-               i += xbd_completion(cm);
+               gnttab_end_foreign_access_references(cm->cm_nseg,
+                   cm->cm_sg_refs);
+               i++;
 
                if (cm->cm_operation == BLKIF_OP_READ)
                        op = BUS_DMASYNC_POSTREAD;
@@ -1064,11 +1047,9 @@ xbd_initialize(struct xbd_softc *sc)
         */
       max_ring_page_order = 0;
        sc->xbd_ring_pages = 1;
-       sc->xbd_max_request_segments = BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK;
+       sc->xbd_max_request_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
        sc->xbd_max_request_size =
            XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments);
-       sc->xbd_max_request_blocks =
-           BLKIF_SEGS_TO_BLOCKS(sc->xbd_max_request_segments);
 
        /*
        * Protocol negotiation.
@@ -1095,24 +1076,10 @@ xbd_initialize(struct xbd_softc *sc)
        if (sc->xbd_ring_pages < 1)
                sc->xbd_ring_pages = 1;
 
-       sc->xbd_max_requests =
-           BLKIF_MAX_RING_REQUESTS(sc->xbd_ring_pages * PAGE_SIZE);
-       (void)xs_scanf(XST_NIL, otherend_path,
-                      "max-requests", NULL, "%" PRIu32,
-                      &sc->xbd_max_requests);
-
-       (void)xs_scanf(XST_NIL, otherend_path,
-                      "max-request-segments", NULL, "%" PRIu32,
-                      &sc->xbd_max_request_segments);
-
-       (void)xs_scanf(XST_NIL, otherend_path,
-                      "max-request-size", NULL, "%" PRIu32,
-                      &sc->xbd_max_request_size);
-
        if (sc->xbd_ring_pages > XBD_MAX_RING_PAGES) {
                device_printf(sc->xbd_dev,
                              "Back-end specified ring-pages of %u "
-                             "limited to front-end limit of %zu.\n",
+                             "limited to front-end limit of %u.\n",
                              sc->xbd_ring_pages, XBD_MAX_RING_PAGES);
                sc->xbd_ring_pages = XBD_MAX_RING_PAGES;
        }
@@ -1128,46 +1095,16 @@ xbd_initialize(struct xbd_softc *sc)
                sc->xbd_ring_pages = new_page_limit;
        }
 
+       sc->xbd_max_requests =
+           BLKIF_MAX_RING_REQUESTS(sc->xbd_ring_pages * PAGE_SIZE);
        if (sc->xbd_max_requests > XBD_MAX_REQUESTS) {
                device_printf(sc->xbd_dev,
                              "Back-end specified max_requests of %u "
-                             "limited to front-end limit of %u.\n",
+                             "limited to front-end limit of %zu.\n",
                              sc->xbd_max_requests, XBD_MAX_REQUESTS);
                sc->xbd_max_requests = XBD_MAX_REQUESTS;
        }
 
-       if (sc->xbd_max_request_segments > XBD_MAX_SEGMENTS_PER_REQUEST) {
-               device_printf(sc->xbd_dev,
-                             "Back-end specified max_request_segments of %u "
-                             "limited to front-end limit of %u.\n",
-                             sc->xbd_max_request_segments,
-                             XBD_MAX_SEGMENTS_PER_REQUEST);
-               sc->xbd_max_request_segments = XBD_MAX_SEGMENTS_PER_REQUEST;
-       }
-
-       if (sc->xbd_max_request_size > XBD_MAX_REQUEST_SIZE) {
-               device_printf(sc->xbd_dev,
-                             "Back-end specified max_request_size of %u "
-                             "limited to front-end limit of %u.\n",
-                             sc->xbd_max_request_size,
-                             XBD_MAX_REQUEST_SIZE);
-               sc->xbd_max_request_size = XBD_MAX_REQUEST_SIZE;
-       }
-
-       if (sc->xbd_max_request_size >
-           XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments)) {
-               device_printf(sc->xbd_dev,
-                             "Back-end specified max_request_size of %u "
-                             "limited to front-end limit of %u. (Too few segments.)\n",
-                             sc->xbd_max_request_size,
-                             XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments));
-               sc->xbd_max_request_size =
-                   XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments);
-       }
-
-       sc->xbd_max_request_blocks =
-           BLKIF_SEGS_TO_BLOCKS(sc->xbd_max_request_segments);
-
        /* Allocate datastructures based on negotiated values. */
        error = bus_dma_tag_create(
            bus_get_dma_tag(sc->xbd_dev),       /* parent */
@@ -1241,36 +1178,6 @@ xbd_initialize(struct xbd_softc *sc)
                }
        }
 
-       error = xs_printf(XST_NIL, node_path,
-                         "max-requests","%u",
-                         sc->xbd_max_requests);
-       if (error) {
-               xenbus_dev_fatal(sc->xbd_dev, error,
-                                "writing %s/max-requests",
-                                node_path);
-               return;
-       }
-
-       error = xs_printf(XST_NIL, node_path,
-                         "max-request-segments","%u",
-                         sc->xbd_max_request_segments);
-       if (error) {
-               xenbus_dev_fatal(sc->xbd_dev, error,
-                                "writing %s/max-request-segments",
-                                node_path);
-               return;
-       }
-
-       error = xs_printf(XST_NIL, node_path,
-                         "max-request-size","%u",
-                         sc->xbd_max_request_size);
-       if (error) {
-               xenbus_dev_fatal(sc->xbd_dev, error,
-                                "writing %s/max-request-size",
-                                node_path);
-               return;
-       }
-
        error = xs_printf(XST_NIL, node_path, "event-channel",
                          "%u", xen_intr_port(sc->xen_intr_handle));
        if (error) {

Modified: head/sys/dev/xen/blkfront/block.h
==============================================================================
--- head/sys/dev/xen/blkfront/block.h  Fri Jun 12 07:23:55 2015  (r284295)
+++ head/sys/dev/xen/blkfront/block.h  Fri Jun 12 07:50:34 2015  (r284296)
@@ -61,11 +61,19 @@
     ((size / PAGE_SIZE) + 1)
 
 /**
+ * The maximum number of shared memory ring pages we will allow in a
+ * negotiated block-front/back communication channel. Allow enough
+ * ring space for all requests to be XBD_MAX_REQUEST_SIZE'd.
+ */
+#define XBD_MAX_RING_PAGES      32
+
+/**
  * The maximum number of outstanding requests blocks (request headers plus
  * additional segment blocks) we will allow in a negotiated block-front/back
  * communication channel.
  */
-#define XBD_MAX_REQUESTS        256
+#define XBD_MAX_REQUESTS \
+        __CONST_RING_SIZE(blkif, PAGE_SIZE * XBD_MAX_RING_PAGES)
 
 /**
  * The maximum mapped region size per request we will allow in a negotiated
@@ -83,15 +91,6 @@
     (MIN(BLKIF_MAX_SEGMENTS_PER_REQUEST, \
          XBD_SIZE_TO_SEGS(XBD_MAX_REQUEST_SIZE)))
 
-/**
- * The maximum number of shared memory ring pages we will allow in a
- * negotiated block-front/back communication channel. Allow enough
- * ring space for all requests to be XBD_MAX_REQUEST_SIZE'd.
- */
-#define XBD_MAX_RING_PAGES \
-       BLKIF_RING_PAGES(BLKIF_SEGS_TO_BLOCKS(XBD_MAX_SEGMENTS_PER_REQUEST) \
-            * XBD_MAX_REQUESTS)
-
 typedef enum {
        XBDCF_Q_MASK = 0xFF,
        /* This command has contributed to xbd_qfrozen_cnt. */
@@ -175,7 +174,6 @@ struct xbd_softc {
        u_int                   xbd_ring_pages;
        uint32_t                xbd_max_requests;
        uint32_t                xbd_max_request_segments;
-       uint32_t                xbd_max_request_blocks;
        uint32_t                xbd_max_request_size;
        grant_ref_t             xbd_ring_ref[XBD_MAX_RING_PAGES];
        blkif_front_ring_t      xbd_ring;

Modified: head/sys/xen/blkif.h
==============================================================================
--- head/sys/xen/blkif.h  Fri Jun 12 07:23:55 2015  (r284295)
+++ head/sys/xen/blkif.h  Fri Jun 12 07:50:34 2015  (r284296)
@@ -46,7 +46,7 @@ struct blkif_x86_32_request {
        blkif_vdev_t   handle;       /* only for read/write requests */
        uint64_t       id;           /* private guest value, echoed in resp */
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
-       struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK];
+       struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 };
 struct blkif_x86_32_response {
        uint64_t        id;          /* copied from request */
@@ -64,7 +64,7 @@ struct blkif_x86_64_request {
        blkif_vdev_t   handle;       /* only for read/write requests */
        uint64_t       __attribute__((__aligned__(8))) id;
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
-       struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK];
+       struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 };
 struct blkif_x86_64_response {
        uint64_t       __attribute__((__aligned__(8))) id;
@@ -114,7 +114,7 @@ enum blkif_protocol {
 static void inline blkif_get_x86_32_req(blkif_request_t *dst,
                                         blkif_x86_32_request_t *src)
 {
-       int i, n = BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK;
+       int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
        dst->operation = src->operation;
        dst->nr_segments = src->nr_segments;
        dst->handle = src->handle;
@@ -129,7 +129,7 @@ static void inline blkif_get_x86_64_req(
                                         blkif_x86_64_request_t *src)
 {
-       int i, n = BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK;
+       int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
        dst->operation = src->operation;
        dst->nr_segments = src->nr_segments;
        dst->handle = src->handle;

Modified: head/sys/xen/interface/io/blkif.h
==============================================================================
--- head/sys/xen/interface/io/blkif.h  Fri Jun 12 07:23:55 2015  (r284295)
+++ head/sys/xen/interface/io/blkif.h  Fri Jun 12 07:50:34 2015  (r284296)
@@ -457,21 +457,9 @@
 #define BLKIF_OP_DISCARD           5
 
 /*
- * Maximum scatter/gather segments associated with a request header block.
- * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE.
- * NB. This could be 12 if the ring indexes weren't stored in the same page.
- */
-#define BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK 11
-
-/*
- * Maximum scatter/gather segments associated with a segment block.
- */
-#define BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK 14
-
-/*
  * Maximum scatter/gather segments per request (header + segment blocks).
  */
-#define BLKIF_MAX_SEGMENTS_PER_REQUEST 255
+#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
 
 /*
  * NB. first_sect and last_sect in blkif_request_segment, as well as
@@ -512,22 +500,11 @@ struct blkif_request {
        blkif_vdev_t   handle;       /* only for read/write requests */
        uint64_t       id;           /* private guest value, echoed in resp */
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
-       blkif_request_segment_t seg[BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK];
+       blkif_request_segment_t seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 };
 typedef struct blkif_request blkif_request_t;
 
 /*
- * A segment block is a ring request structure that contains only
- * segment data.
- *
- * sizeof(struct blkif_segment_block) <= sizeof(struct blkif_request)
- */
-struct blkif_segment_block {
-       blkif_request_segment_t seg[BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK];
-};
-typedef struct blkif_segment_block blkif_segment_block_t;
-
-/*
  * Cast to this structure when blkif_request.operation == BLKIF_OP_DISCARD
  * sizeof(struct blkif_request_discard) <= sizeof(struct blkif_request)
  */
@@ -564,21 +541,6 @@ typedef struct blkif_response blkif_resp
  */
 DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);
 
-/*
- * Index to, and treat as a segment block, an entry in the ring.
- */
-#define BLKRING_GET_SEG_BLOCK(_r, _idx) \
-       (((blkif_segment_block_t *)RING_GET_REQUEST(_r, _idx))->seg)
-
-/*
- * The number of ring request blocks required to handle an I/O
- * request containing _segs segments.
- */
-#define BLKIF_SEGS_TO_BLOCKS(_segs) \
-       ((((_segs - BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK) \
-       + (BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK - 1)) \
-       / BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK) + /*header_block*/1)
-
 #define VDISK_CDROM        0x1
 #define VDISK_REMOVABLE    0x2
 #define VDISK_READONLY     0x4
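
For readers following along, here is a minimal, self-contained sketch of the
size mismatch described in the log message. The structures below are
simplified stand-ins for the blkif request layouts (only the fields that
matter for the size calculation, not the canonical
sys/xen/interface/io/blkif.h definitions), and #pragma pack(4) is used to
approximate the i386 ABI's 4-byte alignment of 64-bit fields when building on
a 64-bit host. The 11- and 14-entry segment arrays correspond to the
BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK and BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK
constants removed above.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t grant_ref_t;

struct seg {                    /* mirrors blkif_request_segment (8 bytes) */
        grant_ref_t gref;
        uint8_t     first_sect, last_sect;
};

#pragma pack(push, 4)           /* i386 aligns 64-bit fields on 4 bytes */
struct req_i386 {               /* approximates the i386 blkif_request */
        uint8_t  operation;
        uint8_t  nr_segments;
        uint16_t handle;
        uint64_t id;
        uint64_t sector_number;
        struct seg seg[11];
};
#pragma pack(pop)

struct req_amd64 {              /* same fields, natural (amd64) alignment */
        uint8_t  operation;
        uint8_t  nr_segments;
        uint16_t handle;
        uint64_t id;
        uint64_t sector_number;
        struct seg seg[11];
};

struct segment_block {          /* the removed FreeBSD extension */
        struct seg seg[14];
};

int
main(void)
{
        /* Expected: 108, 112 and 112 bytes respectively. */
        printf("i386 request:  %zu bytes\n", sizeof(struct req_i386));
        printf("amd64 request: %zu bytes\n", sizeof(struct req_amd64));
        printf("segment block: %zu bytes\n", sizeof(struct segment_block));

        /* A segment block only fits in a slot that is at least as large. */
        if (sizeof(struct segment_block) > sizeof(struct req_i386))
                printf("i386: segment block overruns the ring slot\n");
        return (0);
}

On amd64 the extension happened to fit exactly, which is why the breakage
only shows up with i386 ring layouts.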
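The new XBB_MAX_REQUESTS/XBD_MAX_REQUESTS values are also no longer
hand-picked constants; they fall out of __CONST_RING_SIZE() applied to the
maximum ring size. The sketch below models that arithmetic. The 64-byte ring
header and 112-byte slot size are assumptions based on the standard Xen
shared-ring layout and the native request/response union, not values taken
from this commit, so treat the printed numbers as illustrative.

#include <stdio.h>

/*
 * Rough model of __CONST_RING_SIZE(blkif, size): usable slots are
 * (size - ring header) / slot size, rounded down to a power of two.
 * Header and slot sizes below are assumptions (see note above).
 */
#define PAGE_SIZE_BYTES  4096u
#define RING_HDR_BYTES   64u    /* producer/consumer indexes plus padding */
#define RING_SLOT_BYTES  112u   /* max(sizeof(request), sizeof(response)) */

static unsigned int
ring_slots(unsigned int ring_bytes)
{
        unsigned int n = (ring_bytes - RING_HDR_BYTES) / RING_SLOT_BYTES;
        unsigned int p = 1;

        while (p * 2 <= n)      /* round down to a power of two */
                p *= 2;
        return (p);
}

int
main(void)
{
        /* A single-page ring, the pre-existing default. */
        printf("1 page  : %u requests\n", ring_slots(PAGE_SIZE_BYTES));
        /* XBB_MAX_RING_PAGES / XBD_MAX_RING_PAGES are now 32 pages. */
        printf("32 pages: %u requests\n", ring_slots(32 * PAGE_SIZE_BYTES));
        return (0);
}

With those assumed sizes a one-page ring holds 32 requests and a 32-page ring
1024, which is why the old fixed limit of 256 and the separate max-requests,
max-request-segments and max-request-size negotiation keys could go away:
everything is now derived from the negotiated ring-page-order alone.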