Date: Wed, 26 Jun 2013 23:27:17 +0000 (UTC)
From: Jim Harris <jimharris@FreeBSD.org>
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r252271 - head/sys/dev/nvme
Message-ID: <201306262327.r5QNRHJI004428@svn.freebsd.org>
Author: jimharris
Date: Wed Jun 26 23:27:17 2013
New Revision: 252271
URL: http://svnweb.freebsd.org/changeset/base/252271

Log:
  Use MAXPHYS to specify the maximum I/O size for nvme(4).  Also allow admin
  commands to transfer up to this maximum I/O size, rather than the
  artificial limit previously imposed.  The larger I/O size is very
  beneficial for upcoming firmware download support.  This has the added
  benefit of simplifying the code since both admin and I/O commands now use
  the same maximum I/O size.

  Sponsored by:	Intel
  MFC after:	3 days

Modified:
  head/sys/dev/nvme/nvme.h
  head/sys/dev/nvme/nvme_ctrlr.c
  head/sys/dev/nvme/nvme_private.h
  head/sys/dev/nvme/nvme_qpair.c

Modified: head/sys/dev/nvme/nvme.h
==============================================================================
--- head/sys/dev/nvme/nvme.h	Wed Jun 26 23:20:08 2013	(r252270)
+++ head/sys/dev/nvme/nvme.h	Wed Jun 26 23:27:17 2013	(r252271)
@@ -33,6 +33,8 @@
 #include <sys/types.h>
 #endif
 
+#include <sys/param.h>
+
 #define	NVME_PASSTHROUGH_CMD		_IOWR('n', 0, struct nvme_pt_command)
 #define	NVME_RESET_CONTROLLER		_IO('n', 1)
 
@@ -45,6 +47,8 @@
  */
 #define NVME_GLOBAL_NAMESPACE_TAG	((uint32_t)0xFFFFFFFF)
 
+#define NVME_MAX_XFER_SIZE		MAXPHYS
+
 union cap_lo_register {
 	uint32_t	raw;
 	struct {

Modified: head/sys/dev/nvme/nvme_ctrlr.c
==============================================================================
--- head/sys/dev/nvme/nvme_ctrlr.c	Wed Jun 26 23:20:08 2013	(r252270)
+++ head/sys/dev/nvme/nvme_ctrlr.c	Wed Jun 26 23:27:17 2013	(r252271)
@@ -222,7 +222,6 @@ nvme_ctrlr_construct_admin_qpair(struct 
 	    0, /* vector */
 	    num_entries,
 	    NVME_ADMIN_TRACKERS,
-	    16*1024, /* max xfer size */
 	    ctrlr);
 }
 
@@ -256,16 +255,6 @@ nvme_ctrlr_construct_io_qpairs(struct nv
 	 */
 	num_trackers = min(num_trackers, (num_entries-1));
 
-	ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
-	TUNABLE_INT_FETCH("hw.nvme.max_xfer_size", &ctrlr->max_xfer_size);
-	/*
-	 * Check that tunable doesn't specify a size greater than what our
-	 *  driver supports, and is an even PAGE_SIZE multiple.
-	 */
-	if (ctrlr->max_xfer_size > NVME_MAX_XFER_SIZE ||
-	    ctrlr->max_xfer_size % PAGE_SIZE)
-		ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
-
 	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
 	    M_NVME, M_ZERO | M_WAITOK);
 
@@ -284,7 +273,6 @@ nvme_ctrlr_construct_io_qpairs(struct nv
 		    ctrlr->msix_enabled ? i+1 : 0, /* vector */
 		    num_entries,
 		    num_trackers,
-		    ctrlr->max_xfer_size,
 		    ctrlr);
 
 		if (ctrlr->per_cpu_io_queues)
@@ -1089,8 +1077,8 @@ intx:
 	if (!ctrlr->msix_enabled)
 		nvme_ctrlr_configure_intx(ctrlr);
 
+	ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
 	nvme_ctrlr_construct_admin_qpair(ctrlr);
-
 	status = nvme_ctrlr_construct_io_qpairs(ctrlr);
 
 	if (status != 0)

Modified: head/sys/dev/nvme/nvme_private.h
==============================================================================
--- head/sys/dev/nvme/nvme_private.h	Wed Jun 26 23:20:08 2013	(r252270)
+++ head/sys/dev/nvme/nvme_private.h	Wed Jun 26 23:27:17 2013	(r252271)
@@ -60,8 +60,6 @@ MALLOC_DECLARE(M_NVME);
 #define IDT32_PCI_ID		0x80d0111d /* 32 channel board */
 #define IDT8_PCI_ID		0x80d2111d /* 8 channel board */
 
-#define NVME_MAX_PRP_LIST_ENTRIES	(32)
-
 /*
  * For commands requiring more than 2 PRP entries, one PRP will be
  *  embedded in the command (prp1), and the rest of the PRP entries
@@ -69,7 +67,7 @@ MALLOC_DECLARE(M_NVME);
  *  that real max number of PRP entries we support is 32+1, which
  *  results in a max xfer size of 32*PAGE_SIZE.
  */
-#define NVME_MAX_XFER_SIZE	NVME_MAX_PRP_LIST_ENTRIES * PAGE_SIZE
+#define NVME_MAX_PRP_LIST_ENTRIES	(NVME_MAX_XFER_SIZE / PAGE_SIZE)
 
 #define NVME_ADMIN_TRACKERS	(16)
 #define NVME_ADMIN_ENTRIES	(128)
@@ -194,7 +192,6 @@ struct nvme_qpair {
 	struct resource		*res;
 	void 			*tag;
 
-	uint32_t		max_xfer_size;
 	uint32_t		num_entries;
 	uint32_t		num_trackers;
 	uint32_t		sq_tdbl_off;
@@ -446,7 +443,7 @@ void	nvme_ctrlr_post_failed_request(stru
 
 void	nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id,
 			     uint16_t vector, uint32_t num_entries,
-			     uint32_t num_trackers, uint32_t max_xfer_size,
+			     uint32_t num_trackers,
 			     struct nvme_controller *ctrlr);
 void	nvme_qpair_submit_tracker(struct nvme_qpair *qpair,
 				  struct nvme_tracker *tr);

Modified: head/sys/dev/nvme/nvme_qpair.c
==============================================================================
--- head/sys/dev/nvme/nvme_qpair.c	Wed Jun 26 23:20:08 2013	(r252270)
+++ head/sys/dev/nvme/nvme_qpair.c	Wed Jun 26 23:27:17 2013	(r252271)
@@ -460,7 +460,7 @@ nvme_qpair_msix_handler(void *arg)
 void
 nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id,
     uint16_t vector, uint32_t num_entries, uint32_t num_trackers,
-    uint32_t max_xfer_size, struct nvme_controller *ctrlr)
+    struct nvme_controller *ctrlr)
 {
 	struct nvme_tracker	*tr;
 	uint32_t		i;
@@ -478,7 +478,6 @@ nvme_qpair_construct(struct nvme_qpair *
 	num_trackers = min(num_trackers, 64);
 #endif
 	qpair->num_trackers = num_trackers;
-	qpair->max_xfer_size = max_xfer_size;
 	qpair->ctrlr = ctrlr;
 
 	if (ctrlr->msix_enabled) {
@@ -501,8 +500,8 @@ nvme_qpair_construct(struct nvme_qpair *
 
 	bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
 	    sizeof(uint64_t), PAGE_SIZE, BUS_SPACE_MAXADDR,
-	    BUS_SPACE_MAXADDR, NULL, NULL, qpair->max_xfer_size,
-	    (qpair->max_xfer_size/PAGE_SIZE)+1, PAGE_SIZE, 0,
+	    BUS_SPACE_MAXADDR, NULL, NULL, NVME_MAX_XFER_SIZE,
+	    (NVME_MAX_XFER_SIZE/PAGE_SIZE)+1, PAGE_SIZE, 0,
 	    NULL, NULL, &qpair->dma_tag);
 
 	qpair->num_cmds = 0;
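For anyone following along, here is a quick stand-alone userland sketch (not
part of the commit) of the sizing arithmetic the change relies on.  The
PAGE_SIZE and MAXPHYS values below are assumptions chosen for illustration
(4 KiB pages, 128 KiB MAXPHYS); in the kernel build both come from
<sys/param.h>, and the "+1" term mirrors the nsegments argument passed to
bus_dma_tag_create() in nvme_qpair_construct() above.

/*
 * prp_sizing.c -- illustration only; the constants below are assumed
 * values, not taken from the commit.  Build with: cc -o prp_sizing prp_sizing.c
 */
#include <stdio.h>

#define PAGE_SIZE			4096		/* assumed 4 KiB pages */
#define MAXPHYS				(128 * 1024)	/* assumed MAXPHYS default */

#define NVME_MAX_XFER_SIZE		MAXPHYS
#define NVME_MAX_PRP_LIST_ENTRIES	(NVME_MAX_XFER_SIZE / PAGE_SIZE)

int
main(void)
{
	/*
	 * One PRP entry rides in the command itself (prp1); the rest go in
	 * the PRP list pointed to by prp2, so a maximal transfer spans the
	 * list entries plus one extra page -- hence the "+1" DMA segment.
	 */
	printf("max xfer size:    %d bytes\n", NVME_MAX_XFER_SIZE);
	printf("PRP list entries: %d\n", NVME_MAX_PRP_LIST_ENTRIES);
	printf("max DMA segments: %d\n", (NVME_MAX_XFER_SIZE / PAGE_SIZE) + 1);
	return (0);
}

With those assumed values this prints 131072 bytes, 32 PRP list entries, and
33 segments -- the same 32+1 shape the comment in nvme_private.h describes,
except the limit now tracks MAXPHYS instead of a hard-coded 32 entries.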