Date:      Fri, 23 Mar 2018 18:35:59 +0000 (UTC)
From:      Hans Petter Selasky <hselasky@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r331455 - head/sys/dev/mlx5/mlx5_core
Message-ID:  <201803231835.w2NIZxI4066189@repo.freebsd.org>

Author: hselasky
Date: Fri Mar 23 18:35:59 2018
New Revision: 331455
URL: https://svnweb.freebsd.org/changeset/base/331455

Log:
  Fix incorrect page count when mlx5core is in internal error.
  
  Change the page cleanup flow when in internal error so that the page
  counts are properly decremented when reclaiming pages. This prevents
  timing out while waiting for pages that had in fact already been
  cleaned up.
  
  Submitted by:	slavash@
  MFC after:	1 week
  Sponsored by:	Mellanox Technologies
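
For context: when the device is in internal error, the new
reclaim_pages_cmd() below skips the firmware MANAGE_PAGES command and
instead synthesizes the command output by walking the driver's page
tree, so the regular reclaim_pages() path can still free the pages and
decrement the page counts.  A rough consumer-side sketch follows; it is
paraphrased for illustration only and is not part of this commit, and
the MLX5_ARRAY_GET64() accessor is assumed here as the counterpart of
the MLX5_ARRAY_SET64() setter used in the diff.

	/*
	 * Sketch only: how reclaim_pages() consumes the (possibly
	 * synthesized) MANAGE_PAGES output.  Identifiers follow the
	 * mlx5 driver; the exact upstream body differs.
	 */
	num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
	if (nclaimed != NULL)
		*nclaimed = num_claimed;

	for (i = 0; i < num_claimed; i++)
		free_4k(dev, MLX5_ARRAY_GET64(manage_pages_out, out, pas, i));

	dev->priv.fw_pages -= num_claimed;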

Modified:
  head/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c

Modified: head/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c	Fri Mar 23 18:34:38 2018	(r331454)
+++ head/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c	Fri Mar 23 18:35:59 2018	(r331455)
@@ -380,6 +380,37 @@ out_free:
 	return err;
 }
 
+static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
+			     u32 *in, int in_size, u32 *out, int out_size)
+{
+	struct mlx5_fw_page *fwp;
+	struct rb_node *p;
+	u32 func_id;
+	u32 npages;
+	u32 i = 0;
+
+	if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
+		return mlx5_cmd_exec(dev, in, in_size, out, out_size);
+
+	/* No hard feelings, we want our pages back! */
+	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
+	func_id = MLX5_GET(manage_pages_in, in, function_id);
+
+	p = rb_first(&dev->priv.page_root);
+	while (p && i < npages) {
+		fwp = rb_entry(p, struct mlx5_fw_page, rb_node);
+		p = rb_next(p);
+		if (fwp->func_id != func_id)
+			continue;
+
+		MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->dma_addr);
+		i++;
+	}
+
+	MLX5_SET(manage_pages_out, out, output_num_entries, i);
+	return 0;
+}
+
 static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
 			 int *nclaimed)
 {
@@ -404,7 +435,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u3
 	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
 
 	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+	err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
 	if (err) {
 		mlx5_core_err(dev, "failed reclaiming pages\n");
 		goto out_free;
@@ -531,19 +562,13 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *d
 		p = rb_first(&dev->priv.page_root);
 		if (p) {
 			fwp = rb_entry(p, struct mlx5_fw_page, rb_node);
-			if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
-				--dev->priv.fw_pages;
-				free_4k(dev, fwp->dma_addr);
-				nclaimed = 1;
-			} else {
-				err = reclaim_pages(dev, fwp->func_id,
-						    optimal_reclaimed_pages(),
-						    &nclaimed);
-				if (err) {
-					mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
-						       err);
-					return err;
-				}
+			err = reclaim_pages(dev, fwp->func_id,
+					    optimal_reclaimed_pages(),
+					    &nclaimed);
+			if (err) {
+				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
+					       err);
+				return err;
 			}
 
 			if (nclaimed)


