ibmvnic: Add ethtool private flag for driver-defined queue limits
authorThomas Falcon <tlfalcon@linux.ibm.com>
Fri, 28 Sep 2018 23:38:26 +0000 (18:38 -0500)
committerDavid S. Miller <davem@davemloft.net>
Tue, 2 Oct 2018 06:31:16 +0000 (23:31 -0700)
When choosing channel amounts and ring sizes, the maximums in the
ibmvnic driver are defined by the virtual i/o server management
partition. Even though they are defined as maximums, the client
driver may in fact successfully request resources that exceed
these limits, which are mostly dependent on a user's hardware.

With this in mind, provide an ethtool flag that when enabled will
allow the user to request resources limited by driver-defined
maximums instead of limits defined by the management partition.
The driver will try to honor the user's request, but the request may
not be allowed by the management partition. In this case, the driver
requests as close as it can get to the desired amount until it succeeds.

Signed-off-by: Thomas Falcon <tlfalcon@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h

index a8369addfe688412212d8eb7716f6cbbcbb41d28..ad898e8eaca1609046fbb9c020c7ad2fb547864f 100644 (file)
@@ -2364,8 +2364,13 @@ static void ibmvnic_get_ringparam(struct net_device *netdev,
 {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 
-       ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
-       ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
+       if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
+               ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
+               ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
+       } else {
+               ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
+               ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
+       }
        ring->rx_mini_max_pending = 0;
        ring->rx_jumbo_max_pending = 0;
        ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
@@ -2378,21 +2383,23 @@ static int ibmvnic_set_ringparam(struct net_device *netdev,
                                 struct ethtool_ringparam *ring)
 {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+       int ret;
 
-       if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq  ||
-           ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
-               netdev_err(netdev, "Invalid request.\n");
-               netdev_err(netdev, "Max tx buffers = %llu\n",
-                          adapter->max_rx_add_entries_per_subcrq);
-               netdev_err(netdev, "Max rx buffers = %llu\n",
-                          adapter->max_tx_entries_per_subcrq);
-               return -EINVAL;
-       }
-
+       ret = 0;
        adapter->desired.rx_entries = ring->rx_pending;
        adapter->desired.tx_entries = ring->tx_pending;
 
-       return wait_for_reset(adapter);
+       ret = wait_for_reset(adapter);
+
+       if (!ret &&
+           (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
+            adapter->req_tx_entries_per_subcrq != ring->tx_pending))
+               netdev_info(netdev,
+                           "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
+                           ring->rx_pending, ring->tx_pending,
+                           adapter->req_rx_add_entries_per_subcrq,
+                           adapter->req_tx_entries_per_subcrq);
+       return ret;
 }
 
 static void ibmvnic_get_channels(struct net_device *netdev,
@@ -2400,8 +2407,14 @@ static void ibmvnic_get_channels(struct net_device *netdev,
 {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 
-       channels->max_rx = adapter->max_rx_queues;
-       channels->max_tx = adapter->max_tx_queues;
+       if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
+               channels->max_rx = adapter->max_rx_queues;
+               channels->max_tx = adapter->max_tx_queues;
+       } else {
+               channels->max_rx = IBMVNIC_MAX_QUEUES;
+               channels->max_tx = IBMVNIC_MAX_QUEUES;
+       }
+
        channels->max_other = 0;
        channels->max_combined = 0;
        channels->rx_count = adapter->req_rx_queues;
@@ -2414,11 +2427,23 @@ static int ibmvnic_set_channels(struct net_device *netdev,
                                struct ethtool_channels *channels)
 {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+       int ret;
 
+       ret = 0;
        adapter->desired.rx_queues = channels->rx_count;
        adapter->desired.tx_queues = channels->tx_count;
 
-       return wait_for_reset(adapter);
+       ret = wait_for_reset(adapter);
+
+       if (!ret &&
+           (adapter->req_rx_queues != channels->rx_count ||
+            adapter->req_tx_queues != channels->tx_count))
+               netdev_info(netdev,
+                           "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
+                           channels->rx_count, channels->tx_count,
+                           adapter->req_rx_queues, adapter->req_tx_queues);
+       return ret;
+
 }
 
 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -2426,32 +2451,43 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
        struct ibmvnic_adapter *adapter = netdev_priv(dev);
        int i;
 
-       if (stringset != ETH_SS_STATS)
-               return;
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
+                               i++, data += ETH_GSTRING_LEN)
+                       memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
 
-       for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
-               memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
+               for (i = 0; i < adapter->req_tx_queues; i++) {
+                       snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
+                       data += ETH_GSTRING_LEN;
 
-       for (i = 0; i < adapter->req_tx_queues; i++) {
-               snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
-               data += ETH_GSTRING_LEN;
+                       snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
+                       data += ETH_GSTRING_LEN;
 
-               snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
-               data += ETH_GSTRING_LEN;
+                       snprintf(data, ETH_GSTRING_LEN,
+                                "tx%d_dropped_packets", i);
+                       data += ETH_GSTRING_LEN;
+               }
 
-               snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
-               data += ETH_GSTRING_LEN;
-       }
+               for (i = 0; i < adapter->req_rx_queues; i++) {
+                       snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
+                       data += ETH_GSTRING_LEN;
 
-       for (i = 0; i < adapter->req_rx_queues; i++) {
-               snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
-               data += ETH_GSTRING_LEN;
+                       snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
+                       data += ETH_GSTRING_LEN;
 
-               snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
-               data += ETH_GSTRING_LEN;
+                       snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
+                       data += ETH_GSTRING_LEN;
+               }
+               break;
 
-               snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
-               data += ETH_GSTRING_LEN;
+       case ETH_SS_PRIV_FLAGS:
+               for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
+                       strcpy(data + i * ETH_GSTRING_LEN,
+                              ibmvnic_priv_flags[i]);
+               break;
+       default:
+               return;
        }
 }
 
@@ -2464,6 +2500,8 @@ static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
                return ARRAY_SIZE(ibmvnic_stats) +
                       adapter->req_tx_queues * NUM_TX_STATS +
                       adapter->req_rx_queues * NUM_RX_STATS;
+       case ETH_SS_PRIV_FLAGS:
+               return ARRAY_SIZE(ibmvnic_priv_flags);
        default:
                return -EOPNOTSUPP;
        }
@@ -2514,6 +2552,25 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
        }
 }
 
+static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
+{
+       struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+
+       return adapter->priv_flags;
+}
+
+static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+       struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+       bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
+
+       if (which_maxes)
+               adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
+       else
+               adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
+
+       return 0;
+}
 static const struct ethtool_ops ibmvnic_ethtool_ops = {
        .get_drvinfo            = ibmvnic_get_drvinfo,
        .get_msglevel           = ibmvnic_get_msglevel,
@@ -2527,6 +2584,8 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
        .get_sset_count         = ibmvnic_get_sset_count,
        .get_ethtool_stats      = ibmvnic_get_ethtool_stats,
        .get_link_ksettings     = ibmvnic_get_link_ksettings,
+       .get_priv_flags         = ibmvnic_get_priv_flags,
+       .set_priv_flags         = ibmvnic_set_priv_flags,
 };
 
 /* Routines for managing CRQs/sCRQs  */
index f9a12e5843c4fbc04c196a8163bb59661013cd1e..18103b811d4db398df7ce6a6e27c6bda2077c4c2 100644 (file)
 #define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE)
 #define IBMVNIC_BUFFER_HLEN 500
 
+static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = {
+#define IBMVNIC_USE_SERVER_MAXES 0x1
+       "use-server-maxes"
+};
+
 struct ibmvnic_login_buffer {
        __be32 len;
        __be32 version;
@@ -970,6 +975,7 @@ struct ibmvnic_adapter {
        struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
        dma_addr_t ip_offload_ctrl_tok;
        u32 msg_enable;
+       u32 priv_flags;
 
        /* Vital Product Data (VPD) */
        struct ibmvnic_vpd *vpd;