if (!ltb->buff)
return;
- if (!adapter->failover)
+ if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
+ adapter->reset_reason != VNIC_RESET_MOBILITY)
send_request_unmap(adapter, ltb->map_id);
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
{
int i;
- if (adapter->migrated)
- return;
-
adapter->replenish_task_cycles++;
for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
i++) {
bool resend;
int rc;
- if (adapter->logical_link_state == link_state) {
- netdev_dbg(netdev, "Link state already %d\n", link_state);
- return 0;
- }
-
netdev_err(netdev, "setting link state %d\n", link_state);
memset(&crq, 0, sizeof(crq));
crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
return rc;
}
-static int ibmvnic_open(struct net_device *netdev)
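+/**
+ * __ibmvnic_open is the lock-free half of the open path. It is shared by
+ * ibmvnic_open(), which takes reset_lock, and do_reset(), which already
+ * holds it.
+ */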
+static int __ibmvnic_open(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ enum vnic_state prev_state = adapter->state;
int i, rc;
adapter->state = VNIC_OPENING;
-
- if (adapter->state == VNIC_CLOSED) {
- rc = ibmvnic_init(adapter);
- if (rc)
- return rc;
- }
-
- rc = ibmvnic_login(netdev);
- if (rc)
- return rc;
-
- rc = init_resources(adapter);
- if (rc)
- return rc;
-
replenish_pools(adapter);
for (i = 0; i < adapter->req_rx_queues; i++)
/* We're ready to receive frames, enable the sub-crq interrupts and
* set the logical link state to up
*/
- for (i = 0; i < adapter->req_rx_queues; i++)
- enable_scrq_irq(adapter, adapter->rx_scrq[i]);
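+ /* When reopening from the closed state the sub-CRQ IRQs are still
+ * registered and only need enable_irq(); on a first open enable them
+ * through enable_scrq_irq().
+ */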
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ if (prev_state == VNIC_CLOSED)
+ enable_irq(adapter->rx_scrq[i]->irq);
+ else
+ enable_scrq_irq(adapter, adapter->rx_scrq[i]);
+ }
- for (i = 0; i < adapter->req_tx_queues; i++)
- enable_scrq_irq(adapter, adapter->tx_scrq[i]);
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ if (prev_state == VNIC_CLOSED)
+ enable_irq(adapter->tx_scrq[i]->irq);
+ else
+ enable_scrq_irq(adapter, adapter->tx_scrq[i]);
+ }
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
if (rc) {
for (i = 0; i < adapter->req_rx_queues; i++)
napi_disable(&adapter->napi[i]);
release_resources(adapter);
- } else {
- netif_tx_start_all_queues(netdev);
- adapter->state = VNIC_OPEN;
+ return rc;
}
+ netif_tx_start_all_queues(netdev);
+
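+ /* Resume NAPI polling on each rx queue when reopening from closed. */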
+ if (prev_state == VNIC_CLOSED) {
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ napi_schedule(&adapter->napi[i]);
+ }
+
+ adapter->state = VNIC_OPEN;
+ return rc;
+}
+
+static int ibmvnic_open(struct net_device *netdev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ int rc;
+
+ mutex_lock(&adapter->reset_lock);
+
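+ /* An adapter coming back from the closed state kept its login and
+ * resources; any other state must log in and allocate them first.
+ */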
+ if (adapter->state != VNIC_CLOSED) {
+ rc = ibmvnic_login(netdev);
+ if (rc) {
+ mutex_unlock(&adapter->reset_lock);
+ return rc;
+ }
+
+ rc = init_resources(adapter);
+ if (rc) {
+ netdev_err(netdev, "failed to initialize resources\n");
+ release_resources(adapter);
+ mutex_unlock(&adapter->reset_lock);
+ return rc;
+ }
+ }
+
+ rc = __ibmvnic_open(netdev);
+ mutex_unlock(&adapter->reset_lock);
+
return rc;
}
}
}
-static int ibmvnic_close(struct net_device *netdev)
+static int __ibmvnic_close(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int rc = 0;
int i;
adapter->state = VNIC_CLOSING;
+ netif_tx_stop_all_queues(netdev);
disable_sub_crqs(adapter);
if (adapter->napi) {
napi_disable(&adapter->napi[i]);
}
- if (!adapter->failover)
- netif_tx_stop_all_queues(netdev);
-
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
- release_resources(adapter);
-
adapter->state = VNIC_CLOSED;
return rc;
}
+static int ibmvnic_close(struct net_device *netdev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ int rc;
+
+ mutex_lock(&adapter->reset_lock);
+ rc = __ibmvnic_close(netdev);
+ mutex_unlock(&adapter->reset_lock);
+
+ return rc;
+}
+
/**
* build_hdr_data - creates L2/L3/L4 header data buffer
* @hdr_field - bitfield determining needed headers
handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
be32_to_cpu(adapter->login_rsp_buf->
off_txsubm_subcrqs));
- if (adapter->migrated) {
+ if (adapter->resetting) {
if (!netif_subqueue_stopped(netdev, skb))
netif_stop_subqueue(netdev, queue_num);
dev_kfree_skb_any(skb);
return 0;
}
-static void ibmvnic_tx_timeout(struct net_device *dev)
+/**
+ * do_reset returns zero if we are able to keep processing reset events, or
+ * non-zero if we hit a fatal error and must halt.
+ */
+static int do_reset(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_rwi *rwi, u32 reset_state)
{
- struct ibmvnic_adapter *adapter = netdev_priv(dev);
- int rc;
+ struct net_device *netdev = adapter->netdev;
+ int i, rc;
+
+ netif_carrier_off(netdev);
+ adapter->reset_reason = rwi->reset_reason;
+
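+ /* After a partition migration the main CRQ must be re-enabled before
+ * the adapter can be torn down and reinitialized.
+ */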
+ if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
+ rc = ibmvnic_reenable_crq_queue(adapter);
+ if (rc)
+ return 0;
+ }
+
+ rc = __ibmvnic_close(netdev);
+ if (rc)
+ return rc;
+
+ /* remove the closed state so when we call open it appears
+ * we are coming from the probed state.
+ */
+ adapter->state = VNIC_PROBED;
- /* Adapter timed out, resetting it */
+ release_resources(adapter);
release_sub_crqs(adapter);
- rc = ibmvnic_reset_crq(adapter);
+ release_crq_queue(adapter);
+
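+ /* Re-register the main CRQ and redrive the init handshake with the
+ * management partition.
+ */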
+ rc = ibmvnic_init(adapter);
if (rc)
- dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
- else
- ibmvnic_send_crq_init(adapter);
+ return 0;
+
+ /* If the adapter was in PROBE state prior to the reset, exit here. */
+ if (reset_state == VNIC_PROBED)
+ return 0;
+
+ rc = ibmvnic_login(netdev);
+ if (rc) {
+ adapter->state = VNIC_PROBED;
+ return 0;
+ }
+
+ rtnl_lock();
+ rc = init_resources(adapter);
+ rtnl_unlock();
+ if (rc)
+ return rc;
+
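+ /* If the interface was closed when the reset began, leave it closed. */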
+ if (reset_state == VNIC_CLOSED)
+ return 0;
+
+ rc = __ibmvnic_open(netdev);
+ if (rc) {
+ if (list_empty(&adapter->rwi_list))
+ adapter->state = VNIC_CLOSED;
+ else
+ adapter->state = reset_state;
+
+ return 0;
+ }
+
+ netif_carrier_on(netdev);
+
+ /* kick napi */
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ napi_schedule(&adapter->napi[i]);
+
+ return 0;
+}
+
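+/**
+ * get_next_rwi - pop the next queued reset work item off the adapter's
+ * list, or return NULL if no resets are pending.
+ */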
+static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_rwi *rwi;
+
+ mutex_lock(&adapter->rwi_lock);
+
+ if (!list_empty(&adapter->rwi_list)) {
+ rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
+ list);
+ list_del(&rwi->list);
+ } else {
+ rwi = NULL;
+ }
+
+ mutex_unlock(&adapter->rwi_lock);
+ return rwi;
+}
+
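+/**
+ * free_all_rwi - drain and free every queued reset work item.
+ */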
+static void free_all_rwi(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_rwi *rwi;
+
+ rwi = get_next_rwi(adapter);
+ while (rwi) {
+ kfree(rwi);
+ rwi = get_next_rwi(adapter);
+ }
+}
+
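+/**
+ * __ibmvnic_reset - reset worker; processes queued reset work items one at
+ * a time under reset_lock, stopping if do_reset() reports a fatal error.
+ */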
+static void __ibmvnic_reset(struct work_struct *work)
+{
+ struct ibmvnic_rwi *rwi;
+ struct ibmvnic_adapter *adapter;
+ struct net_device *netdev;
+ u32 reset_state;
+ int rc = 0;
+
+ adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
+ netdev = adapter->netdev;
+
+ mutex_lock(&adapter->reset_lock);
+ adapter->resetting = true;
+ reset_state = adapter->state;
+
+ rwi = get_next_rwi(adapter);
+ while (rwi) {
+ rc = do_reset(adapter, rwi, reset_state);
+ kfree(rwi);
+ if (rc)
+ break;
+
+ rwi = get_next_rwi(adapter);
+ }
+
+ if (rc) {
+ free_all_rwi(adapter);
+ mutex_unlock(&adapter->reset_lock);
+ return;
+ }
+
+ adapter->resetting = false;
+ mutex_unlock(&adapter->reset_lock);
+}
+
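+/**
+ * ibmvnic_reset - queue a reset work item for the given reason, unless an
+ * identical reset is already pending, then schedule the reset worker.
+ */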
+static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
+ enum ibmvnic_reset_reason reason)
+{
+ struct ibmvnic_rwi *rwi, *tmp;
+ struct net_device *netdev = adapter->netdev;
+ struct list_head *entry;
+
+ if (adapter->state == VNIC_REMOVING ||
+ adapter->state == VNIC_REMOVED) {
+ netdev_dbg(netdev, "Adapter removing, skipping reset\n");
+ return;
+ }
+
+ mutex_lock(&adapter->rwi_lock);
+
+ list_for_each(entry, &adapter->rwi_list) {
+ tmp = list_entry(entry, struct ibmvnic_rwi, list);
+ if (tmp->reset_reason == reason) {
+ netdev_err(netdev, "Matching reset found, skipping\n");
+ mutex_unlock(&adapter->rwi_lock);
+ return;
+ }
+ }
+
+ rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
+ if (!rwi) {
+ mutex_unlock(&adapter->rwi_lock);
+ ibmvnic_close(netdev);
+ return;
+ }
+
+ rwi->reset_reason = reason;
+ list_add_tail(&rwi->list, &adapter->rwi_list);
+ mutex_unlock(&adapter->rwi_lock);
+ schedule_work(&adapter->ibmvnic_reset);
+}
+
+static void ibmvnic_tx_timeout(struct net_device *dev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(dev);
+
+ ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
}
static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
return ibmvnic_send_crq(adapter, &crq);
}
-static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
-{
- union ibmvnic_crq crq;
-
- memset(&crq, 0, sizeof(crq));
- crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
- crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
- netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");
-
- return ibmvnic_send_crq(adapter, &crq);
-}
-
static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
union ibmvnic_crq crq;
if (be32_to_cpu(crq->error_indication.error_id))
request_error_information(adapter, crq);
+
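+ /* Fatal error indications trigger a full driver reset. */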
+ if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
+ ibmvnic_reset(adapter, VNIC_RESET_FATAL);
}
static void handle_change_mac_rsp(union ibmvnic_crq *crq,
}
}
-static void ibmvnic_xport_event(struct work_struct *work)
-{
- struct ibmvnic_adapter *adapter = container_of(work,
- struct ibmvnic_adapter,
- ibmvnic_xport);
- struct device *dev = &adapter->vdev->dev;
- long rc;
-
- release_sub_crqs(adapter);
- if (adapter->migrated) {
- rc = ibmvnic_reenable_crq_queue(adapter);
- if (rc)
- dev_err(dev, "Error after enable rc=%ld\n", rc);
- adapter->migrated = false;
- rc = ibmvnic_send_crq_init(adapter);
- if (rc)
- dev_err(dev, "Error sending init rc=%ld\n", rc);
- }
-}
-
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
switch (gen_crq->cmd) {
case IBMVNIC_CRQ_INIT:
dev_info(dev, "Partner initialized\n");
- /* Send back a response */
- rc = ibmvnic_send_crq_init_complete(adapter);
- if (!rc)
- schedule_work(&adapter->vnic_crq_init);
- else
- dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
break;
case IBMVNIC_CRQ_INIT_COMPLETE:
dev_info(dev, "Partner initialization complete\n");
}
return;
case IBMVNIC_CRQ_XPORT_EVENT:
+ netif_carrier_off(netdev);
if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
- dev_info(dev, "Re-enabling adapter\n");
- adapter->migrated = true;
- schedule_work(&adapter->ibmvnic_xport);
+ dev_info(dev, "Migrated, re-enabling adapter\n");
+ ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
dev_info(dev, "Backing device failover detected\n");
- netif_carrier_off(netdev);
- adapter->failover = true;
+ ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
} else {
/* The adapter lost the connection */
dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
gen_crq->cmd);
- schedule_work(&adapter->ibmvnic_xport);
+ ibmvnic_reset(adapter, VNIC_RESET_FATAL);
}
return;
case IBMVNIC_CRQ_CMD_RSP:
return retrc;
}
-static void handle_crq_init_rsp(struct work_struct *work)
-{
- struct ibmvnic_adapter *adapter = container_of(work,
- struct ibmvnic_adapter,
- vnic_crq_init);
- struct device *dev = &adapter->vdev->dev;
- struct net_device *netdev = adapter->netdev;
- unsigned long timeout = msecs_to_jiffies(30000);
- bool restart = false;
- int rc;
-
- if (adapter->failover) {
- release_sub_crqs(adapter);
- if (netif_running(netdev)) {
- netif_tx_disable(netdev);
- ibmvnic_close(netdev);
- restart = true;
- }
- }
-
- reinit_completion(&adapter->init_done);
- send_version_xchg(adapter);
- if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
- dev_err(dev, "Passive init timeout\n");
- goto task_failed;
- }
-
- netdev->mtu = adapter->req_mtu - ETH_HLEN;
-
- if (adapter->failover) {
- adapter->failover = false;
- if (restart) {
- rc = ibmvnic_open(netdev);
- if (rc)
- goto restart_failed;
- }
- netif_carrier_on(netdev);
- return;
- }
-
- rc = register_netdev(netdev);
- if (rc) {
- dev_err(dev,
- "failed to register netdev rc=%d\n", rc);
- goto register_failed;
- }
- dev_info(dev, "ibmvnic registered\n");
-
- return;
-
-restart_failed:
- dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
-register_failed:
- release_sub_crqs(adapter);
-task_failed:
- dev_err(dev, "Passive initialization was not successful\n");
-}
-
static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
dev_set_drvdata(&dev->dev, netdev);
adapter->vdev = dev;
adapter->netdev = netdev;
- adapter->failover = false;
ether_addr_copy(adapter->mac_addr, mac_addr_p);
ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
netdev->ethtool_ops = &ibmvnic_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
- INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
- INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);
-
spin_lock_init(&adapter->stats_lock);
INIT_LIST_HEAD(&adapter->errors);
spin_lock_init(&adapter->error_list_lock);
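+ /* Set up the reset worker, its work-item list, and the locks that
+ * serialize resets against open/close.
+ */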
+ INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
+ INIT_LIST_HEAD(&adapter->rwi_list);
+ mutex_init(&adapter->reset_lock);
+ mutex_init(&adapter->rwi_lock);
+ adapter->resetting = false;
+
rc = ibmvnic_init(adapter);
if (rc) {
free_netdev(netdev);
adapter->state = VNIC_REMOVING;
unregister_netdev(netdev);
+ mutex_lock(&adapter->reset_lock);
release_resources(adapter);
release_sub_crqs(adapter);
adapter->state = VNIC_REMOVED;
+ mutex_unlock(&adapter->reset_lock);
free_netdev(netdev);
dev_set_drvdata(&dev->dev, NULL);