void **tid_release_head;
spinlock_t tid_release_lock;
struct work_struct tid_release_task;
+ struct work_struct db_full_task;
+ struct work_struct db_drop_task;
bool tid_release_task_busy;
struct dentry *debugfs_root;
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
+void t4_db_full(struct adapter *adapter);
+void t4_db_dropped(struct adapter *adapter);
#endif /* __CXGB4_H__ */
}
EXPORT_SYMBOL(cxgb4_port_chan);
+unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
+{
+ struct adapter *adap = netdev2adap(dev);
+ u32 v;
+
+ v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
+ return lpfifo ? G_LP_COUNT(v) : G_HP_COUNT(v);
+}
+EXPORT_SYMBOL(cxgb4_dbfifo_count);
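
A hedged usage sketch (not part of the patch): a ULD could poll the
exported helper to throttle its own doorbell writes before the FIFO
fills. The function name and the threshold of 16 are illustrative
assumptions only.

static bool uld_db_fifo_backlogged(const struct net_device *dev)
{
        /* Check both the low- and high-priority doorbell FIFOs. */
        return cxgb4_dbfifo_count(dev, 1) > 16 ||
               cxgb4_dbfifo_count(dev, 0) > 16;
}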
+
/**
* cxgb4_port_viid - get the VI id of a port
* @dev: the net device for the port
.notifier_call = netevent_cb
};
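+/*
+ * Hold uld_mutex so the RDMA ULD cannot attach or detach while we
+ * look up its handle and invoke its control hook.
+ */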
+static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
+{
+ mutex_lock(&uld_mutex);
+ if (adap->uld_handle[CXGB4_ULD_RDMA])
+ ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
+ cmd);
+ mutex_unlock(&uld_mutex);
+}
+
+static void process_db_full(struct work_struct *work)
+{
+ struct adapter *adap;
+ static int delay = 1000; /* poll back-off, in microseconds */
+ u32 v;
+
+ adap = container_of(work, struct adapter, db_full_task);
+
+ /* stop LLD queues */
+
+ notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
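+ /* Poll until both the LP and HP doorbell FIFOs have drained. */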
+ do {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(usecs_to_jiffies(delay));
+ v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
+ if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0)
+ break;
+ } while (1);
+ notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
+
+ /*
+ * Back off on repeated DB-full interrupts: double the poll delay
+ * used above, capped at 200ms, so doorbell rings are re-enabled
+ * more slowly under sustained pressure.
+ */
+ delay = min(delay << 1, 200000);
+
+ /* resume LLD queues */
+}
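
A minimal sketch of how the "stop LLD queues" / "resume LLD queues"
placeholders above might be filled in, assuming it is enough to pause
the netdev TX queues of every port while the FIFO drains; the helper
names are hypothetical.

static void lld_queues_stop(struct adapter *adap)
{
        int i;

        /* Stop the stack from queuing more work (and doorbells). */
        for_each_port(adap, i)
                netif_tx_stop_all_queues(adap->port[i]);
}

static void lld_queues_resume(struct adapter *adap)
{
        int i;

        for_each_port(adap, i)
                netif_tx_wake_all_queues(adap->port[i]);
}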
+
+static void process_db_drop(struct work_struct *work)
+{
+ struct adapter *adap;
+ adap = container_of(work, struct adapter, db_drop_task);
+
+ /*
+ * Resynchronize the HW and SW PIDX values for the LLD queues.
+ */
+
+ notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
+}
+
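+/*
+ * Entry points from the SGE interrupt handler; both work handlers
+ * above may sleep, so they are deferred to process context.
+ */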
+void t4_db_full(struct adapter *adap)
+{
+ schedule_work(&adap->db_full_task);
+}
+
+void t4_db_dropped(struct adapter *adap)
+{
+ schedule_work(&adap->db_drop_task);
+}
+
static void uld_attach(struct adapter *adap, unsigned int uld)
{
void *handle;
{
t4_intr_disable(adapter);
cancel_work_sync(&adapter->tid_release_task);
+ cancel_work_sync(&adapter->db_full_task);
+ cancel_work_sync(&adapter->db_drop_task);
adapter->tid_release_task_busy = false;
adapter->tid_release_head = NULL;
spin_lock_init(&adapter->tid_release_lock);
INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
+ INIT_WORK(&adapter->db_full_task, process_db_full);
+ INIT_WORK(&adapter->db_drop_task, process_db_drop);
err = t4_prep_adapter(adapter);
if (err)
CXGB4_STATE_DETACH
};
+enum cxgb4_control {
+ CXGB4_CONTROL_DB_FULL,
+ CXGB4_CONTROL_DB_EMPTY,
+ CXGB4_CONTROL_DB_DROP,
+};
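
An illustrative sketch (assumed, not part of this patch) of how a
ULD's control callback might dispatch on these commands, roughly
mirroring what notify_rdma_uld() invokes; the function name and exact
signature are assumptions for illustration.

static int example_uld_control(void *handle, enum cxgb4_control control)
{
        switch (control) {
        case CXGB4_CONTROL_DB_FULL:
                /* Stop ringing doorbells until DB_EMPTY arrives. */
                break;
        case CXGB4_CONTROL_DB_EMPTY:
                /* Resume doorbells; ring any deferred in the meantime. */
                break;
        case CXGB4_CONTROL_DB_DROP:
                /* A doorbell was dropped: resync SW/HW producer indices. */
                break;
        }
        return 0;
}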
+
struct pci_dev;
struct l2t_data;
struct net_device;
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
int cxgb4_unregister_uld(enum cxgb4_uld type);
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
+unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo);
unsigned int cxgb4_port_chan(const struct net_device *dev);
unsigned int cxgb4_port_viid(const struct net_device *dev);
unsigned int cxgb4_port_idx(const struct net_device *dev);
RXPKTCPLMODE |
(STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0));
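+ /*
+ * Arm the doorbell FIFO high-water interrupt thresholds and allow
+ * the SGE to drop doorbells on overflow; drops are recovered via
+ * t4_db_dropped().
+ */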
+ t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
+ V_HP_INT_THRESH(M_HP_INT_THRESH) | V_LP_INT_THRESH(M_LP_INT_THRESH),
+ V_HP_INT_THRESH(5) | V_LP_INT_THRESH(5));
+ t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
+ F_ENABLE_DROP);
+
for (i = v = 0; i < 32; i += 4)
v |= (PAGE_SHIFT - 10) << i;
t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v);
{ ERR_INVALID_CIDX_INC,
"SGE GTS CIDX increment too large", -1, 0 },
{ ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
+ /* No message: serviced via t4_db_full(), just cleared here */
+ { F_DBFIFO_LP_INT, NULL, -1, 0 },
+ { F_DBFIFO_HP_INT, NULL, -1, 0 },
{ ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
"SGE IQID > 1023 received CPL for FL", -1, 0 },
t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
}
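+
+ /* Service doorbell FIFO high-water and dropped-doorbell events. */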
+ err = t4_read_reg(adapter, A_SGE_INT_CAUSE3);
+ if (err & (F_DBFIFO_HP_INT | F_DBFIFO_LP_INT))
+ t4_db_full(adapter);
+ if (err & F_ERR_DROPPED_DB)
+ t4_db_dropped(adapter);
+
if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
v != 0)
t4_fatal_err(adapter);
ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
+ F_DBFIFO_HP_INT | F_DBFIFO_LP_INT |
EGRESS_SIZE_ERR);
t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);