#define DEFAULT_SHORT_RETRY_LIMIT 7U
#define DEFAULT_LONG_RETRY_LIMIT 4U
-struct iwl_rx_mem_buffer {
- dma_addr_t page_dma;
- struct page *page;
- struct list_head list;
-};
-
-#define rxb_addr(r) page_address(r->page)
-
/* defined below */
struct iwl_device_cmd;
#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
-/**
- * struct iwl_rx_queue - Rx queue
- * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
- * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
- * @read: Shared index to newest available Rx buffer
- * @write: Shared index to oldest written Rx packet
- * @free_count: Number of pre-allocated buffers in rx_free
- * @rx_free: list of free SKBs for use
- * @rx_used: List of Rx buffers with no SKB
- * @need_update: flag to indicate we need to update read/write index
- * @rb_stts: driver's pointer to receive buffer status
- * @rb_stts_dma: bus address of receive buffer status
- *
- * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
- */
-struct iwl_rx_queue {
- __le32 *bd;
- dma_addr_t bd_dma;
- struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
- struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
- u32 read;
- u32 write;
- u32 free_count;
- u32 write_actual;
- struct list_head rx_free;
- struct list_head rx_used;
- int need_update;
- struct iwl_rb_status *rb_stts;
- dma_addr_t rb_stts_dma;
- spinlock_t lock;
-};
-
#define IWL_SUPPORTED_RATES_IE_LEN 8
#define MAX_TID_COUNT 9
int activity_timer_active;
- /* Rx and Tx DMA processing queues */
- struct iwl_rx_queue rxq;
+ /* Tx DMA processing queues */
struct iwl_tx_queue *txq;
unsigned long txq_ctx_active_msk;
struct iwl_dma_ptr kw; /* keep warm address */
struct work_struct restart;
struct work_struct scan_completed;
- struct work_struct rx_replenish;
struct work_struct abort_scan;
struct work_struct beacon_update;
};
#endif
+struct iwl_rx_mem_buffer {
+ dma_addr_t page_dma;
+ struct page *page;
+ struct list_head list;
+};
+
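+/* rxb_addr() yields the CPU virtual address of an Rx buffer's page;
+ * the Rx handler uses it to parse the iwl_rx_packet it holds. */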
+#define rxb_addr(r) page_address(r->page)
+
#ifdef CONFIG_PM
int iwl_suspend(struct iwl_priv *priv);
int iwl_resume(struct iwl_priv *priv);
/* This file includes the declarations that are internal to the
 * trans_pcie layer */
+/**
+ * struct iwl_rx_queue - Rx queue
+ * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
+ * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @pool: initial pool of iwl_rx_mem_buffer for the queue
+ * @queue: pointers to the buffers currently held in the circular buffer
+ * @read: Shared index to newest available Rx buffer
+ * @write: Shared index to oldest written Rx packet
+ * @free_count: Number of pre-allocated buffers in rx_free
+ * @write_actual: the write index last written to the device
+ * @rx_free: list of free SKBs for use
+ * @rx_used: List of Rx buffers with no SKB
+ * @need_update: flag to indicate we need to update read/write index
+ * @rb_stts: driver's pointer to receive buffer status
+ * @rb_stts_dma: bus address of receive buffer status
+ * @lock: protects the queue indices and the rx_free/rx_used lists
+ *
+ * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
+ */
+struct iwl_rx_queue {
+ __le32 *bd;
+ dma_addr_t bd_dma;
+ struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+ struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
+ u32 read;
+ u32 write;
+ u32 free_count;
+ u32 write_actual;
+ struct list_head rx_free;
+ struct list_head rx_used;
+ int need_update;
+ struct iwl_rb_status *rb_stts;
+ dma_addr_t rb_stts_dma;
+ spinlock_t lock;
+};
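+
+/*
+ * Buffer life cycle, as a sketch (illustration only, based on the
+ * RX_QUEUE_MASK wraparound used by the restock code below):
+ *
+ *	rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+ *
+ * Buffers move from rx_free into queue[] when restocked, and back to
+ * rx_free (page reused) or rx_used (page consumed) once the received
+ * frame has been handled.
+ */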
+
/**
* struct iwl_trans_pcie - PCIe transport specific data
+ * @rxq: all the RX queue data
+ * @rx_replenish: work that will be called when buffers need to be allocated
+ * @trans: pointer to the generic transport area
*/
struct iwl_trans_pcie {
+ struct iwl_rx_queue rxq;
+ struct work_struct rx_replenish;
+ struct iwl_trans *trans;
};
+#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
+ ((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))
+
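+/*
+ * Typical usage, a minimal sketch: PCIe code recovers its private
+ * state from the generic transport and reaches the Rx queue from it:
+ *
+ *	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ *	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+ */
+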
/*****************************************************
* RX
******************************************************/
void iwl_bg_rx_replenish(struct work_struct *data);
void iwl_irq_tasklet(struct iwl_priv *priv);
-void iwlagn_rx_replenish(struct iwl_priv *priv);
-void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
+void iwlagn_rx_replenish(struct iwl_trans *trans);
+void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
struct iwl_rx_queue *q);
/**
* iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
*/
-void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
+void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
struct iwl_rx_queue *q)
{
+ struct iwl_priv *priv = priv(trans);
unsigned long flags;
u32 reg;
iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write_actual);
} else {
/* If power-saving is in use, make sure device is awake */
- if (test_bit(STATUS_POWER_PMI, &priv->shrd->status)) {
+ if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(priv,
+ IWL_DEBUG_INFO(trans,
"Rx queue requesting wakeup,"
" GP1 = 0x%x\n", reg);
iwl_set_bit(priv, CSR_GP_CNTRL,
/**
* iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
*/
-static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
- dma_addr_t dma_addr)
+static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
return cpu_to_le32((u32)(dma_addr >> 8));
}
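/*
 * Since the device dereferences the RBD as (dma_addr >> 8), a receive
 * buffer must start on a 256-byte boundary (page allocations do) and,
 * per the BUG_ON on rxb->page_dma below, lie within 36 bits of DMA
 * address space.
 */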
* also updates the memory address in the firmware to reference the new
* target buffer.
*/
-static void iwlagn_rx_queue_restock(struct iwl_priv *priv)
+static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
{
- struct iwl_rx_queue *rxq = &priv->rxq;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
struct list_head *element;
struct iwl_rx_mem_buffer *rxb;
unsigned long flags;
list_del(element);
/* Point to Rx buffer via next RBD in circular buffer */
- rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
- rxb->page_dma);
+ rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(rxb->page_dma);
rxq->queue[rxq->write] = rxb;
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
rxq->free_count--;
/* If the pre-allocated buffer pool is dropping low, schedule to
* refill it */
if (rxq->free_count <= RX_LOW_WATERMARK)
- queue_work(priv->shrd->workqueue, &priv->rx_replenish);
+ queue_work(trans->shrd->workqueue, &trans_pcie->rx_replenish);
/* If we've added more space for the firmware to place data, tell it. */
spin_lock_irqsave(&rxq->lock, flags);
rxq->need_update = 1;
spin_unlock_irqrestore(&rxq->lock, flags);
- iwl_rx_queue_update_write_ptr(priv, rxq);
+ iwl_rx_queue_update_write_ptr(trans, rxq);
}
}
* Also restock the Rx queue via iwl_rx_queue_restock.
* This is called as a scheduled work item (except for during initialization)
*/
-static void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
+static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
{
- struct iwl_rx_queue *rxq = &priv->rxq;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
struct list_head *element;
struct iwl_rx_mem_buffer *rxb;
struct page *page;
if (rxq->free_count > RX_LOW_WATERMARK)
gfp_mask |= __GFP_NOWARN;
- if (hw_params(priv).rx_page_order > 0)
+ if (hw_params(trans).rx_page_order > 0)
gfp_mask |= __GFP_COMP;
/* Alloc a new receive buffer */
page = alloc_pages(gfp_mask,
- hw_params(priv).rx_page_order);
+ hw_params(trans).rx_page_order);
if (!page) {
if (net_ratelimit())
- IWL_DEBUG_INFO(priv, "alloc_pages failed, "
+ IWL_DEBUG_INFO(trans, "alloc_pages failed, "
"order: %d\n",
- hw_params(priv).rx_page_order);
+ hw_params(trans).rx_page_order);
if ((rxq->free_count <= RX_LOW_WATERMARK) &&
net_ratelimit())
- IWL_CRIT(priv, "Failed to alloc_pages with %s."
+ IWL_CRIT(trans, "Failed to alloc_pages with %s. "
"Only %u free buffers remaining.\n",
priority == GFP_ATOMIC ?
"GFP_ATOMIC" : "GFP_KERNEL",
if (list_empty(&rxq->rx_used)) {
spin_unlock_irqrestore(&rxq->lock, flags);
- __free_pages(page, hw_params(priv).rx_page_order);
+ __free_pages(page, hw_params(trans).rx_page_order);
return;
}
element = rxq->rx_used.next;
BUG_ON(rxb->page);
rxb->page = page;
/* Get physical address of the RB */
- rxb->page_dma = dma_map_page(priv->bus->dev, page, 0,
- PAGE_SIZE << hw_params(priv).rx_page_order,
+ rxb->page_dma = dma_map_page(bus(trans)->dev, page, 0,
+ PAGE_SIZE << hw_params(trans).rx_page_order,
DMA_FROM_DEVICE);
/* dma address must be no more than 36 bits */
BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
}
}
-void iwlagn_rx_replenish(struct iwl_priv *priv)
+void iwlagn_rx_replenish(struct iwl_trans *trans)
{
unsigned long flags;
- iwlagn_rx_allocate(priv, GFP_KERNEL);
+ iwlagn_rx_allocate(trans, GFP_KERNEL);
- spin_lock_irqsave(&priv->shrd->lock, flags);
- iwlagn_rx_queue_restock(priv);
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+ iwlagn_rx_queue_restock(trans);
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
}
-static void iwlagn_rx_replenish_now(struct iwl_priv *priv)
+static void iwlagn_rx_replenish_now(struct iwl_trans *trans)
{
- iwlagn_rx_allocate(priv, GFP_ATOMIC);
+ iwlagn_rx_allocate(trans, GFP_ATOMIC);
- iwlagn_rx_queue_restock(priv);
+ iwlagn_rx_queue_restock(trans);
}
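/*
 * Note: iwlagn_rx_replenish() allocates with GFP_KERNEL and may sleep,
 * so it is only called from process context (e.g. the rx_replenish
 * work below); iwlagn_rx_replenish_now() uses GFP_ATOMIC so the Rx
 * tasklet path can refill without sleeping.
 */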
void iwl_bg_rx_replenish(struct work_struct *data)
{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, rx_replenish);
+ struct iwl_trans_pcie *trans_pcie =
+ container_of(data, struct iwl_trans_pcie, rx_replenish);
+ struct iwl_trans *trans = trans_pcie->trans;
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
return;
- mutex_lock(&priv->shrd->mutex);
- iwlagn_rx_replenish(priv);
- mutex_unlock(&priv->shrd->mutex);
+ mutex_lock(&trans->shrd->mutex);
+ iwlagn_rx_replenish(trans);
+ mutex_unlock(&trans->shrd->mutex);
}
/**
* the appropriate handlers, including command responses,
* frame-received notifications, and other notifications.
*/
-static void iwl_rx_handle(struct iwl_priv *priv)
+static void iwl_rx_handle(struct iwl_trans *trans)
{
struct iwl_rx_mem_buffer *rxb;
struct iwl_rx_packet *pkt;
- struct iwl_rx_queue *rxq = &priv->rxq;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
u32 r, i;
int reclaim;
unsigned long flags;
/* Rx interrupt, but nothing sent from uCode */
if (i == r)
- IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
+ IWL_DEBUG_RX(trans, "r = %d, i = %d\n", r, i);
/* calculate how many frames need to be restocked after handling RX */
total_empty = r - rxq->write_actual;
rxq->queue[i] = NULL;
- dma_unmap_page(priv->bus->dev, rxb->page_dma,
- PAGE_SIZE << hw_params(priv).rx_page_order,
+ dma_unmap_page(bus(trans)->dev, rxb->page_dma,
+ PAGE_SIZE << hw_params(trans).rx_page_order,
DMA_FROM_DEVICE);
pkt = rxb_addr(rxb);
- IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
+ IWL_DEBUG_RX(trans, "r = %d, i = %d, %s, 0x%02x\n", r,
i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
len += sizeof(u32); /* account for status word */
- trace_iwlwifi_dev_rx(priv, pkt, len);
+ trace_iwlwifi_dev_rx(priv(trans), pkt, len);
/* Reclaim a command buffer only if this packet is a response
* to a (driver-originated) command.
(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
(pkt->hdr.cmd != REPLY_TX);
- iwl_rx_dispatch(priv, rxb);
+ iwl_rx_dispatch(priv(trans), rxb);
/*
* XXX: After here, we should always check rxb->page
* iwl_trans_send_cmd()
* as we reclaim the driver command queue */
if (rxb->page)
- iwl_tx_cmd_complete(priv, rxb);
+ iwl_tx_cmd_complete(priv(trans), rxb);
else
- IWL_WARN(priv, "Claim null rxb?\n");
+ IWL_WARN(trans, "Claim null rxb?\n");
}
/* Reuse the page if possible. For notification packets and
* rx_free list for reuse later. */
spin_lock_irqsave(&rxq->lock, flags);
if (rxb->page != NULL) {
- rxb->page_dma = dma_map_page(priv->bus->dev, rxb->page,
+ rxb->page_dma = dma_map_page(bus(trans)->dev, rxb->page,
0, PAGE_SIZE <<
- hw_params(priv).rx_page_order,
+ hw_params(trans).rx_page_order,
DMA_FROM_DEVICE);
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
count++;
if (count >= 8) {
rxq->read = i;
- iwlagn_rx_replenish_now(priv);
+ iwlagn_rx_replenish_now(trans);
count = 0;
}
}
/* Backtrack one entry */
rxq->read = i;
if (fill_rx)
- iwlagn_rx_replenish_now(priv);
+ iwlagn_rx_replenish_now(trans);
else
- iwlagn_rx_queue_restock(priv);
+ iwlagn_rx_queue_restock(trans);
}
/* tasklet for iwlagn interrupt */
/* uCode wakes up after power-down sleep */
if (inta & CSR_INT_BIT_WAKEUP) {
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans(priv));
IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
- iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
+ iwl_rx_queue_update_write_ptr(trans(priv), &trans_pcie->rxq);
for (i = 0; i < hw_params(priv).max_txq_num; i++)
iwl_txq_update_write_ptr(priv, &priv->txq[i]);
/* Disable periodic interrupt; we use it as just a one-shot. */
iwl_write8(priv, CSR_INT_PERIODIC_REG,
CSR_INT_PERIODIC_DIS);
- iwl_rx_handle(priv);
+ iwl_rx_handle(trans(priv));
/*
* Enable periodic interrupt in 8 msec only if we received
#include "iwl-core.h"
#include "iwl-shared.h"
-static int iwl_trans_rx_alloc(struct iwl_priv *priv)
+static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct device *dev = priv->bus->dev;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+ struct device *dev = bus(trans)->dev;
- memset(&priv->rxq, 0, sizeof(priv->rxq));
+ memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
spin_lock_init(&rxq->lock);
INIT_LIST_HEAD(&rxq->rx_free);
return -ENOMEM;
}
-static void iwl_trans_rxq_free_rx_bufs(struct iwl_priv *priv)
+static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
{
- struct iwl_rx_queue *rxq = &priv->rxq;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
int i;
/* Fill the rx_used queue with _all_ of the Rx buffers */
/* In the reset function, these buffers may have been allocated
* to an SKB, so we need to unmap and free potential storage */
if (rxq->pool[i].page != NULL) {
- dma_unmap_page(priv->bus->dev, rxq->pool[i].page_dma,
- PAGE_SIZE << hw_params(priv).rx_page_order,
+ dma_unmap_page(bus(trans)->dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << hw_params(trans).rx_page_order,
DMA_FROM_DEVICE);
- __iwl_free_pages(priv, rxq->pool[i].page);
+ __iwl_free_pages(priv(trans), rxq->pool[i].page);
rxq->pool[i].page = NULL;
}
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
-static int iwl_rx_init(struct iwl_priv *priv)
+static int iwl_rx_init(struct iwl_trans *trans)
{
- struct iwl_rx_queue *rxq = &priv->rxq;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+
int i, err;
unsigned long flags;
if (!rxq->bd) {
- err = iwl_trans_rx_alloc(priv);
+ err = iwl_trans_rx_alloc(trans);
if (err)
return err;
}
INIT_LIST_HEAD(&rxq->rx_free);
INIT_LIST_HEAD(&rxq->rx_used);
- iwl_trans_rxq_free_rx_bufs(priv);
+ iwl_trans_rxq_free_rx_bufs(trans);
for (i = 0; i < RX_QUEUE_SIZE; i++)
rxq->queue[i] = NULL;
rxq->free_count = 0;
spin_unlock_irqrestore(&rxq->lock, flags);
- iwlagn_rx_replenish(priv);
+ iwlagn_rx_replenish(trans);
- iwl_trans_rx_hw_init(priv, rxq);
+ iwl_trans_rx_hw_init(priv(trans), rxq);
- spin_lock_irqsave(&priv->shrd->lock, flags);
+ spin_lock_irqsave(&trans->shrd->lock, flags);
rxq->need_update = 1;
- iwl_rx_queue_update_write_ptr(priv, rxq);
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
+ iwl_rx_queue_update_write_ptr(trans, rxq);
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
return 0;
}
-static void iwl_trans_pcie_rx_free(struct iwl_priv *priv)
+static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
{
- struct iwl_rx_queue *rxq = &priv->rxq;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+
unsigned long flags;
/* if rxq->bd is NULL, it means that nothing has been allocated,
* exit now */
if (!rxq->bd) {
- IWL_DEBUG_INFO(priv, "Free NULL rx context\n");
+ IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
return;
}
spin_lock_irqsave(&rxq->lock, flags);
- iwl_trans_rxq_free_rx_bufs(priv);
+ iwl_trans_rxq_free_rx_bufs(trans);
spin_unlock_irqrestore(&rxq->lock, flags);
- dma_free_coherent(priv->bus->dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ dma_free_coherent(bus(trans)->dev, sizeof(__le32) * RX_QUEUE_SIZE,
rxq->bd, rxq->bd_dma);
memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
rxq->bd = NULL;
if (rxq->rb_stts)
- dma_free_coherent(priv->bus->dev,
+ dma_free_coherent(bus(trans)->dev,
sizeof(struct iwl_rb_status),
rxq->rb_stts, rxq->rb_stts_dma);
else
- IWL_DEBUG_INFO(priv, "Free rxq->rb_stts which is NULL\n");
+ IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
rxq->rb_stts = NULL;
}
priv->cfg->lib->nic_config(priv);
/* Allocate the RX queue, or reset if it is already allocated */
- iwl_rx_init(priv);
+ iwl_rx_init(trans(priv));
/* Allocate or reset and init all Tx and Command queues */
if (iwl_tx_init(priv))
static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_priv *priv = priv(trans);
int err;
return err;
}
- INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
+ INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
return 0;
}
sizeof(struct iwl_trans_pcie),
GFP_KERNEL);
if (iwl_trans) {
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
iwl_trans->ops = &trans_ops_pcie;
iwl_trans->shrd = shrd;
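+ /* back-pointer so the rx_replenish work item can reach the
+ * transport after container_of() on trans_pcie */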
+ trans_pcie->trans = iwl_trans;
}
return iwl_trans;
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
- if (!debugfs_create_file(#name, mode, parent, priv, \
+ if (!debugfs_create_file(#name, mode, parent, trans, \
&iwl_dbgfs_##name##_ops)) \
return -ENOMEM; \
} while (0)
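/*
 * debugfs files now store the iwl_trans pointer in private_data (see
 * DEBUGFS_ADD_FILE above); the handlers below recover it from
 * file->private_data instead of an iwl_priv.
 */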
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = file->private_data;
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_priv *priv = priv(trans);
int pos = 0, ofs = 0;
int cnt = 0, entry;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_queue *txq;
struct iwl_queue *q;
- struct iwl_rx_queue *rxq = &priv->rxq;
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
char *buf;
int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
(priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
ssize_t ret;
if (!priv->txq) {
- IWL_ERR(priv, "txq not ready\n");
+ IWL_ERR(trans, "txq not ready\n");
return -EAGAIN;
}
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
- IWL_ERR(priv, "Can not allocate buffer\n");
+ IWL_ERR(trans, "Can not allocate buffer\n");
return -ENOMEM;
}
pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
- for (cnt = 0; cnt < hw_params(priv).max_txq_num; cnt++) {
+ for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
txq = &priv->txq[cnt];
q = &txq->q;
pos += scnprintf(buf + pos, bufsz - pos,
cnt, q->read_ptr, q->write_ptr);
}
if (priv->tx_traffic &&
- (iwl_get_debug_level(priv->shrd) & IWL_DL_TX)) {
+ (iwl_get_debug_level(trans->shrd) & IWL_DL_TX)) {
ptr = priv->tx_traffic;
pos += scnprintf(buf + pos, bufsz - pos,
- "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
+ "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
entry++, ofs += 16) {
rxq->read, rxq->write);
if (priv->rx_traffic &&
- (iwl_get_debug_level(priv->shrd) & IWL_DL_RX)) {
+ (iwl_get_debug_level(trans->shrd) & IWL_DL_RX)) {
ptr = priv->rx_traffic;
pos += scnprintf(buf + pos, bufsz - pos,
- "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
+ "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
entry++, ofs += 16) {
const char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = file->private_data;
+ struct iwl_trans *trans = file->private_data;
char buf[8];
int buf_size;
int traffic_log;
if (sscanf(buf, "%d", &traffic_log) != 1)
return -EFAULT;
if (traffic_log == 0)
- iwl_reset_traffic_log(priv);
+ iwl_reset_traffic_log(priv(trans));
return count;
}
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = file->private_data;
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_priv *priv = priv(trans);
struct iwl_tx_queue *txq;
struct iwl_queue *q;
char *buf;
if (!buf)
return -ENOMEM;
- for (cnt = 0; cnt < hw_params(priv).max_txq_num; cnt++) {
+ for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
txq = &priv->txq[cnt];
q = &txq->q;
pos += scnprintf(buf + pos, bufsz - pos,
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = file->private_data;
- struct iwl_rx_queue *rxq = &priv->rxq;
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rx_queue *rxq = &trans_pcie->rxq;
char buf[256];
int pos = 0;
const size_t bufsz = sizeof(buf);
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
struct dentry *dir)
{
- struct iwl_priv *priv = priv(trans);
-
DEBUGFS_ADD_FILE(traffic_log, dir, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
void (*stop_device)(struct iwl_priv *priv);
void (*tx_start)(struct iwl_priv *priv);
void (*tx_free)(struct iwl_priv *priv);
- void (*rx_free)(struct iwl_priv *priv);
+ void (*rx_free)(struct iwl_trans *trans);
int (*send_cmd)(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
static inline void iwl_trans_rx_free(struct iwl_trans *trans)
{
- trans->ops->rx_free(priv(trans));
+ trans->ops->rx_free(trans);
}
static inline void iwl_trans_tx_free(struct iwl_trans *trans)