((void *)((cmdq_pages)->shadow_page_vaddr) \
+ (wq)->block_idx * CMDQ_BLOCK_SIZE)
-#define WQE_PAGE_OFF(wq, idx) (((idx) & ((wq)->num_wqebbs_per_page - 1)) * \
- (wq)->wqebb_size)
-
-#define WQE_PAGE_NUM(wq, idx) (((idx) / ((wq)->num_wqebbs_per_page)) \
- & ((wq)->num_q_pages - 1))
-
#define WQ_PAGE_ADDR(wq, idx) \
((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)])
(((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \
/ (wq)->max_wqe_size)
+static inline int WQE_PAGE_OFF(struct hinic_wq *wq, u16 idx)
+{
+ return (((idx) & ((wq)->num_wqebbs_per_page - 1))
+ << (wq)->wqebb_size_shift);
+}
+
+static inline int WQE_PAGE_NUM(struct hinic_wq *wq, u16 idx)
+{
+ return (((idx) >> ((wq)->wqebbs_per_page_shift))
+ & ((wq)->num_q_pages - 1));
+}
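
The old macros computed the in-page byte offset with a multiply and the page number with a divide; the helpers above replace both with shifts, which is only valid because wqebb_size and num_wqebbs_per_page are now required to be powers of two. A standalone sketch (not part of the patch; every size below is a made-up illustrative value) that checks the equivalence:

#include <assert.h>

int main(void)
{
	/* Hypothetical sizes, chosen only for illustration; all powers of 2. */
	unsigned int wqebb_size = 64;
	unsigned int num_wqebbs_per_page = 64;
	unsigned int num_q_pages = 8;
	unsigned int wqebb_size_shift = 6;		/* ilog2(64) */
	unsigned int wqebbs_per_page_shift = 6;		/* ilog2(64) */
	unsigned int idx;

	for (idx = 0; idx < 4096; idx++) {
		/* old WQE_PAGE_OFF(): multiply by the WQEBB size */
		unsigned int off_mul = (idx & (num_wqebbs_per_page - 1)) * wqebb_size;
		/* new WQE_PAGE_OFF(): shift by ilog2(wqebb_size) */
		unsigned int off_shl = (idx & (num_wqebbs_per_page - 1)) << wqebb_size_shift;

		/* old WQE_PAGE_NUM(): divide by WQEBBs per page */
		unsigned int pg_div = (idx / num_wqebbs_per_page) & (num_q_pages - 1);
		/* new WQE_PAGE_NUM(): shift by ilog2(num_wqebbs_per_page) */
		unsigned int pg_shr = (idx >> wqebbs_per_page_shift) & (num_q_pages - 1);

		assert(off_mul == off_shl);
		assert(pg_div == pg_shr);
	}
	return 0;
}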
/**
* queue_alloc_page - allocate page for Queue
* @hwif: HW interface for allocating DMA
struct hinic_hwif *hwif = wqs->hwif;
struct pci_dev *pdev = hwif->pdev;
u16 num_wqebbs_per_page;
+ u16 wqebb_size_shift;
int err;
- if (wqebb_size == 0) {
- dev_err(&pdev->dev, "wqebb_size must be > 0\n");
+ if (!is_power_of_2(wqebb_size)) {
+ dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
return -EINVAL;
}
return -EINVAL;
}
- num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
+ wqebb_size_shift = ilog2(wqebb_size);
+ num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
+ >> wqebb_size_shift;
- if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
+ if (!is_power_of_2(num_wqebbs_per_page)) {
dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
return -EINVAL;
}
wq->q_depth = q_depth;
wq->max_wqe_size = max_wqe_size;
wq->num_wqebbs_per_page = num_wqebbs_per_page;
-
+ wq->wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);
+ wq->wqebb_size_shift = wqebb_size_shift;
wq->block_vaddr = WQ_BASE_VADDR(wqs, wq);
wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq);
wq->block_paddr = WQ_BASE_PADDR(wqs, wq);
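
To get a concrete feel for the new allocation-time checks, assume a hypothetical wqebb_size of 64 bytes and wq_page_size of 4096 bytes (illustrative values, not necessarily the driver's defaults): is_power_of_2(64) passes, wqebb_size_shift = ilog2(64) = 6, ALIGN(4096, 64) = 4096, so num_wqebbs_per_page = 4096 >> 6 = 64, which is itself a power of two and gives wqebbs_per_page_shift = 6. A small sketch of that arithmetic:

#include <stdio.h>

/* Same rounding as the kernel's ALIGN() for a power-of-2 alignment. */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int wqebb_size = 64;		/* hypothetical; must be a power of 2 */
	unsigned int wq_page_size = 4096;	/* hypothetical WQ page size */
	unsigned int wqebb_size_shift = 6;	/* ilog2(64) */
	unsigned int num_wqebbs_per_page;

	num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) >> wqebb_size_shift;
	printf("num_wqebbs_per_page = %u\n", num_wqebbs_per_page);	/* 64 */
	printf("wqebbs_per_page_shift = %u\n", 6u);			/* ilog2(64) */
	return 0;
}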
u16 q_depth, u16 max_wqe_size)
{
struct pci_dev *pdev = hwif->pdev;
+ u16 num_wqebbs_per_page_shift;
u16 num_wqebbs_per_page;
+ u16 wqebb_size_shift;
int i, j, err = -ENOMEM;
- if (wqebb_size == 0) {
- dev_err(&pdev->dev, "wqebb_size must be > 0\n");
+ if (!is_power_of_2(wqebb_size)) {
+ dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
return -EINVAL;
}
return -EINVAL;
}
- num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
+ wqebb_size_shift = ilog2(wqebb_size);
+ num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
+ >> wqebb_size_shift;
- if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
+ if (!is_power_of_2(num_wqebbs_per_page)) {
dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
return -EINVAL;
}
dev_err(&pdev->dev, "Failed to allocate CMDQ page\n");
return err;
}
+ num_wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);
for (i = 0; i < cmdq_blocks; i++) {
wq[i].hwif = hwif;
wq[i].q_depth = q_depth;
wq[i].max_wqe_size = max_wqe_size;
wq[i].num_wqebbs_per_page = num_wqebbs_per_page;
-
+ wq[i].wqebbs_per_page_shift = num_wqebbs_per_page_shift;
+ wq[i].wqebb_size_shift = wqebb_size_shift;
wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]);
wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]);
wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]);
*prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx));
- num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+ num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) >> wq->wqebb_size_shift;
if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) {
atomic_add(num_wqebbs, &wq->delta);
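
The unchanged context around the converted num_wqebbs computation also shows the reservation pattern hinic_get_wqe() relies on: subtract the requested WQEBB count from wq->delta and, if the result drops to zero or below, add it back and fail. A minimal userspace sketch of that reserve/rollback idiom using C11 atomics (my reading of the pattern, with a hypothetical free-space counter; this is not driver code):

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical free-WQEBB counter, initialized to an assumed queue depth. */
static atomic_int delta = 256;

/* Try to reserve num_wqebbs entries; undo the subtraction if the ring is full. */
static bool reserve_wqebbs(int num_wqebbs)
{
	/* fetch_sub returns the old value, so subtract again to get the new one. */
	if (atomic_fetch_sub(&delta, num_wqebbs) - num_wqebbs <= 0) {
		atomic_fetch_add(&delta, num_wqebbs);	/* roll back the reservation */
		return false;
	}
	return true;
}

/* Return previously reserved entries once the WQE has been consumed. */
static void release_wqebbs(int num_wqebbs)
{
	atomic_fetch_add(&delta, num_wqebbs);
}

int main(void)
{
	if (reserve_wqebbs(2))		/* reserve space for a 2-WQEBB WQE */
		release_wqebbs(2);	/* give it back after completion */
	return 0;
}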
**/
void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
{
- int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+ int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
+ >> wq->wqebb_size_shift;
atomic_add(num_wqebbs, &wq->cons_idx);
struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
u16 *cons_idx)
{
- int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+ int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
+ >> wq->wqebb_size_shift;
u16 curr_cons_idx, end_cons_idx;
int curr_pg, end_pg;
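
The same ALIGN-then-shift conversion appears in hinic_put_wqe() and hinic_read_wqe(): a WQE that is not an exact multiple of the WQEBB size is first rounded up, and the rounded size is then turned into a WQEBB count with the precomputed shift. With a hypothetical 64-byte WQEBB, for example, a 112-byte WQE aligns up to 128 bytes and therefore occupies 128 >> 6 = 2 WQEBBs, exactly what the old divide produced. A compact check of that claim (illustrative sizes only):

#include <assert.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* power-of-2 'a' only */

int main(void)
{
	unsigned int wqebb_size = 64;		/* hypothetical WQEBB size */
	unsigned int wqebb_size_shift = 6;	/* ilog2(64) */
	unsigned int wqe_size;

	for (wqe_size = 1; wqe_size <= 512; wqe_size++) {
		unsigned int by_div = ALIGN(wqe_size, wqebb_size) / wqebb_size;
		unsigned int by_shift = ALIGN(wqe_size, wqebb_size) >> wqebb_size_shift;

		assert(by_div == by_shift);
	}
	assert((ALIGN(112, 64) >> 6) == 2);	/* the worked example above */
	return 0;
}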