return ar->hif_ops->scatter_req_add(ar, s_req);
}
-static inline int ath6kl_hif_enable_scatter(struct ath6kl *ar,
- struct hif_dev_scat_sup_info *info)
+static inline int ath6kl_hif_enable_scatter(struct ath6kl *ar)
{
- return ar->hif_ops->enable_scatter(ar, info);
+ return ar->hif_ops->enable_scatter(ar);
}
static inline int ath6kl_hif_scat_req_rw(struct ath6kl *ar,
struct hif_scatter_item scat_list[1];
};
-struct hif_dev_scat_sup_info {
- int max_scat_entries;
- int max_xfer_szper_scatreq;
-};
-
struct ath6kl_hif_ops {
int (*read_write_sync)(struct ath6kl *ar, u32 addr, u8 *buf,
u32 len, u32 request);
struct hif_scatter_req *(*scatter_req_get)(struct ath6kl *ar);
void (*scatter_req_add)(struct ath6kl *ar,
struct hif_scatter_req *s_req);
- int (*enable_scatter)(struct ath6kl *ar,
- struct hif_dev_scat_sup_info *info);
+ int (*enable_scatter)(struct ath6kl *ar);
int (*scat_req_rw) (struct ath6kl *ar,
struct hif_scatter_req *scat_req);
void (*cleanup_scatter)(struct ath6kl *ar);
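For reference, a hedged sketch of how a HIF backend's ops table wires up after the signature change. The two callbacks touched by this patch (ath6kl_sdio_enable_scatter, ath6kl_sdio_cleanup_scatter) appear in the diff below; the remaining member names are assumed from the driver's naming pattern, not confirmed by this diff:

static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
	.read_write_sync = ath6kl_sdio_read_write_sync,
	.scatter_req_get = ath6kl_sdio_scatter_req_get,
	.scatter_req_add = ath6kl_sdio_scatter_req_add,
	/* enable_scatter now takes only struct ath6kl * */
	.enable_scatter = ath6kl_sdio_enable_scatter,
	.scat_req_rw = ath6kl_sdio_async_rw_scatter,
	.cleanup_scatter = ath6kl_sdio_cleanup_scatter,
};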
{
struct htc_target *target = endpoint->target;
struct hif_scatter_req *scat_req = NULL;
- struct hif_dev_scat_sup_info hif_info;
int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
- hif_info = target->dev->hif_scat_info;
-
while (true) {
n_scat = get_queue_depth(queue);
n_scat = min(n_scat, target->msg_per_bndl_max);
static void htc_setup_msg_bndl(struct htc_target *target)
{
- struct hif_dev_scat_sup_info *scat_info = &target->dev->hif_scat_info;
-
/* limit what HTC can handle */
target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
target->msg_per_bndl_max);
- if (ath6kl_hif_enable_scatter(target->dev->ar, scat_info)) {
+ if (ath6kl_hif_enable_scatter(target->dev->ar)) {
target->msg_per_bndl_max = 0;
return;
}
/* limit bundling to what the device layer can handle */
- target->msg_per_bndl_max = min(scat_info->max_scat_entries,
+ target->msg_per_bndl_max = min(target->max_scat_entries,
target->msg_per_bndl_max);
ath6kl_dbg(ATH6KL_DBG_TRC,
	   "htc bundling allowed. max msg per htc bundle: %d\n",
	   target->msg_per_bndl_max);
/* Max rx bundle size is limited by the max tx bundle size */
- target->max_rx_bndl_sz = scat_info->max_xfer_szper_scatreq;
+ target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;
/* Max tx bundle size is limited by the extended mbox address range */
target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
- scat_info->max_xfer_szper_scatreq);
+ target->max_xfer_szper_scatreq);
ath6kl_dbg(ATH6KL_DBG_ANY, "max recv: %d max send: %d\n",
target->max_rx_bndl_sz, target->max_tx_bndl_sz);
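To make the capping above concrete, a hedged walk-through of the two min() steps, assuming the usual ath6kl values of HTC_HOST_MAX_MSG_PER_BUNDLE == 8 and a backend reporting max_scat_entries == 16 (both values are assumptions, not guaranteed by this diff):

	/* HTC cap:    msg_per_bndl_max = min(8,  msg_per_bndl_max) */
	/* device cap: msg_per_bndl_max = min(16, msg_per_bndl_max) */

So a target that negotiated 16 messages per bundle ends up capped at 8 by the HTC-side limit, while a backend reporting fewer than 8 scatter entries would win instead.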
u32 block_sz;
u32 block_mask;
+
+ int max_scat_entries;
+ int max_xfer_szper_scatreq;
};
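With the limits promoted into struct htc_target, HTC code can read them directly instead of going through target->dev->hif_scat_info. A minimal illustrative helper, hypothetical and not part of this patch:

/* Hypothetical: does a transfer of 'len' bytes fit in one scatter
 * request, given the limits the HIF backend reported? */
static inline bool htc_len_fits_scat_req(struct htc_target *target, int len)
{
	return len <= target->max_xfer_szper_scatreq;
}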
void *htc_create(struct ath6kl *ar);
struct ath6kl_irq_enable_reg irq_en_reg;
u8 pad3[A_CACHE_LINE_PAD];
struct htc_target *htc_cnxt;
- struct hif_dev_scat_sup_info hif_scat_info;
int chk_irq_status_cnt;
struct ath6kl *ar;
};
}
/* setup of HIF scatter resources */
-static int ath6kl_sdio_enable_scatter(struct ath6kl *ar,
- struct hif_dev_scat_sup_info *pinfo)
+static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct htc_target *target = ar->htc_target;
int ret;
bool virt_scat = false;
MAX_SCATTER_REQUESTS,
MAX_SCATTER_ENTRIES_PER_REQ);
- pinfo->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
- pinfo->max_xfer_szper_scatreq =
+ target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
+ target->max_xfer_szper_scatreq =
MAX_SCATTER_REQ_TRANSFER_SIZE;
} else {
ath6kl_sdio_cleanup_scatter(ar);
"Vitual scatter enabled, max_scat_req:%d, entries:%d\n",
ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);
- pinfo->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
- pinfo->max_xfer_szper_scatreq =
+ target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
+ target->max_xfer_szper_scatreq =
ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
}
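Putting it together, a hedged sketch of the resulting flow: HTC calls ath6kl_hif_enable_scatter(ar), which dispatches to ath6kl_sdio_enable_scatter(), and the backend now publishes its limits straight into ar->htc_target rather than filling a caller-supplied struct. The error handling and debug print here are illustrative only:

	if (!ath6kl_hif_enable_scatter(ar)) {
		struct htc_target *target = ar->htc_target;

		ath6kl_dbg(ATH6KL_DBG_ANY,
			   "scatter ready: %d entries, %d bytes per request\n",
			   target->max_scat_entries,
			   target->max_xfer_szper_scatreq);
	}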