CFLAGS_trace.o := -I$(src)
CFLAGS_usb_trace.o := -I$(src)
-mt76x02-lib-y := mt76x02_util.o mt76x02_mac.o
+mt76x02-lib-y := mt76x02_util.o mt76x02_mac.o mt76x02_mcu.o
mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o
put_unaligned_le32(val, skb_put(skb, 4));
}
-int mt76x0_mcu_function_select(struct mt76x0_dev *dev,
- enum mcu_function func, u32 val)
-{
- struct sk_buff *skb;
- struct {
- __le32 id;
- __le32 value;
- } __packed __aligned(4) msg = {
- .id = cpu_to_le32(func),
- .value = cpu_to_le32(val),
- };
-
- skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
- return mt76_mcu_send_msg(dev, skb, CMD_FUN_SET_OP, func == 5);
-}
-
int
mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val)
{
int mt76x0_mcu_cmd_init(struct mt76x0_dev *dev)
{
- return mt76x0_mcu_function_select(dev, Q_SELECT, 1);
+ return mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, 1, false);
}
int
mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val);
-int
-mt76x0_mcu_function_select(struct mt76x0_dev *dev, enum mcu_function func, u32 val);
-
#endif
return ;
}
- mt76x0_mcu_function_select(dev, BW_SETTING, bw);
+ mt76x02_mcu_function_select(&dev->mt76, BW_SETTING, bw, false);
}
static void
--- /dev/null
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+
+#include "mt76.h"
+#include "mt76x02_mcu.h"
+#include "mt76x02_dma.h"
+
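+/* Copy an MCU command payload into a newly allocated skb. */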
+struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+ memcpy(skb_put(skb, len), data, len);
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_alloc);
+
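+/* Wait until @expires for a response skb on the MMIO MCU response queue. */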
+static struct sk_buff *
+mt76x02_mcu_get_response(struct mt76_dev *dev, unsigned long expires)
+{
+ unsigned long timeout;
+
+ if (!time_is_after_jiffies(expires))
+ return NULL;
+
+ timeout = expires - jiffies;
+ wait_event_timeout(dev->mmio.mcu.wait,
+ !skb_queue_empty(&dev->mmio.mcu.res_q),
+ timeout);
+ return skb_dequeue(&dev->mmio.mcu.res_q);
+}
+
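+/* DMA-map the command skb and push it on the MCU TX queue, encoding
+ * cmd, seq, port and length in the descriptor info word.
+ */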
+static int
+mt76x02_tx_queue_mcu(struct mt76_dev *dev, enum mt76_txq_id qid,
+ struct sk_buff *skb, int cmd, int seq)
+{
+ struct mt76_queue *q = &dev->q_tx[qid];
+ struct mt76_queue_buf buf;
+ dma_addr_t addr;
+ u32 tx_info;
+
+ tx_info = MT_MCU_MSG_TYPE_CMD |
+ FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
+ FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
+ FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
+ FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
+
+ addr = dma_map_single(dev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev->dev, addr))
+ return -ENOMEM;
+
+ buf.addr = addr;
+ buf.len = skb->len;
+ spin_lock_bh(&q->lock);
+ dev->queue_ops->add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
+ dev->queue_ops->kick(dev, q);
+ spin_unlock_bh(&q->lock);
+
+ return 0;
+}
+
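+/* Queue an MCU command and, if @wait_resp is set, wait up to one second
+ * for a response carrying the matching sequence number.
+ */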
+int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, bool wait_resp)
+{
+ unsigned long expires = jiffies + HZ;
+ int ret;
+ u8 seq;
+
+ if (!skb)
+ return -EINVAL;
+
+ mutex_lock(&dev->mmio.mcu.mutex);
+
+ seq = ++dev->mmio.mcu.msg_seq & 0xf;
+ if (!seq)
+ seq = ++dev->mmio.mcu.msg_seq & 0xf;
+
+ ret = mt76x02_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
+ if (ret)
+ goto out;
+
+ while (wait_resp) {
+ u32 *rxfce;
+ bool check_seq = false;
+
+ skb = mt76x02_mcu_get_response(dev, expires);
+ if (!skb) {
+ dev_err(dev->dev,
+ "MCU message %d (seq %d) timed out\n", cmd,
+ seq);
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ rxfce = (u32 *) skb->cb;
+
+ if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
+ check_seq = true;
+
+ dev_kfree_skb(skb);
+ if (check_seq)
+ break;
+ }
+
+out:
+ mutex_unlock(&dev->mmio.mcu.mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_send);
+
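+/* Select MCU function @func with argument @val via CMD_FUN_SET_OP. */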
+int mt76x02_mcu_function_select(struct mt76_dev *dev,
+ enum mcu_function func,
+ u32 val, bool wait_resp)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(func),
+ .value = cpu_to_le32(val),
+ };
+
+ skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg));
+ return dev->mcu_ops->mcu_send_msg(dev, skb, CMD_FUN_SET_OP,
+ wait_resp);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_function_select);
+
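+/* Turn the radio on or off through a CMD_POWER_SAVING_OP request. */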
+int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on,
+ bool wait_resp)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 mode;
+ __le32 level;
+ } __packed __aligned(4) msg = {
+ .mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF),
+ .level = cpu_to_le32(0),
+ };
+
+ skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg));
+ return dev->mcu_ops->mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP,
+ wait_resp);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state);
+
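+/* Signal the MCU via MT_MCU_INT_LEVEL, wait briefly, then drain any
+ * pending response skbs.
+ */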
+int mt76x02_mcu_cleanup(struct mt76_dev *dev)
+{
+ struct sk_buff *skb;
+
+ dev->bus->wr(dev, MT_MCU_INT_LEVEL, 1);
+ usleep_range(20000, 30000);
+
+ while ((skb = skb_dequeue(&dev->mmio.mcu.res_q)) != NULL)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_cleanup);
u8 pad[2];
};
+int mt76x02_mcu_cleanup(struct mt76_dev *dev);
+struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len);
+int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, bool wait_resp);
+int mt76x02_mcu_function_select(struct mt76_dev *dev,
+ enum mcu_function func,
+ u32 val, bool wait_resp);
+int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on,
+ bool wait_resp);
+
#endif /* __MT76x02_MCU_H */
int mt76x2_mcu_init(struct mt76x2_dev *dev);
int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
u8 bw_index, bool scan);
-int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on);
int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
u8 channel);
-int mt76x2_mcu_cleanup(struct mt76x2_dev *dev);
int mt76x2_dma_init(struct mt76x2_dev *dev);
void mt76x2_dma_cleanup(struct mt76x2_dev *dev);
void mt76x2_cleanup(struct mt76x2_dev *dev);
-int mt76x2_tx_queue_mcu(struct mt76_dev *dev, enum mt76_txq_id qid,
- struct sk_buff *skb, int cmd, int seq);
void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct sk_buff *skb);
int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
#include "mt76x2.h"
#include "mt76x02_dma.h"
-int
-mt76x2_tx_queue_mcu(struct mt76_dev *dev, enum mt76_txq_id qid,
- struct sk_buff *skb, int cmd, int seq)
-{
- struct mt76_queue *q = &dev->q_tx[qid];
- struct mt76_queue_buf buf;
- dma_addr_t addr;
- u32 tx_info;
-
- tx_info = MT_MCU_MSG_TYPE_CMD |
- FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
- FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
- FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
- FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
-
- addr = dma_map_single(dev->dev, skb->data, skb->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, addr))
- return -ENOMEM;
-
- buf.addr = addr;
- buf.len = skb->len;
- spin_lock_bh(&q->lock);
- dev->queue_ops->add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
- dev->queue_ops->kick(dev, q);
- spin_unlock_bh(&q->lock);
-
- return 0;
-}
-
static int
mt76x2_init_tx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
int idx, int n_desc)
{
cancel_delayed_work_sync(&dev->cal_work);
cancel_delayed_work_sync(&dev->mac_work);
- mt76x2_mcu_set_radio_state(dev, false);
+ mt76x02_mcu_set_radio_state(&dev->mt76, false, true);
mt76x2_mac_stop(dev, false);
}
tasklet_disable(&dev->pre_tbtt_tasklet);
mt76x2_stop_hardware(dev);
mt76x2_dma_cleanup(dev);
- mt76x2_mcu_cleanup(dev);
+ mt76x02_mcu_cleanup(&dev->mt76);
}
struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev)
#include "mt76x2_eeprom.h"
#include "mt76x02_dma.h"
-static struct sk_buff *mt76x2_mcu_msg_alloc(const void *data, int len)
-{
- struct sk_buff *skb;
-
- skb = alloc_skb(len, GFP_KERNEL);
- if (!skb)
- return NULL;
- memcpy(skb_put(skb, len), data, len);
-
- return skb;
-}
-
-static struct sk_buff *
-mt76x2_mcu_get_response(struct mt76_dev *dev, unsigned long expires)
-{
- unsigned long timeout;
-
- if (!time_is_after_jiffies(expires))
- return NULL;
-
- timeout = expires - jiffies;
- wait_event_timeout(dev->mmio.mcu.wait,
- !skb_queue_empty(&dev->mmio.mcu.res_q),
- timeout);
- return skb_dequeue(&dev->mmio.mcu.res_q);
-}
-
-static int
-mt76x2_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
- int cmd, bool wait_resp)
-{
- unsigned long expires = jiffies + HZ;
- int ret;
- u8 seq;
-
- if (!skb)
- return -EINVAL;
-
- mutex_lock(&dev->mmio.mcu.mutex);
-
- seq = ++dev->mmio.mcu.msg_seq & 0xf;
- if (!seq)
- seq = ++dev->mmio.mcu.msg_seq & 0xf;
-
- ret = mt76x2_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
- if (ret)
- goto out;
-
- while (wait_resp) {
- u32 *rxfce;
- bool check_seq = false;
-
- skb = mt76x2_mcu_get_response(dev, expires);
- if (!skb) {
- dev_err(dev->dev,
- "MCU message %d (seq %d) timed out\n", cmd,
- seq);
- ret = -ETIMEDOUT;
- break;
- }
-
- rxfce = (u32 *) skb->cb;
-
- if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
- check_seq = true;
-
- dev_kfree_skb(skb);
- if (check_seq)
- break;
- }
-
-out:
- mutex_unlock(&dev->mmio.mcu.mutex);
-
- return ret;
-}
-
static int
mt76pci_load_rom_patch(struct mt76x2_dev *dev)
{
return -ENOENT;
}
-static int
-mt76x2_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func,
- u32 val)
-{
- struct sk_buff *skb;
- struct {
- __le32 id;
- __le32 value;
- } __packed __aligned(4) msg = {
- .id = cpu_to_le32(func),
- .value = cpu_to_le32(val),
- };
-
- skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
- return mt76_mcu_send_msg(dev, skb, CMD_FUN_SET_OP, true);
-}
-
int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
u8 channel)
{
return mt76_mcu_send_msg(dev, skb, CMD_SWITCH_CHANNEL_OP, true);
}
-int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on)
-{
- struct sk_buff *skb;
- struct {
- __le32 mode;
- __le32 level;
- } __packed __aligned(4) msg = {
- .mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF),
- .level = cpu_to_le32(0),
- };
-
- skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
- return mt76_mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP, true);
-}
-
int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
u32 param)
{
int mt76x2_mcu_init(struct mt76x2_dev *dev)
{
static const struct mt76_mcu_ops mt76x2_mcu_ops = {
- .mcu_msg_alloc = mt76x2_mcu_msg_alloc,
- .mcu_send_msg = mt76x2_mcu_msg_send,
+ .mcu_msg_alloc = mt76x02_mcu_msg_alloc,
+ .mcu_send_msg = mt76x02_mcu_msg_send,
};
int ret;
if (ret)
return ret;
- mt76x2_mcu_function_select(dev, Q_SELECT, 1);
- return 0;
-}
-
-int mt76x2_mcu_cleanup(struct mt76x2_dev *dev)
-{
- struct sk_buff *skb;
-
- mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
- usleep_range(20000, 30000);
-
- while ((skb = skb_dequeue(&dev->mt76.mmio.mcu.res_q)) != NULL)
- dev_kfree_skb(skb);
-
+ mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, 1, true);
return 0;
}
{
int ret;
- ret = mt76x2_mcu_set_radio_state(dev, true);
+ ret = mt76x02_mcu_set_radio_state(&dev->mt76, true, true);
if (ret)
return ret;
bool force);
int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
bool ext, int rssi, u32 false_cca);
-int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val);
int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type,
u8 temp_level, u8 channel);
int mt76x2u_mcu_init(struct mt76x2_dev *dev);
void mt76x2u_cleanup(struct mt76x2_dev *dev)
{
- mt76x2u_mcu_set_radio_state(dev, false);
+ mt76x02_mcu_set_radio_state(&dev->mt76, false, false);
mt76x2u_stop_hw(dev);
mt76u_queues_deinit(&dev->mt76);
mt76u_mcu_deinit(&dev->mt76);
#define MT76U_MCU_DLM_OFFSET 0x110000
#define MT76U_MCU_ROM_PATCH_OFFSET 0x90000
-static int
-mt76x2u_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func,
- u32 val)
-{
- struct {
- __le32 id;
- __le32 value;
- } __packed __aligned(4) msg = {
- .id = cpu_to_le32(func),
- .value = cpu_to_le32(val),
- };
- struct sk_buff *skb;
-
- skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
- return mt76_mcu_send_msg(dev, skb, CMD_FUN_SET_OP,
- func != Q_SELECT);
-}
-
-int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val)
-{
- struct {
- __le32 mode;
- __le32 level;
- } __packed __aligned(4) msg = {
- .mode = cpu_to_le32(val ? RADIO_ON : RADIO_OFF),
- .level = cpu_to_le32(0),
- };
- struct sk_buff *skb;
-
- skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
- return mt76_mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP,
- false);
-}
-
int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
u8 channel)
{
{
int err;
- err = mt76x2u_mcu_function_select(dev, Q_SELECT, 1);
+ err = mt76x02_mcu_function_select(&dev->mt76, Q_SELECT,
+ 1, false);
if (err < 0)
return err;
- return mt76x2u_mcu_set_radio_state(dev, true);
+ return mt76x02_mcu_set_radio_state(&dev->mt76, true, false);
}