Use mcu common helpers instead of usb-specific routines.
Add static qualifier to the following functions:
- mt76u_mcu_msg_alloc
- __mt76u_mcu_send_msg
- mt76u_mcu_send_msg
- mt76u_mcu_wr_rp
- mt76u_mcu_rd_rp
- mt76u_wr_rp
- mt76u_rd_rp
This is a preliminary patch to move the mt76x02 usb mcu code into the
mt76x02-usb module.
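
For reference, the common helpers used below dispatch through per-device
ops instead of calling the usb routines directly. A minimal sketch of that
indirection follows; only the mcu_wr_rp/mcu_rd_rp fields are visible in
this diff, the remaining field and wrapper names are assumptions about
mt76.h at this point in the series:

    struct mt76_mcu_ops {
    	struct sk_buff *(*mcu_msg_alloc)(const void *data, int len);
    	int (*mcu_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
    			    int cmd, bool wait_resp);
    	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
    			 const struct mt76_reg_pair *rp, int len);
    	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
    			 struct mt76_reg_pair *rp, int len);
    };

    /* bus-agnostic wrappers: the usb backend installs dev->mcu_ops
     * (and the bus wr_rp/rd_rp callbacks) at probe time
     */
    #define mt76_mcu_msg_alloc(dev, ...) \
    	(dev)->mt76.mcu_ops->mcu_msg_alloc(__VA_ARGS__)
    #define mt76_mcu_send_msg(dev, ...) \
    	(dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)
    #define mt76_wr_rp(dev, ...) \
    	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
    #define mt76_rd_rp(dev, ...) \
    	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)
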
Acked-by: Stanislaw Gruszka <sgruszka@redhat.com>
Signed-off-by: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
int mt76u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
int data_len, u32 max_payload, u32 offset);
void mt76u_mcu_complete_urb(struct urb *urb);
-struct sk_buff *mt76u_mcu_msg_alloc(const void *data, int len);
-int __mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
- int cmd, bool wait_resp);
-int mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
- int cmd, bool wait_resp);
-int mt76u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
- const struct mt76_reg_pair *data, int n);
-int mt76u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
- struct mt76_reg_pair *data, int n);
-int mt76u_wr_rp(struct mt76_dev *dev, u32 base,
- const struct mt76_reg_pair *data, int n);
-int mt76u_rd_rp(struct mt76_dev *dev, u32 base,
- struct mt76_reg_pair *data, int n);
void mt76u_mcu_fw_reset(struct mt76_dev *dev);
int mt76u_mcu_init_rx(struct mt76_dev *dev);
void mt76u_mcu_deinit(struct mt76_dev *dev);
mt76_wr(dev, MT_USB_DMA_CFG, val);
}
-#define RANDOM_WRITE(dev, tab) \
- mt76u_wr_rp(&(dev)->mt76, MT_MCU_MEMMAP_WLAN, \
- tab, ARRAY_SIZE(tab))
+#define RANDOM_WRITE(dev, tab) \
+ mt76_wr_rp(dev, MT_MCU_MEMMAP_WLAN, \
+ tab, ARRAY_SIZE(tab))
static int mt76x0_init_bbp(struct mt76x0_dev *dev)
{
.value = cpu_to_le32(val),
};
- skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
if (!skb)
return -ENOMEM;
- return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_FUN_SET_OP,
- func == 5);
+ return mt76_mcu_send_msg(dev, skb, CMD_FUN_SET_OP, func == 5);
}
int
.value = cpu_to_le32(val),
};
- skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
if (!skb)
return -ENOMEM;
- return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP,
- true);
+ return mt76_mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true);
}
int mt76x0_burst_write_regs(struct mt76x0_dev *dev, u32 offset,
for (i = 0; i < cnt; i++)
skb_put_le32(skb, data[i]);
- ret = mt76u_mcu_send_msg(&dev->mt76, skb, CMD_BURST_WRITE,
- cnt == n);
+ ret = mt76_mcu_send_msg(dev, skb, CMD_BURST_WRITE, cnt == n);
if (ret)
return ret;
.value = val,
};
- return mt76u_wr_rp(&dev->mt76, MT_MCU_MEMMAP_RF, &pair, 1);
+ return mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1);
} else {
WARN_ON_ONCE(1);
return mt76x0_rf_csr_wr(dev, offset, val);
.reg = offset,
};
- ret = mt76u_rd_rp(&dev->mt76, MT_MCU_MEMMAP_RF, &pair, 1);
+ ret = mt76_rd_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1);
val = pair.value;
} else {
WARN_ON_ONCE(1);
}
#endif
-#define RF_RANDOM_WRITE(dev, tab) \
- mt76u_wr_rp(&(dev)->mt76, MT_MCU_MEMMAP_RF, \
- tab, ARRAY_SIZE(tab))
+#define RF_RANDOM_WRITE(dev, tab) \
+ mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, \
+ tab, ARRAY_SIZE(tab))
int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev)
{
};
struct sk_buff *skb;
- skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
if (!skb)
return -ENOMEM;
- return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_FUN_SET_OP,
- func != Q_SELECT);
+ return mt76_mcu_send_msg(dev, skb, CMD_FUN_SET_OP,
+ func != Q_SELECT);
}
int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val)
};
struct sk_buff *skb;
- skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
if (!skb)
return -ENOMEM;
- return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_POWER_SAVING_OP,
- false);
+ return mt76_mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP,
+ false);
}
int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
msg.cfg = cpu_to_le32(val);
/* first set the channel without the extension channel info */
- skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
if (!skb)
return -ENOMEM;
- return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_LOAD_CR, true);
+ return mt76_mcu_send_msg(dev, skb, CMD_LOAD_CR, true);
}
int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
struct sk_buff *skb;
/* first set the channel without the extension channel info */
- skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
if (!skb)
return -ENOMEM;
- mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true);
+ mt76_mcu_send_msg(dev, skb, CMD_SWITCH_CHANNEL_OP, true);
usleep_range(5000, 10000);
msg.ext_chan = 0xe0 + bw_index;
- skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
if (!skb)
return -ENOMEM;
- return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true);
+ return mt76_mcu_send_msg(dev, skb, CMD_SWITCH_CHANNEL_OP, true);
}
int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
};
struct sk_buff *skb;
- skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
if (!skb)
return -ENOMEM;
- return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true);
+ return mt76_mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true);
}
int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
if (force)
msg.channel |= cpu_to_le32(BIT(31));
- skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
if (!skb)
return -ENOMEM;
- return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_INIT_GAIN_OP, true);
+ return mt76_mcu_send_msg(dev, skb, CMD_INIT_GAIN_OP, true);
}
int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
val |= BIT(30);
msg.channel = cpu_to_le32(val);
- skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
if (!skb)
return -ENOMEM;
- return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_DYNC_VGA_OP, true);
+ return mt76_mcu_send_msg(dev, skb, CMD_DYNC_VGA_OP, true);
}
int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev,
};
struct sk_buff *skb;
- skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
if (!skb)
return -ENOMEM;
- return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true);
+ return mt76_mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true);
}
static void mt76x2u_mcu_load_ivb(struct mt76x2_dev *dev)
return 0;
}
-int mt76u_wr_rp(struct mt76_dev *dev, u32 base,
- const struct mt76_reg_pair *data, int n)
+static int
+mt76u_wr_rp(struct mt76_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int n)
{
if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
- return mt76u_mcu_wr_rp(dev, base, data, n);
+ return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
else
return mt76u_req_wr_rp(dev, base, data, n);
}
-EXPORT_SYMBOL_GPL(mt76u_wr_rp);
static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
return 0;
}
-int mt76u_rd_rp(struct mt76_dev *dev, u32 base,
- struct mt76_reg_pair *data, int n)
+static int
+mt76u_rd_rp(struct mt76_dev *dev, u32 base,
+ struct mt76_reg_pair *data, int n)
{
if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
- return mt76u_mcu_rd_rp(dev, base, data, n);
+ return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
else
return mt76u_req_rd_rp(dev, base, data, n);
}
-EXPORT_SYMBOL_GPL(mt76u_rd_rp);
static int
mt76u_set_endpoints(struct usb_interface *intf,
#define MT_INBAND_PACKET_MAX_LEN 192
-struct sk_buff *mt76u_mcu_msg_alloc(const void *data, int len)
+static struct sk_buff *
+mt76u_mcu_msg_alloc(const void *data, int len)
{
struct sk_buff *skb;
return skb;
}
-EXPORT_SYMBOL_GPL(mt76u_mcu_msg_alloc);
void mt76u_mcu_complete_urb(struct urb *urb)
{
return -ETIMEDOUT;
}
-int __mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
- int cmd, bool wait_resp)
+static int
+__mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, bool wait_resp)
{
struct usb_interface *intf = to_usb_interface(dev->dev);
struct usb_device *udev = interface_to_usbdev(intf);
return ret;
}
-EXPORT_SYMBOL_GPL(__mt76u_mcu_send_msg);
-int mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
- int cmd, bool wait_resp)
+static int
+mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, bool wait_resp)
{
struct mt76_usb *usb = &dev->usb;
int err;
return err;
}
-EXPORT_SYMBOL_GPL(mt76u_mcu_send_msg);
static inline void skb_put_le32(struct sk_buff *skb, u32 val)
{
put_unaligned_le32(val, skb_put(skb, 4));
}
-int mt76u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
- const struct mt76_reg_pair *data, int n)
+static int
+mt76u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int n)
{
const int CMD_RANDOM_WRITE = 12;
const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
return mt76u_mcu_wr_rp(dev, base, data + cnt, n - cnt);
}
-int mt76u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
- struct mt76_reg_pair *data, int n)
+static int
+mt76u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
+ struct mt76_reg_pair *data, int n)
{
const int CMD_RANDOM_READ = 10;
const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;