L: netdev@vger.kernel.org
W: http://www.emulex.com
S: Supported
-F: drivers/net/benet/
+F: drivers/net/ethernet/emulex/benet/
SFC NETWORK DRIVER
M: Solarflare linux maintainers <linux-net-drivers@solarflare.com>
-source "drivers/net/benet/Kconfig"
-
endif # NETDEV_10000
source "drivers/net/tokenring/Kconfig"
obj-$(CONFIG_TEHUTI) += tehuti.o
obj-$(CONFIG_ENIC) += enic/
obj-$(CONFIG_JME) += jme.o
-obj-$(CONFIG_BE2NET) += benet/
obj-$(CONFIG_VMXNET3) += vmxnet3/
obj-$(CONFIG_BNA) += bna/
+++ /dev/null
-config BE2NET
- tristate "ServerEngines' 10Gbps NIC - BladeEngine"
- depends on PCI && INET
- help
- This driver implements the NIC functionality for ServerEngines'
- 10Gbps network adapter - BladeEngine.
+++ /dev/null
-#
-# Makefile to build the network driver for ServerEngines' BladeEngine.
-#
-
-obj-$(CONFIG_BE2NET) += be2net.o
-
-be2net-y := be_main.o be_cmds.o be_ethtool.o
+++ /dev/null
-/*
- * Copyright (C) 2005 - 2011 Emulex
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
- * Public License is included in this distribution in the file called COPYING.
- *
- * Contact Information:
- * linux-drivers@emulex.com
- *
- * Emulex
- * 3333 Susan Street
- * Costa Mesa, CA 92626
- */
-
-#ifndef BE_H
-#define BE_H
-
-#include <linux/pci.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-#include <net/tcp.h>
-#include <net/ip.h>
-#include <net/ipv6.h>
-#include <linux/if_vlan.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#include <linux/firmware.h>
-#include <linux/slab.h>
-#include <linux/u64_stats_sync.h>
-
-#include "be_hw.h"
-
-#define DRV_VER "4.0.100u"
-#define DRV_NAME "be2net"
-#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
-#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
-#define OC_NAME "Emulex OneConnect 10Gbps NIC"
-#define OC_NAME_BE OC_NAME "(be3)"
-#define OC_NAME_LANCER OC_NAME "(Lancer)"
-#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
-
-#define BE_VENDOR_ID 0x19a2
-#define EMULEX_VENDOR_ID 0x10df
-#define BE_DEVICE_ID1 0x211
-#define BE_DEVICE_ID2 0x221
-#define OC_DEVICE_ID1 0x700 /* Device Id for BE2 cards */
-#define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */
-#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
-#define OC_DEVICE_ID4 0xe228 /* Device id for VF in Lancer */
-
-static inline char *nic_name(struct pci_dev *pdev)
-{
- switch (pdev->device) {
- case OC_DEVICE_ID1:
- return OC_NAME;
- case OC_DEVICE_ID2:
- return OC_NAME_BE;
- case OC_DEVICE_ID3:
- case OC_DEVICE_ID4:
- return OC_NAME_LANCER;
- case BE_DEVICE_ID2:
- return BE3_NAME;
- default:
- return BE_NAME;
- }
-}
-
-/* Number of bytes of an RX frame that are copied to skb->data */
-#define BE_HDR_LEN ((u16) 64)
-#define BE_MAX_JUMBO_FRAME_SIZE 9018
-#define BE_MIN_MTU 256
-
-#define BE_NUM_VLANS_SUPPORTED 64
-#define BE_MAX_EQD 96
-#define BE_MAX_TX_FRAG_COUNT 30
-
-#define EVNT_Q_LEN 1024
-#define TX_Q_LEN 2048
-#define TX_CQ_LEN 1024
-#define RX_Q_LEN 1024 /* Does not support any other value */
-#define RX_CQ_LEN 1024
-#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
-#define MCC_CQ_LEN 256
-
-#define MAX_RSS_QS 4 /* BE limit is 4 queues/port */
-#define MAX_RX_QS (MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */
-#define MAX_TX_QS 8
-#define BE_MAX_MSIX_VECTORS (MAX_RX_QS + 1)/* RX + TX */
-#define BE_NAPI_WEIGHT 64
-#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
-#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
-
-#define FW_VER_LEN 32
-
-struct be_dma_mem {
- void *va;
- dma_addr_t dma;
- u32 size;
-};
-
-struct be_queue_info {
- struct be_dma_mem dma_mem;
- u16 len;
- u16 entry_size; /* Size of an element in the queue */
- u16 id;
- u16 tail, head;
- bool created;
- atomic_t used; /* Number of valid elements in the queue */
-};
-
-static inline u32 MODULO(u16 val, u16 limit)
-{
- BUG_ON(limit & (limit - 1));
- return val & (limit - 1);
-}
-
-static inline void index_adv(u16 *index, u16 val, u16 limit)
-{
- *index = MODULO((*index + val), limit);
-}
-
-static inline void index_inc(u16 *index, u16 limit)
-{
- *index = MODULO((*index + 1), limit);
-}
-
-static inline void *queue_head_node(struct be_queue_info *q)
-{
- return q->dma_mem.va + q->head * q->entry_size;
-}
-
-static inline void *queue_tail_node(struct be_queue_info *q)
-{
- return q->dma_mem.va + q->tail * q->entry_size;
-}
-
-static inline void queue_head_inc(struct be_queue_info *q)
-{
- index_inc(&q->head, q->len);
-}
-
-static inline void queue_tail_inc(struct be_queue_info *q)
-{
- index_inc(&q->tail, q->len);
-}
-
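For reference, a standalone sketch (not part of this patch) of the power-of-two wrap these helpers rely on: every queue length above is a power of two, so "val & (limit - 1)" substitutes for "val % limit", and the BUG_ON rejects any other length.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t ring_modulo(uint16_t val, uint16_t limit)
{
	assert((limit & (limit - 1)) == 0);	/* power of two only */
	return val & (limit - 1);
}

int main(void)
{
	uint16_t head = 1022;
	const uint16_t len = 1024;	/* e.g. EVNT_Q_LEN */
	int i;

	for (i = 0; i < 4; i++) {
		printf("%u ", head);	/* prints: 1022 1023 0 1 */
		head = ring_modulo(head + 1, len);
	}
	printf("\n");
	return 0;
}
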
-struct be_eq_obj {
- struct be_queue_info q;
- char desc[32];
-
- /* Adaptive interrupt coalescing (AIC) info */
- bool enable_aic;
- u16 min_eqd; /* in usecs */
- u16 max_eqd; /* in usecs */
- u16 cur_eqd; /* in usecs */
- u8 eq_idx;
-
- struct napi_struct napi;
-};
-
-struct be_mcc_obj {
- struct be_queue_info q;
- struct be_queue_info cq;
- bool rearm_cq;
-};
-
-struct be_tx_stats {
- u64 tx_bytes;
- u64 tx_pkts;
- u64 tx_reqs;
- u64 tx_wrbs;
- u64 tx_compl;
- ulong tx_jiffies;
- u32 tx_stops;
- struct u64_stats_sync sync;
- struct u64_stats_sync sync_compl;
-};
-
-struct be_tx_obj {
- struct be_queue_info q;
- struct be_queue_info cq;
- /* Remember the skbs that were transmitted */
- struct sk_buff *sent_skb_list[TX_Q_LEN];
- struct be_tx_stats stats;
-};
-
-/* Struct to remember the pages posted for rx frags */
-struct be_rx_page_info {
- struct page *page;
- DEFINE_DMA_UNMAP_ADDR(bus);
- u16 page_offset;
- bool last_page_user;
-};
-
-struct be_rx_stats {
- u64 rx_bytes;
- u64 rx_pkts;
- u64 rx_pkts_prev;
- ulong rx_jiffies;
- u32 rx_drops_no_skbs; /* skb allocation errors */
-	u32 rx_drops_no_frags;	/* HW has not fetched frags */
- u32 rx_post_fail; /* page post alloc failures */
- u32 rx_polls; /* NAPI calls */
- u32 rx_events;
- u32 rx_compl;
- u32 rx_mcast_pkts;
- u32 rx_compl_err; /* completions with err set */
- u32 rx_pps; /* pkts per second */
- struct u64_stats_sync sync;
-};
-
-struct be_rx_compl_info {
- u32 rss_hash;
- u16 vlan_tag;
- u16 pkt_size;
- u16 rxq_idx;
- u16 port;
- u8 vlanf;
- u8 num_rcvd;
- u8 err;
- u8 ipf;
- u8 tcpf;
- u8 udpf;
- u8 ip_csum;
- u8 l4_csum;
- u8 ipv6;
- u8 vtm;
- u8 pkt_type;
-};
-
-struct be_rx_obj {
- struct be_adapter *adapter;
- struct be_queue_info q;
- struct be_queue_info cq;
- struct be_rx_compl_info rxcp;
- struct be_rx_page_info page_info_tbl[RX_Q_LEN];
- struct be_eq_obj rx_eq;
- struct be_rx_stats stats;
- u8 rss_id;
- bool rx_post_starved; /* Zero rx frags have been posted to BE */
- u32 cache_line_barrier[16];
-};
-
-struct be_drv_stats {
- u8 be_on_die_temperature;
- u32 tx_events;
- u32 eth_red_drops;
- u32 rx_drops_no_pbuf;
- u32 rx_drops_no_txpb;
- u32 rx_drops_no_erx_descr;
- u32 rx_drops_no_tpre_descr;
- u32 rx_drops_too_many_frags;
- u32 rx_drops_invalid_ring;
- u32 forwarded_packets;
- u32 rx_drops_mtu;
- u32 rx_crc_errors;
- u32 rx_alignment_symbol_errors;
- u32 rx_pause_frames;
- u32 rx_priority_pause_frames;
- u32 rx_control_frames;
- u32 rx_in_range_errors;
- u32 rx_out_range_errors;
- u32 rx_frame_too_long;
- u32 rx_address_match_errors;
- u32 rx_dropped_too_small;
- u32 rx_dropped_too_short;
- u32 rx_dropped_header_too_small;
- u32 rx_dropped_tcp_length;
- u32 rx_dropped_runt;
- u32 rx_ip_checksum_errs;
- u32 rx_tcp_checksum_errs;
- u32 rx_udp_checksum_errs;
- u32 tx_pauseframes;
- u32 tx_priority_pauseframes;
- u32 tx_controlframes;
- u32 rxpp_fifo_overflow_drop;
- u32 rx_input_fifo_overflow_drop;
- u32 pmem_fifo_overflow_drop;
- u32 jabber_events;
-};
-
-struct be_vf_cfg {
- unsigned char vf_mac_addr[ETH_ALEN];
- u32 vf_if_handle;
- u32 vf_pmac_id;
- u16 vf_vlan_tag;
- u32 vf_tx_rate;
-};
-
-#define BE_INVALID_PMAC_ID 0xffffffff
-
-struct be_adapter {
- struct pci_dev *pdev;
- struct net_device *netdev;
-
- u8 __iomem *csr;
- u8 __iomem *db; /* Door Bell */
- u8 __iomem *pcicfg; /* PCI config space */
-
- struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
- struct be_dma_mem mbox_mem;
-	/* Mbox mem is adjusted to align to 16 bytes. The allocated addr
-	 * is stored for freeing purposes */
- struct be_dma_mem mbox_mem_alloced;
-
- struct be_mcc_obj mcc_obj;
- spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
- spinlock_t mcc_cq_lock;
-
- struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
- u32 num_msix_vec;
- bool isr_registered;
-
- /* TX Rings */
- struct be_eq_obj tx_eq;
- struct be_tx_obj tx_obj[MAX_TX_QS];
- u8 num_tx_qs;
-
- u32 cache_line_break[8];
-
- /* Rx rings */
- struct be_rx_obj rx_obj[MAX_RX_QS];
- u32 num_rx_qs;
- u32 big_page_size; /* Compounded page size shared by rx wrbs */
-
- u8 eq_next_idx;
- struct be_drv_stats drv_stats;
-
- u16 vlans_added;
- u16 max_vlans; /* Number of vlans supported */
- u8 vlan_tag[VLAN_N_VID];
- u8 vlan_prio_bmap; /* Available Priority BitMap */
- u16 recommended_prio; /* Recommended Priority */
- struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
-
- struct be_dma_mem stats_cmd;
- /* Work queue used to perform periodic tasks like getting statistics */
- struct delayed_work work;
- u16 work_counter;
-
- /* Ethtool knobs and info */
- char fw_ver[FW_VER_LEN];
- u32 if_handle; /* Used to configure filtering */
- u32 pmac_id; /* MAC addr handle used by BE card */
- u32 beacon_state; /* for set_phys_id */
-
- bool eeh_err;
- bool link_up;
- u32 port_num;
- bool promiscuous;
- bool wol;
- u32 function_mode;
- u32 function_caps;
- u32 rx_fc; /* Rx flow control */
- u32 tx_fc; /* Tx flow control */
- bool ue_detected;
- bool stats_cmd_sent;
- int link_speed;
- u8 port_type;
- u8 transceiver;
- u8 autoneg;
- u8 generation; /* BladeEngine ASIC generation */
- u32 flash_status;
- struct completion flash_compl;
-
- bool be3_native;
- bool sriov_enabled;
- struct be_vf_cfg *vf_cfg;
- u8 is_virtfn;
- u32 sli_family;
- u8 hba_port_num;
- u16 pvid;
-};
-
-#define be_physfn(adapter) (!adapter->is_virtfn)
-
-/* BladeEngine Generation numbers */
-#define BE_GEN2 2
-#define BE_GEN3 3
-
-#define ON 1
-#define OFF 0
-#define lancer_chip(adapter) ((adapter->pdev->device == OC_DEVICE_ID3) || \
- (adapter->pdev->device == OC_DEVICE_ID4))
-
-extern const struct ethtool_ops be_ethtool_ops;
-
-#define msix_enabled(adapter) (adapter->num_msix_vec > 0)
-#define tx_stats(txo) (&txo->stats)
-#define rx_stats(rxo) (&rxo->stats)
-
-#define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops)
-
-#define for_all_rx_queues(adapter, rxo, i) \
- for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs; \
- i++, rxo++)
-
-/* Just skip the first default non-rss queue */
-#define for_all_rss_queues(adapter, rxo, i) \
- for (i = 0, rxo = &adapter->rx_obj[i+1]; i < (adapter->num_rx_qs - 1);\
- i++, rxo++)
-
-#define for_all_tx_queues(adapter, txo, i) \
- for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs; \
- i++, txo++)
-
-#define PAGE_SHIFT_4K 12
-#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
-
-/* Returns number of pages spanned by the data starting at the given addr */
-#define PAGES_4K_SPANNED(_address, size) \
- ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
- (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
-
-/* Byte offset into the page corresponding to given address */
-#define OFFSET_IN_PAGE(addr) \
- ((size_t)(addr) & (PAGE_SIZE_4K-1))
-
-/* Returns bit offset within a DWORD of a bitfield */
-#define AMAP_BIT_OFFSET(_struct, field) \
- (((size_t)&(((_struct *)0)->field))%32)
-
-/* Returns the bit mask of the field that is NOT shifted into location. */
-static inline u32 amap_mask(u32 bitsize)
-{
- return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
-}
-
-static inline void
-amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
-{
- u32 *dw = (u32 *) ptr + dw_offset;
- *dw &= ~(mask << offset);
- *dw |= (mask & value) << offset;
-}
-
-#define AMAP_SET_BITS(_struct, field, ptr, val) \
- amap_set(ptr, \
- offsetof(_struct, field)/32, \
- amap_mask(sizeof(((_struct *)0)->field)), \
- AMAP_BIT_OFFSET(_struct, field), \
- val)
-
-static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
-{
- u32 *dw = (u32 *) ptr;
- return mask & (*(dw + dw_offset) >> offset);
-}
-
-#define AMAP_GET_BITS(_struct, field, ptr) \
- amap_get(ptr, \
- offsetof(_struct, field)/32, \
- amap_mask(sizeof(((_struct *)0)->field)), \
- AMAP_BIT_OFFSET(_struct, field))
-
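The AMAP macros above reduce a zero-based struct layout to three numbers: a dword index (bit position / 32), an in-dword offset (bit position % 32), and a width mask. A standalone round-trip sketch of the same set/get helpers, outside the driver:

#include <stdint.h>
#include <stdio.h>

static uint32_t amap_mask(uint32_t bitsize)
{
	return bitsize == 32 ? 0xFFFFFFFF : (1u << bitsize) - 1;
}

static void amap_set(void *ptr, uint32_t dw_offset, uint32_t mask,
		     uint32_t offset, uint32_t value)
{
	uint32_t *dw = (uint32_t *)ptr + dw_offset;

	*dw &= ~(mask << offset);
	*dw |= (mask & value) << offset;
}

static uint32_t amap_get(void *ptr, uint32_t dw_offset, uint32_t mask,
			 uint32_t offset)
{
	uint32_t *dw = (uint32_t *)ptr;

	return mask & (*(dw + dw_offset) >> offset);
}

int main(void)
{
	uint32_t ctxt[2] = { 0, 0 };

	/* pack 21 into a 5-bit field at bit offset 3 of dword 0 ... */
	amap_set(ctxt, 0, amap_mask(5), 3, 21);
	/* ... and read it back: prints 21 */
	printf("%u\n", amap_get(ctxt, 0, amap_mask(5), 3));
	return 0;
}
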
-#define be_dws_cpu_to_le(wrb, len) swap_dws(wrb, len)
-#define be_dws_le_to_cpu(wrb, len) swap_dws(wrb, len)
-static inline void swap_dws(void *wrb, int len)
-{
-#ifdef __BIG_ENDIAN
- u32 *dw = wrb;
- BUG_ON(len % 4);
- do {
- *dw = cpu_to_le32(*dw);
- dw++;
- len -= 4;
- } while (len);
-#endif /* __BIG_ENDIAN */
-}
-
-static inline u8 is_tcp_pkt(struct sk_buff *skb)
-{
- u8 val = 0;
-
- if (ip_hdr(skb)->version == 4)
- val = (ip_hdr(skb)->protocol == IPPROTO_TCP);
- else if (ip_hdr(skb)->version == 6)
- val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP);
-
- return val;
-}
-
-static inline u8 is_udp_pkt(struct sk_buff *skb)
-{
- u8 val = 0;
-
- if (ip_hdr(skb)->version == 4)
- val = (ip_hdr(skb)->protocol == IPPROTO_UDP);
- else if (ip_hdr(skb)->version == 6)
- val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP);
-
- return val;
-}
-
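Both helpers test ip_hdr(skb)->version first because IPv4 and IPv6 place the version in the top nibble of the first header byte, so one check dispatches to the right protocol field. A minimal standalone illustration of that dispatch:

#include <stdint.h>
#include <stdio.h>

static const char *ip_version_str(const uint8_t *ip_hdr_start)
{
	switch (ip_hdr_start[0] >> 4) {
	case 4:
		return "IPv4";
	case 6:
		return "IPv6";
	default:
		return "unknown";
	}
}

int main(void)
{
	uint8_t v4[] = { 0x45 };	/* version 4, IHL 5 */
	uint8_t v6[] = { 0x60 };	/* version 6 */

	/* prints: IPv4 IPv6 */
	printf("%s %s\n", ip_version_str(v4), ip_version_str(v6));
	return 0;
}
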
-static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
-{
- u32 sli_intf;
-
- pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
- adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
-}
-
-static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
-{
- u32 addr;
-
- addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
-
- mac[5] = (u8)(addr & 0xFF);
- mac[4] = (u8)((addr >> 8) & 0xFF);
- mac[3] = (u8)((addr >> 16) & 0xFF);
- /* Use the OUI from the current MAC address */
- memcpy(mac, adapter->netdev->dev_addr, 3);
-}
-
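A standalone sketch of the VF MAC scheme in be_vf_eth_addr_generate(): the PF's OUI is kept in bytes 0-2 and bytes 3-5 come from a hash of the PF MAC. FNV-1a stands in here for the kernel's jhash(); the hash only needs to spread VF addresses, so this is illustrative rather than bit-for-bit identical.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t fnv1a(const uint8_t *data, size_t len)
{
	uint32_t h = 2166136261u;
	size_t i;

	for (i = 0; i < len; i++) {
		h ^= data[i];
		h *= 16777619u;
	}
	return h;
}

int main(void)
{
	uint8_t pf_mac[6] = { 0x00, 0x00, 0xc9, 0x12, 0x34, 0x56 };	/* example PF MAC */
	uint8_t vf_mac[6];
	uint32_t addr = fnv1a(pf_mac, sizeof(pf_mac));

	vf_mac[5] = (uint8_t)(addr & 0xFF);
	vf_mac[4] = (uint8_t)((addr >> 8) & 0xFF);
	vf_mac[3] = (uint8_t)((addr >> 16) & 0xFF);
	memcpy(vf_mac, pf_mac, 3);	/* reuse the PF's OUI */

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n", vf_mac[0], vf_mac[1],
	       vf_mac[2], vf_mac[3], vf_mac[4], vf_mac[5]);
	return 0;
}
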
-static inline bool be_multi_rxq(const struct be_adapter *adapter)
-{
- return adapter->num_rx_qs > 1;
-}
-
-extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
- u16 num_popped);
-extern void be_link_status_update(struct be_adapter *adapter, u32 link_status);
-extern void be_parse_stats(struct be_adapter *adapter);
-extern int be_load_fw(struct be_adapter *adapter, u8 *func);
-#endif /* BE_H */
+++ /dev/null
-/*
- * Copyright (C) 2005 - 2011 Emulex
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
- * Public License is included in this distribution in the file called COPYING.
- *
- * Contact Information:
- * linux-drivers@emulex.com
- *
- * Emulex
- * 3333 Susan Street
- * Costa Mesa, CA 92626
- */
-
-#include "be.h"
-#include "be_cmds.h"
-
-/* Must be a power of 2 or else MODULO will BUG_ON */
-static int be_get_temp_freq = 32;
-
-static void be_mcc_notify(struct be_adapter *adapter)
-{
- struct be_queue_info *mccq = &adapter->mcc_obj.q;
- u32 val = 0;
-
- if (adapter->eeh_err) {
- dev_info(&adapter->pdev->dev,
-			"Error detected in card! Cannot issue commands\n");
- return;
- }
-
- val |= mccq->id & DB_MCCQ_RING_ID_MASK;
- val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
-
- wmb();
- iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
-}
-
-/* To check if valid bit is set, check the entire word as we don't know
- * the endianness of the data (old entry is host endian while a new entry is
- * little endian) */
-static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
-{
- if (compl->flags != 0) {
- compl->flags = le32_to_cpu(compl->flags);
- BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
- return true;
- } else {
- return false;
- }
-}
-
-/* Need to reset the entire word that houses the valid bit */
-static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
-{
- compl->flags = 0;
-}
-
-static int be_mcc_compl_process(struct be_adapter *adapter,
- struct be_mcc_compl *compl)
-{
- u16 compl_status, extd_status;
-
- /* Just swap the status to host endian; mcc tag is opaquely copied
- * from mcc_wrb */
- be_dws_le_to_cpu(compl, 4);
-
- compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
- CQE_STATUS_COMPL_MASK;
-
- if (((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) ||
- (compl->tag0 == OPCODE_COMMON_WRITE_OBJECT)) &&
- (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
- adapter->flash_status = compl_status;
- complete(&adapter->flash_compl);
- }
-
- if (compl_status == MCC_STATUS_SUCCESS) {
- if (((compl->tag0 == OPCODE_ETH_GET_STATISTICS) ||
- (compl->tag0 == OPCODE_ETH_GET_PPORT_STATS)) &&
- (compl->tag1 == CMD_SUBSYSTEM_ETH)) {
- be_parse_stats(adapter);
- adapter->stats_cmd_sent = false;
- }
- } else {
- if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
- compl_status == MCC_STATUS_ILLEGAL_REQUEST)
- goto done;
-
- if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
-			dev_warn(&adapter->pdev->dev, "This domain (VM) is not "
- "permitted to execute this cmd (opcode %d)\n",
- compl->tag0);
- } else {
- extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
- CQE_STATUS_EXTD_MASK;
-			dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed: "
- "status %d, extd-status %d\n",
- compl->tag0, compl_status, extd_status);
- }
- }
-done:
- return compl_status;
-}
-
-/* Link state evt is a string of bytes; no need for endian swapping */
-static void be_async_link_state_process(struct be_adapter *adapter,
- struct be_async_event_link_state *evt)
-{
- be_link_status_update(adapter, evt->port_link_status);
-}
-
-/* Grp5 CoS Priority evt */
-static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
- struct be_async_event_grp5_cos_priority *evt)
-{
- if (evt->valid) {
- adapter->vlan_prio_bmap = evt->available_priority_bmap;
-		adapter->recommended_prio =
-			evt->reco_default_priority << VLAN_PRIO_SHIFT;
- }
-}
-
-/* Grp5 QOS Speed evt */
-static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
- struct be_async_event_grp5_qos_link_speed *evt)
-{
- if (evt->physical_port == adapter->port_num) {
- /* qos_link_speed is in units of 10 Mbps */
- adapter->link_speed = evt->qos_link_speed * 10;
- }
-}
-
-/* Grp5 PVID evt */
-static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
- struct be_async_event_grp5_pvid_state *evt)
-{
- if (evt->enabled)
- adapter->pvid = le16_to_cpu(evt->tag);
- else
- adapter->pvid = 0;
-}
-
-static void be_async_grp5_evt_process(struct be_adapter *adapter,
- u32 trailer, struct be_mcc_compl *evt)
-{
- u8 event_type = 0;
-
- event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
- ASYNC_TRAILER_EVENT_TYPE_MASK;
-
- switch (event_type) {
- case ASYNC_EVENT_COS_PRIORITY:
- be_async_grp5_cos_priority_process(adapter,
- (struct be_async_event_grp5_cos_priority *)evt);
- break;
- case ASYNC_EVENT_QOS_SPEED:
- be_async_grp5_qos_speed_process(adapter,
- (struct be_async_event_grp5_qos_link_speed *)evt);
- break;
- case ASYNC_EVENT_PVID_STATE:
- be_async_grp5_pvid_state_process(adapter,
- (struct be_async_event_grp5_pvid_state *)evt);
- break;
- default:
- dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
- break;
- }
-}
-
-static inline bool is_link_state_evt(u32 trailer)
-{
- return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
- ASYNC_TRAILER_EVENT_CODE_MASK) ==
- ASYNC_EVENT_CODE_LINK_STATE;
-}
-
-static inline bool is_grp5_evt(u32 trailer)
-{
- return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
- ASYNC_TRAILER_EVENT_CODE_MASK) ==
- ASYNC_EVENT_CODE_GRP_5);
-}
-
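The two predicates above are plain shift-and-mask decodes of the completion's flags word reinterpreted as an async trailer. A standalone sketch, assuming the be_hw.h layout (event code in bits 8-15, event type in bits 16-23, LINK_STATE = 0x1, GRP_5 = 0x5):

#include <stdint.h>
#include <stdio.h>

/* assumed shift/mask values mirroring the ASYNC_TRAILER_* constants */
#define EVT_CODE(trailer)	(((trailer) >> 8) & 0xFF)
#define EVT_TYPE(trailer)	(((trailer) >> 16) & 0xFF)

int main(void)
{
	/* grp5 event, type 3 (PVID state in this sketch) */
	uint32_t trailer = (3u << 16) | (5u << 8);

	if (EVT_CODE(trailer) == 0x5)
		printf("grp5 event, type %u\n", EVT_TYPE(trailer));
	else if (EVT_CODE(trailer) == 0x1)
		printf("link state event\n");
	return 0;
}
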
-static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
-{
- struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
- struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
-
- if (be_mcc_compl_is_new(compl)) {
- queue_tail_inc(mcc_cq);
- return compl;
- }
- return NULL;
-}
-
-void be_async_mcc_enable(struct be_adapter *adapter)
-{
- spin_lock_bh(&adapter->mcc_cq_lock);
-
- be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
- adapter->mcc_obj.rearm_cq = true;
-
- spin_unlock_bh(&adapter->mcc_cq_lock);
-}
-
-void be_async_mcc_disable(struct be_adapter *adapter)
-{
- adapter->mcc_obj.rearm_cq = false;
-}
-
-int be_process_mcc(struct be_adapter *adapter, int *status)
-{
- struct be_mcc_compl *compl;
- int num = 0;
- struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
-
- spin_lock_bh(&adapter->mcc_cq_lock);
- while ((compl = be_mcc_compl_get(adapter))) {
- if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
- /* Interpret flags as an async trailer */
- if (is_link_state_evt(compl->flags))
- be_async_link_state_process(adapter,
- (struct be_async_event_link_state *) compl);
- else if (is_grp5_evt(compl->flags))
- be_async_grp5_evt_process(adapter,
- compl->flags, compl);
- } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
- *status = be_mcc_compl_process(adapter, compl);
- atomic_dec(&mcc_obj->q.used);
- }
- be_mcc_compl_use(compl);
- num++;
- }
-
- spin_unlock_bh(&adapter->mcc_cq_lock);
- return num;
-}
-
-/* Wait till no more pending mcc requests are present */
-static int be_mcc_wait_compl(struct be_adapter *adapter)
-{
-#define mcc_timeout 120000 /* 12s timeout */
- int i, num, status = 0;
- struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
-
- if (adapter->eeh_err)
- return -EIO;
-
- for (i = 0; i < mcc_timeout; i++) {
- num = be_process_mcc(adapter, &status);
- if (num)
- be_cq_notify(adapter, mcc_obj->cq.id,
- mcc_obj->rearm_cq, num);
-
- if (atomic_read(&mcc_obj->q.used) == 0)
- break;
- udelay(100);
- }
- if (i == mcc_timeout) {
- dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
- return -1;
- }
- return status;
-}
-
-/* Notify MCC requests and wait for completion */
-static int be_mcc_notify_wait(struct be_adapter *adapter)
-{
- be_mcc_notify(adapter);
- return be_mcc_wait_compl(adapter);
-}
-
-static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
-{
- int msecs = 0;
- u32 ready;
-
- if (adapter->eeh_err) {
- dev_err(&adapter->pdev->dev,
-			"Error detected in card. Cannot issue commands\n");
- return -EIO;
- }
-
- do {
- ready = ioread32(db);
- if (ready == 0xffffffff) {
- dev_err(&adapter->pdev->dev,
- "pci slot disconnected\n");
- return -1;
- }
-
- ready &= MPU_MAILBOX_DB_RDY_MASK;
- if (ready)
- break;
-
- if (msecs > 4000) {
- dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
- if (!lancer_chip(adapter))
- be_detect_dump_ue(adapter);
- return -1;
- }
-
- msleep(1);
- msecs++;
- } while (true);
-
- return 0;
-}
-
-/*
- * Insert the mailbox address into the doorbell in two steps
- * Polls on the mbox doorbell till a command completion (or a timeout) occurs
- */
-static int be_mbox_notify_wait(struct be_adapter *adapter)
-{
- int status;
- u32 val = 0;
- void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
- struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
- struct be_mcc_mailbox *mbox = mbox_mem->va;
- struct be_mcc_compl *compl = &mbox->compl;
-
- /* wait for ready to be set */
- status = be_mbox_db_ready_wait(adapter, db);
- if (status != 0)
- return status;
-
- val |= MPU_MAILBOX_DB_HI_MASK;
- /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
- val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
- iowrite32(val, db);
-
- /* wait for ready to be set */
- status = be_mbox_db_ready_wait(adapter, db);
- if (status != 0)
- return status;
-
- val = 0;
- /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
- val |= (u32)(mbox_mem->dma >> 4) << 2;
- iowrite32(val, db);
-
- status = be_mbox_db_ready_wait(adapter, db);
- if (status != 0)
- return status;
-
- /* A cq entry has been made now */
- if (be_mcc_compl_is_new(compl)) {
- status = be_mcc_compl_process(adapter, &mbox->compl);
- be_mcc_compl_use(compl);
- if (status)
- return status;
- } else {
- dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
- return -1;
- }
- return 0;
-}
-
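A standalone sketch of the two-step doorbell encoding used by be_mbox_notify_wait() for a 16-byte-aligned mailbox address: the hi write carries DMA bits 34-63 in doorbell bits 2-31 plus a hi flag (assumed here to be bit 1, per MPU_MAILBOX_DB_HI_MASK), and the lo write carries DMA bits 4-33 in the same doorbell bits.

#include <stdint.h>
#include <stdio.h>

#define DB_HI_FLAG	0x2u	/* assumed value of MPU_MAILBOX_DB_HI_MASK */

static uint32_t mbox_db_hi(uint64_t dma)
{
	/* place dma bits 34-63 at doorbell bits 2-31, set the hi flag */
	return DB_HI_FLAG | (((uint32_t)(dma >> 32) >> 2) << 2);
}

static uint32_t mbox_db_lo(uint64_t dma)
{
	/* place dma bits 4-33 at doorbell bits 2-31 */
	return ((uint32_t)(dma >> 4)) << 2;
}

int main(void)
{
	uint64_t dma = 0x0000001234567890ull;	/* 16-byte aligned example */

	/* prints: hi=0x00000012 lo=0x8d159e24 */
	printf("hi=0x%08x lo=0x%08x\n", mbox_db_hi(dma), mbox_db_lo(dma));
	return 0;
}
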
-static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
-{
- u32 sem;
-
- if (lancer_chip(adapter))
- sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
- else
- sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
-
- *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
- if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
- return -1;
- else
- return 0;
-}
-
-int be_cmd_POST(struct be_adapter *adapter)
-{
- u16 stage;
- int status, timeout = 0;
- struct device *dev = &adapter->pdev->dev;
-
- do {
- status = be_POST_stage_get(adapter, &stage);
- if (status) {
- dev_err(dev, "POST error; stage=0x%x\n", stage);
- return -1;
- } else if (stage != POST_STAGE_ARMFW_RDY) {
- if (msleep_interruptible(2000)) {
- dev_err(dev, "Waiting for POST aborted\n");
- return -EINTR;
- }
- timeout += 2;
- } else {
- return 0;
- }
- } while (timeout < 40);
-
- dev_err(dev, "POST timeout; stage=0x%x\n", stage);
- return -1;
-}
-
-static inline void *embedded_payload(struct be_mcc_wrb *wrb)
-{
- return wrb->payload.embedded_payload;
-}
-
-static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
-{
- return &wrb->payload.sgl[0];
-}
-
-/* Don't touch the hdr after it's prepared */
-static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
- bool embedded, u8 sge_cnt, u32 opcode)
-{
- if (embedded)
- wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
- else
- wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
- MCC_WRB_SGE_CNT_SHIFT;
- wrb->payload_length = payload_len;
- wrb->tag0 = opcode;
- be_dws_cpu_to_le(wrb, 8);
-}
-
-/* Don't touch the hdr after it's prepared */
-static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
- u8 subsystem, u8 opcode, int cmd_len)
-{
- req_hdr->opcode = opcode;
- req_hdr->subsystem = subsystem;
- req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
- req_hdr->version = 0;
-}
-
-static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
- struct be_dma_mem *mem)
-{
- int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
- u64 dma = (u64)mem->dma;
-
- for (i = 0; i < buf_pages; i++) {
- pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
- pages[i].hi = cpu_to_le32(upper_32_bits(dma));
- dma += PAGE_SIZE_4K;
- }
-}
-
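A standalone sketch of what be_cmd_page_addrs_prepare() produces: one lo/hi dword pair per 4K page of the DMA buffer. The cpu_to_le32() conversion and the max_pages clamp are dropped here for brevity.

#include <stdint.h>
#include <stdio.h>

struct phys_addr_sketch {
	uint32_t lo;
	uint32_t hi;
};

int main(void)
{
	uint64_t dma = 0x1ffff000ull;	/* example bus address */
	uint32_t size = 3 * 4096;
	struct phys_addr_sketch pages[8];
	int i, n = size / 4096;

	for (i = 0; i < n; i++) {
		pages[i].lo = (uint32_t)(dma & 0xFFFFFFFF);
		pages[i].hi = (uint32_t)(dma >> 32);
		dma += 4096;	/* next 4K page */
	}
	for (i = 0; i < n; i++)
		printf("page %d: hi=0x%x lo=0x%x\n", i, pages[i].hi,
		       pages[i].lo);
	return 0;
}
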
-/* Converts interrupt delay in microseconds to multiplier value */
-static u32 eq_delay_to_mult(u32 usec_delay)
-{
-#define MAX_INTR_RATE 651042
- const u32 round = 10;
- u32 multiplier;
-
- if (usec_delay == 0)
- multiplier = 0;
- else {
- u32 interrupt_rate = 1000000 / usec_delay;
- /* Max delay, corresponding to the lowest interrupt rate */
- if (interrupt_rate == 0)
- multiplier = 1023;
- else {
- multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
- multiplier /= interrupt_rate;
- /* Round the multiplier to the closest value.*/
- multiplier = (multiplier + round/2) / round;
- multiplier = min(multiplier, (u32)1023);
- }
- }
- return multiplier;
-}
-
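A standalone re-run of eq_delay_to_mult() showing the microsecond-to-multiplier mapping: with these constants 8 us encodes to 4 and 96 us to 62, and everything saturates at the hardware maximum of 1023.

#include <stdint.h>
#include <stdio.h>

static uint32_t eq_delay_to_mult(uint32_t usec_delay)
{
	const uint32_t max_intr_rate = 651042, round = 10;
	uint32_t rate, mult;

	if (usec_delay == 0)
		return 0;
	rate = 1000000 / usec_delay;
	if (rate == 0)
		return 1023;	/* max delay, lowest interrupt rate */
	mult = (max_intr_rate - rate) * round / rate;
	mult = (mult + round / 2) / round;	/* round to nearest */
	return mult > 1023 ? 1023 : mult;
}

int main(void)
{
	/* prints: 0 4 62 */
	printf("%u %u %u\n", eq_delay_to_mult(0), eq_delay_to_mult(8),
	       eq_delay_to_mult(96));
	return 0;
}
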
-static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
-{
- struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
- struct be_mcc_wrb *wrb
- = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
- memset(wrb, 0, sizeof(*wrb));
- return wrb;
-}
-
-static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
-{
- struct be_queue_info *mccq = &adapter->mcc_obj.q;
- struct be_mcc_wrb *wrb;
-
- if (atomic_read(&mccq->used) >= mccq->len) {
- dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
- return NULL;
- }
-
- wrb = queue_head_node(mccq);
- queue_head_inc(mccq);
- atomic_inc(&mccq->used);
- memset(wrb, 0, sizeof(*wrb));
- return wrb;
-}
-
-/* Tell fw we're about to start firing cmds by writing a
- * special pattern across the wrb hdr; uses mbox
- */
-int be_cmd_fw_init(struct be_adapter *adapter)
-{
- u8 *wrb;
- int status;
-
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
-
- wrb = (u8 *)wrb_from_mbox(adapter);
- *wrb++ = 0xFF;
- *wrb++ = 0x12;
- *wrb++ = 0x34;
- *wrb++ = 0xFF;
- *wrb++ = 0xFF;
- *wrb++ = 0x56;
- *wrb++ = 0x78;
- *wrb = 0xFF;
-
- status = be_mbox_notify_wait(adapter);
-
- mutex_unlock(&adapter->mbox_lock);
- return status;
-}
-
-/* Tell fw we're done with firing cmds by writing a
- * special pattern across the wrb hdr; uses mbox
- */
-int be_cmd_fw_clean(struct be_adapter *adapter)
-{
- u8 *wrb;
- int status;
-
- if (adapter->eeh_err)
- return -EIO;
-
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
-
- wrb = (u8 *)wrb_from_mbox(adapter);
- *wrb++ = 0xFF;
- *wrb++ = 0xAA;
- *wrb++ = 0xBB;
- *wrb++ = 0xFF;
- *wrb++ = 0xFF;
- *wrb++ = 0xCC;
- *wrb++ = 0xDD;
- *wrb = 0xFF;
-
- status = be_mbox_notify_wait(adapter);
-
- mutex_unlock(&adapter->mbox_lock);
- return status;
-}
-
-int be_cmd_eq_create(struct be_adapter *adapter,
- struct be_queue_info *eq, int eq_delay)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_eq_create *req;
- struct be_dma_mem *q_mem = &eq->dma_mem;
- int status;
-
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
-
- wrb = wrb_from_mbox(adapter);
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_EQ_CREATE, sizeof(*req));
-
- req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
-
- AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
-	/* 4 byte eqe */
- AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
- AMAP_SET_BITS(struct amap_eq_context, count, req->context,
- __ilog2_u32(eq->len/256));
- AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
- eq_delay_to_mult(eq_delay));
- be_dws_cpu_to_le(req->context, sizeof(req->context));
-
- be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
-
- status = be_mbox_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
- eq->id = le16_to_cpu(resp->eq_id);
- eq->created = true;
- }
-
- mutex_unlock(&adapter->mbox_lock);
- return status;
-}
-
-/* Uses mbox */
-int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
- u8 type, bool permanent, u32 if_handle)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_mac_query *req;
- int status;
-
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
-
- wrb = wrb_from_mbox(adapter);
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_COMMON_NTWK_MAC_QUERY);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));
-
- req->type = type;
- if (permanent) {
- req->permanent = 1;
- } else {
- req->if_id = cpu_to_le16((u16) if_handle);
- req->permanent = 0;
- }
-
- status = be_mbox_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
- memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
- }
-
- mutex_unlock(&adapter->mbox_lock);
- return status;
-}
-
-/* Uses synchronous MCCQ */
-int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
- u32 if_id, u32 *pmac_id, u32 domain)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_pmac_add *req;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_COMMON_NTWK_PMAC_ADD);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
-
- req->hdr.domain = domain;
- req->if_id = cpu_to_le32(if_id);
- memcpy(req->mac_address, mac_addr, ETH_ALEN);
-
- status = be_mcc_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
- *pmac_id = le32_to_cpu(resp->pmac_id);
- }
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-/* Uses synchronous MCCQ */
-int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_pmac_del *req;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_COMMON_NTWK_PMAC_DEL);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
-
- req->hdr.domain = dom;
- req->if_id = cpu_to_le32(if_id);
- req->pmac_id = cpu_to_le32(pmac_id);
-
- status = be_mcc_notify_wait(adapter);
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-/* Uses Mbox */
-int be_cmd_cq_create(struct be_adapter *adapter,
- struct be_queue_info *cq, struct be_queue_info *eq,
- bool sol_evts, bool no_delay, int coalesce_wm)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_cq_create *req;
- struct be_dma_mem *q_mem = &cq->dma_mem;
- void *ctxt;
- int status;
-
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
-
- wrb = wrb_from_mbox(adapter);
- req = embedded_payload(wrb);
- ctxt = &req->context;
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_COMMON_CQ_CREATE);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_CQ_CREATE, sizeof(*req));
-
- req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
- if (lancer_chip(adapter)) {
- req->hdr.version = 2;
- req->page_size = 1; /* 1 for 4K */
- AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
- no_delay);
- AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
- __ilog2_u32(cq->len/256));
- AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
- ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
- ctxt, eq->id);
- AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
- } else {
- AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
- coalesce_wm);
- AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
- ctxt, no_delay);
- AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
- __ilog2_u32(cq->len/256));
- AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context_be, solevent,
- ctxt, sol_evts);
- AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
- AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
- }
-
- be_dws_cpu_to_le(ctxt, sizeof(req->context));
-
- be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
-
- status = be_mbox_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
- cq->id = le16_to_cpu(resp->cq_id);
- cq->created = true;
- }
-
- mutex_unlock(&adapter->mbox_lock);
-
- return status;
-}
-
-static u32 be_encoded_q_len(int q_len)
-{
- u32 len_encoded = fls(q_len); /* log2(len) + 1 */
- if (len_encoded == 16)
- len_encoded = 0;
- return len_encoded;
-}
-
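be_encoded_q_len() above hands the firmware log2(len) + 1, with the value 16 (a 32768-entry ring) wrapping to 0. A standalone check using a portable stand-in for the kernel's fls():

#include <stdint.h>
#include <stdio.h>

static uint32_t fls32(uint32_t x)
{
	return x ? 32 - (uint32_t)__builtin_clz(x) : 0;
}

static uint32_t be_encoded_q_len(uint32_t q_len)
{
	uint32_t len_encoded = fls32(q_len);	/* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

int main(void)
{
	/* prints: 8 11 0 (MCC_Q_LEN, EVNT_Q_LEN, 32K ring) */
	printf("%u %u %u\n", be_encoded_q_len(128), be_encoded_q_len(1024),
	       be_encoded_q_len(32768));
	return 0;
}
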
-int be_cmd_mccq_ext_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_mcc_ext_create *req;
- struct be_dma_mem *q_mem = &mccq->dma_mem;
- void *ctxt;
- int status;
-
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
-
- wrb = wrb_from_mbox(adapter);
- req = embedded_payload(wrb);
- ctxt = &req->context;
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_COMMON_MCC_CREATE_EXT);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
-
- req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
- if (lancer_chip(adapter)) {
- req->hdr.version = 1;
- req->cq_id = cpu_to_le16(cq->id);
-
- AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
- be_encoded_q_len(mccq->len));
- AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
- AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
- ctxt, cq->id);
- AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
- ctxt, 1);
-
- } else {
- AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
- AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
- be_encoded_q_len(mccq->len));
- AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
- }
-
-	/* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
- req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
- be_dws_cpu_to_le(ctxt, sizeof(req->context));
-
- be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
-
- status = be_mbox_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
- mccq->id = le16_to_cpu(resp->id);
- mccq->created = true;
- }
- mutex_unlock(&adapter->mbox_lock);
-
- return status;
-}
-
-int be_cmd_mccq_org_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_mcc_create *req;
- struct be_dma_mem *q_mem = &mccq->dma_mem;
- void *ctxt;
- int status;
-
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
-
- wrb = wrb_from_mbox(adapter);
- req = embedded_payload(wrb);
- ctxt = &req->context;
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_COMMON_MCC_CREATE);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MCC_CREATE, sizeof(*req));
-
- req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
-
- AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
- AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
- be_encoded_q_len(mccq->len));
- AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
-
- be_dws_cpu_to_le(ctxt, sizeof(req->context));
-
- be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
-
- status = be_mbox_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
- mccq->id = le16_to_cpu(resp->id);
- mccq->created = true;
- }
-
- mutex_unlock(&adapter->mbox_lock);
- return status;
-}
-
-int be_cmd_mccq_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
-{
- int status;
-
- status = be_cmd_mccq_ext_create(adapter, mccq, cq);
- if (status && !lancer_chip(adapter)) {
- dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
- "or newer to avoid conflicting priorities between NIC "
-			"and FCoE traffic\n");
- status = be_cmd_mccq_org_create(adapter, mccq, cq);
- }
- return status;
-}
-
-int be_cmd_txq_create(struct be_adapter *adapter,
- struct be_queue_info *txq,
- struct be_queue_info *cq)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_eth_tx_create *req;
- struct be_dma_mem *q_mem = &txq->dma_mem;
- void *ctxt;
- int status;
-
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
-
- wrb = wrb_from_mbox(adapter);
- req = embedded_payload(wrb);
- ctxt = &req->context;
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_ETH_TX_CREATE);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
- sizeof(*req));
-
- if (lancer_chip(adapter)) {
- req->hdr.version = 1;
- AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
- adapter->if_handle);
- }
-
- req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
- req->ulp_num = BE_ULP1_NUM;
- req->type = BE_ETH_TX_RING_TYPE_STANDARD;
-
- AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
- be_encoded_q_len(txq->len));
- AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
- AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
-
- be_dws_cpu_to_le(ctxt, sizeof(req->context));
-
- be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
-
- status = be_mbox_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
- txq->id = le16_to_cpu(resp->cid);
- txq->created = true;
- }
-
- mutex_unlock(&adapter->mbox_lock);
-
- return status;
-}
-
-/* Uses MCC */
-int be_cmd_rxq_create(struct be_adapter *adapter,
- struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
- u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_eth_rx_create *req;
- struct be_dma_mem *q_mem = &rxq->dma_mem;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_ETH_RX_CREATE);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
- sizeof(*req));
-
- req->cq_id = cpu_to_le16(cq_id);
- req->frag_size = fls(frag_size) - 1;
- req->num_pages = 2;
- be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
- req->interface_id = cpu_to_le32(if_id);
- req->max_frame_size = cpu_to_le16(max_frame_size);
- req->rss_queue = cpu_to_le32(rss);
-
- status = be_mcc_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
- rxq->id = le16_to_cpu(resp->id);
- rxq->created = true;
- *rss_id = resp->rss_id;
- }
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-/* Generic destroyer function for all types of queues
- * Uses Mbox
- */
-int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
- int queue_type)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_q_destroy *req;
- u8 subsys = 0, opcode = 0;
- int status;
-
- if (adapter->eeh_err)
- return -EIO;
-
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
-
- wrb = wrb_from_mbox(adapter);
- req = embedded_payload(wrb);
-
- switch (queue_type) {
- case QTYPE_EQ:
- subsys = CMD_SUBSYSTEM_COMMON;
- opcode = OPCODE_COMMON_EQ_DESTROY;
- break;
- case QTYPE_CQ:
- subsys = CMD_SUBSYSTEM_COMMON;
- opcode = OPCODE_COMMON_CQ_DESTROY;
- break;
- case QTYPE_TXQ:
- subsys = CMD_SUBSYSTEM_ETH;
- opcode = OPCODE_ETH_TX_DESTROY;
- break;
- case QTYPE_RXQ:
- subsys = CMD_SUBSYSTEM_ETH;
- opcode = OPCODE_ETH_RX_DESTROY;
- break;
- case QTYPE_MCCQ:
- subsys = CMD_SUBSYSTEM_COMMON;
- opcode = OPCODE_COMMON_MCC_DESTROY;
- break;
- default:
- BUG();
- }
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);
-
- be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
- req->id = cpu_to_le16(q->id);
-
- status = be_mbox_notify_wait(adapter);
- if (!status)
- q->created = false;
-
- mutex_unlock(&adapter->mbox_lock);
- return status;
-}
-
-/* Uses MCC */
-int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_q_destroy *req;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_RX_DESTROY);
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_DESTROY,
- sizeof(*req));
- req->id = cpu_to_le16(q->id);
-
- status = be_mcc_notify_wait(adapter);
- if (!status)
- q->created = false;
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-/* Create an rx filtering policy configuration on an i/f
- * Uses mbox
- */
-int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
- u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
- u32 domain)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_if_create *req;
- int status;
-
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
-
- wrb = wrb_from_mbox(adapter);
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_COMMON_NTWK_INTERFACE_CREATE);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
-
- req->hdr.domain = domain;
- req->capability_flags = cpu_to_le32(cap_flags);
- req->enable_flags = cpu_to_le32(en_flags);
- req->pmac_invalid = pmac_invalid;
- if (!pmac_invalid)
- memcpy(req->mac_addr, mac, ETH_ALEN);
-
- status = be_mbox_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
- *if_handle = le32_to_cpu(resp->interface_id);
- if (!pmac_invalid)
- *pmac_id = le32_to_cpu(resp->pmac_id);
- }
-
- mutex_unlock(&adapter->mbox_lock);
- return status;
-}
-
-/* Uses mbox */
-int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_if_destroy *req;
- int status;
-
- if (adapter->eeh_err)
- return -EIO;
-
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
-
- wrb = wrb_from_mbox(adapter);
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_COMMON_NTWK_INTERFACE_DESTROY);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
-
- req->hdr.domain = domain;
- req->interface_id = cpu_to_le32(interface_id);
-
- status = be_mbox_notify_wait(adapter);
-
- mutex_unlock(&adapter->mbox_lock);
-
- return status;
-}
-
-/* Get stats is a non-embedded command: the request is not embedded inside
- * the WRB but is a separate dma memory block
- * Uses asynchronous MCC
- */
-int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_hdr *hdr;
- struct be_sge *sge;
- int status = 0;
-
- if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
- be_cmd_get_die_temperature(adapter);
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- hdr = nonemb_cmd->va;
- sge = nonembedded_sgl(wrb);
-
- be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
- OPCODE_ETH_GET_STATISTICS);
-
- be_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size);
-
- if (adapter->generation == BE_GEN3)
- hdr->version = 1;
-
- wrb->tag1 = CMD_SUBSYSTEM_ETH;
- sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
- sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
- sge->len = cpu_to_le32(nonemb_cmd->size);
-
- be_mcc_notify(adapter);
- adapter->stats_cmd_sent = true;
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-/* Lancer Stats */
-int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
- struct be_dma_mem *nonemb_cmd)
-{
- struct be_mcc_wrb *wrb;
- struct lancer_cmd_req_pport_stats *req;
- struct be_sge *sge;
- int status = 0;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = nonemb_cmd->va;
- sge = nonembedded_sgl(wrb);
-
- be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
- OPCODE_ETH_GET_PPORT_STATS);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size);
-
- req->cmd_params.params.pport_num = cpu_to_le16(adapter->port_num);
- req->cmd_params.params.reset_stats = 0;
-
- wrb->tag1 = CMD_SUBSYSTEM_ETH;
- sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
- sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
- sge->len = cpu_to_le32(nonemb_cmd->size);
-
- be_mcc_notify(adapter);
- adapter->stats_cmd_sent = true;
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-/* Uses synchronous mcc */
-int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
- u16 *link_speed, u32 dom)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_link_status *req;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));
-
- status = be_mcc_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
- if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
- *link_speed = le16_to_cpu(resp->link_speed);
- *mac_speed = resp->mac_speed;
- }
- }
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-/* Uses synchronous mcc */
-int be_cmd_get_die_temperature(struct be_adapter *adapter)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_get_cntl_addnl_attribs *req;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
-
- status = be_mcc_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_get_cntl_addnl_attribs *resp =
- embedded_payload(wrb);
- adapter->drv_stats.be_on_die_temperature =
- resp->on_die_temperature;
-	} else {
-		/* If IOCTL fails once, do not bother issuing it again */
-		be_get_temp_freq = 0;
-	}
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-/* Uses synchronous mcc */
-int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_get_fat *req;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_COMMON_MANAGE_FAT);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MANAGE_FAT, sizeof(*req));
- req->fat_operation = cpu_to_le32(QUERY_FAT);
- status = be_mcc_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
- if (log_size && resp->log_size)
- *log_size = le32_to_cpu(resp->log_size) -
- sizeof(u32);
- }
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
-{
- struct be_dma_mem get_fat_cmd;
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_get_fat *req;
- struct be_sge *sge;
- u32 offset = 0, total_size, buf_size,
- log_offset = sizeof(u32), payload_len;
- int status;
-
- if (buf_len == 0)
- return;
-
- total_size = buf_len;
-
- get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
- get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
- get_fat_cmd.size,
- &get_fat_cmd.dma);
- if (!get_fat_cmd.va) {
- status = -ENOMEM;
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure while retrieving FAT data\n");
- return;
- }
-
- spin_lock_bh(&adapter->mcc_lock);
-
- while (total_size) {
- buf_size = min(total_size, (u32)60*1024);
- total_size -= buf_size;
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = get_fat_cmd.va;
- sge = nonembedded_sgl(wrb);
-
- payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
- be_wrb_hdr_prepare(wrb, payload_len, false, 1,
- OPCODE_COMMON_MANAGE_FAT);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MANAGE_FAT, payload_len);
-
- sge->pa_hi = cpu_to_le32(upper_32_bits(get_fat_cmd.dma));
- sge->pa_lo = cpu_to_le32(get_fat_cmd.dma & 0xFFFFFFFF);
- sge->len = cpu_to_le32(get_fat_cmd.size);
-
- req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
- req->read_log_offset = cpu_to_le32(log_offset);
- req->read_log_length = cpu_to_le32(buf_size);
- req->data_buffer_size = cpu_to_le32(buf_size);
-
- status = be_mcc_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
- memcpy(buf + offset,
- resp->data_buffer,
- resp->read_log_length);
- } else {
- dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
- goto err;
- }
- offset += buf_size;
- log_offset += buf_size;
- }
-err:
- pci_free_consistent(adapter->pdev, get_fat_cmd.size,
- get_fat_cmd.va,
- get_fat_cmd.dma);
- spin_unlock_bh(&adapter->mcc_lock);
-}
-
-/* Uses Mbox */
-int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_get_fw_version *req;
- int status;
-
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
-
- wrb = wrb_from_mbox(adapter);
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_COMMON_GET_FW_VERSION);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
-
- status = be_mbox_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
- strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
- }
-
- mutex_unlock(&adapter->mbox_lock);
- return status;
-}
-
-/* Set the EQ delay interval of an EQ to a specified value
- * Uses async mcc
- */
-int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_modify_eq_delay *req;
- int status = 0;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_COMMON_MODIFY_EQ_DELAY);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
-
- req->num_eq = cpu_to_le32(1);
- req->delay[0].eq_id = cpu_to_le32(eq_id);
- req->delay[0].phase = 0;
- req->delay[0].delay_multiplier = cpu_to_le32(eqd);
-
- be_mcc_notify(adapter);
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-/* Uses synchronous mcc */
-int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
- u32 num, bool untagged, bool promiscuous)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_vlan_config *req;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_COMMON_NTWK_VLAN_CONFIG);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));
-
- req->interface_id = if_id;
- req->promiscuous = promiscuous;
- req->untagged = untagged;
- req->num_vlan = num;
- if (!promiscuous) {
- memcpy(req->normal_vlan, vtag_array,
- req->num_vlan * sizeof(vtag_array[0]));
- }
-
- status = be_mcc_notify_wait(adapter);
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
-{
- struct be_mcc_wrb *wrb;
- struct be_dma_mem *mem = &adapter->rx_filter;
- struct be_cmd_req_rx_filter *req = mem->va;
- struct be_sge *sge;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- sge = nonembedded_sgl(wrb);
- sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
- sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
- sge->len = cpu_to_le32(mem->size);
- be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
- OPCODE_COMMON_NTWK_RX_FILTER);
-
- memset(req, 0, sizeof(*req));
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req));
-
- req->if_id = cpu_to_le32(adapter->if_handle);
- if (flags & IFF_PROMISC) {
- req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
- BE_IF_FLAGS_VLAN_PROMISCUOUS);
- if (value == ON)
- req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
- BE_IF_FLAGS_VLAN_PROMISCUOUS);
- } else if (flags & IFF_ALLMULTI) {
- req->if_flags_mask = req->if_flags =
- cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
- } else {
- struct netdev_hw_addr *ha;
- int i = 0;
-
- req->mcast_num = cpu_to_le16(netdev_mc_count(adapter->netdev));
- netdev_for_each_mc_addr(ha, adapter->netdev)
- memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
- }
-
- status = be_mcc_notify_wait(adapter);
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-/* Uses synchronous mcc */
-int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_set_flow_control *req;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_COMMON_SET_FLOW_CONTROL);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
-
- req->tx_flow_control = cpu_to_le16((u16)tx_fc);
- req->rx_flow_control = cpu_to_le16((u16)rx_fc);
-
- status = be_mcc_notify_wait(adapter);
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-/* Uses sync mcc */
-int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_get_flow_control *req;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_COMMON_GET_FLOW_CONTROL);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));
-
- status = be_mcc_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_get_flow_control *resp =
- embedded_payload(wrb);
- *tx_fc = le16_to_cpu(resp->tx_flow_control);
- *rx_fc = le16_to_cpu(resp->rx_flow_control);
- }
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-/* Uses mbox */
-int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
- u32 *mode, u32 *caps)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_query_fw_cfg *req;
- int status;
-
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
-
- wrb = wrb_from_mbox(adapter);
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
-
- status = be_mbox_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
- *port_num = le32_to_cpu(resp->phys_port);
- *mode = le32_to_cpu(resp->function_mode);
- *caps = le32_to_cpu(resp->function_caps);
- }
-
- mutex_unlock(&adapter->mbox_lock);
- return status;
-}
-
-/* Uses mbox */
-int be_cmd_reset_function(struct be_adapter *adapter)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_hdr *req;
- int status;
-
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
-
- wrb = wrb_from_mbox(adapter);
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_COMMON_FUNCTION_RESET);
-
- be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
-
- status = be_mbox_notify_wait(adapter);
-
- mutex_unlock(&adapter->mbox_lock);
- return status;
-}
-
-int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_rss_config *req;
- u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF,
- 0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF};
- int status;
-
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
-
- wrb = wrb_from_mbox(adapter);
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_ETH_RSS_CONFIG);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_RSS_CONFIG, sizeof(*req));
-
- req->if_id = cpu_to_le32(adapter->if_handle);
- req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
- req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
- memcpy(req->cpu_table, rsstable, table_size);
- memcpy(req->hash, myhash, sizeof(myhash));
- be_dws_cpu_to_le(req->hash, sizeof(req->hash));
-
- status = be_mbox_notify_wait(adapter);
-
- mutex_unlock(&adapter->mbox_lock);
- return status;
-}
-
-/* Uses sync mcc */
-int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
- u8 bcn, u8 sts, u8 state)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_enable_disable_beacon *req;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_COMMON_ENABLE_DISABLE_BEACON);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));
-
- req->port_num = port_num;
- req->beacon_state = state;
- req->beacon_duration = bcn;
- req->status_duration = sts;
-
- status = be_mcc_notify_wait(adapter);
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-/* Uses sync mcc */
-int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_get_beacon_state *req;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_COMMON_GET_BEACON_STATE);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));
-
- req->port_num = port_num;
-
- status = be_mcc_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_get_beacon_state *resp =
- embedded_payload(wrb);
- *state = resp->beacon_state;
- }
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 data_size, u32 data_offset, const char *obj_name,
- u32 *data_written, u8 *addn_status)
-{
- struct be_mcc_wrb *wrb;
- struct lancer_cmd_req_write_object *req;
- struct lancer_cmd_resp_write_object *resp;
- void *ctxt = NULL;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
- adapter->flash_status = 0;
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err_unlock;
- }
-
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(struct lancer_cmd_req_write_object),
- true, 1, OPCODE_COMMON_WRITE_OBJECT);
- wrb->tag1 = CMD_SUBSYSTEM_COMMON;
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_WRITE_OBJECT,
- sizeof(struct lancer_cmd_req_write_object));
-
- ctxt = &req->context;
- AMAP_SET_BITS(struct amap_lancer_write_obj_context,
- write_length, ctxt, data_size);
-
- if (data_size == 0)
- AMAP_SET_BITS(struct amap_lancer_write_obj_context,
- eof, ctxt, 1);
- else
- AMAP_SET_BITS(struct amap_lancer_write_obj_context,
- eof, ctxt, 0);
-
- be_dws_cpu_to_le(ctxt, sizeof(req->context));
- req->write_offset = cpu_to_le32(data_offset);
- strcpy(req->object_name, obj_name);
- req->descriptor_count = cpu_to_le32(1);
- req->buf_len = cpu_to_le32(data_size);
- req->addr_low = cpu_to_le32((cmd->dma +
- sizeof(struct lancer_cmd_req_write_object))
- & 0xFFFFFFFF);
- req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
- sizeof(struct lancer_cmd_req_write_object)));
-
- be_mcc_notify(adapter);
- spin_unlock_bh(&adapter->mcc_lock);
-
- if (!wait_for_completion_timeout(&adapter->flash_compl,
- msecs_to_jiffies(12000)))
- status = -1;
- else
- status = adapter->flash_status;
-
- resp = embedded_payload(wrb);
- if (!status) {
- *data_written = le32_to_cpu(resp->actual_write_len);
- } else {
- *addn_status = resp->additional_status;
- status = resp->status;
- }
-
- return status;
-
-err_unlock:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 flash_type, u32 flash_opcode, u32 buf_size)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_write_flashrom *req;
- struct be_sge *sge;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
- adapter->flash_status = 0;
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err_unlock;
- }
- req = cmd->va;
- sge = nonembedded_sgl(wrb);
-
- be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
- OPCODE_COMMON_WRITE_FLASHROM);
- wrb->tag1 = CMD_SUBSYSTEM_COMMON;
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
- sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
- sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
- sge->len = cpu_to_le32(cmd->size);
-
- req->params.op_type = cpu_to_le32(flash_type);
- req->params.op_code = cpu_to_le32(flash_opcode);
- req->params.data_buf_size = cpu_to_le32(buf_size);
-
- be_mcc_notify(adapter);
- spin_unlock_bh(&adapter->mcc_lock);
-
- if (!wait_for_completion_timeout(&adapter->flash_compl,
- msecs_to_jiffies(12000)))
- status = -1;
- else
- status = adapter->flash_status;
-
- return status;
-
-err_unlock:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
- int offset)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_write_flashrom *req;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
- OPCODE_COMMON_READ_FLASHROM);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
-
- req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
- req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
- req->params.offset = cpu_to_le32(offset);
- req->params.data_buf_size = cpu_to_le32(0x4);
-
- status = be_mcc_notify_wait(adapter);
- if (!status)
- memcpy(flashed_crc, req->params.data_buf, 4);
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
- struct be_dma_mem *nonemb_cmd)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_acpi_wol_magic_config *req;
- struct be_sge *sge;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = nonemb_cmd->va;
- sge = nonembedded_sgl(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
- OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
- memcpy(req->magic_mac, mac, ETH_ALEN);
-
- sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
- sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
- sge->len = cpu_to_le32(nonemb_cmd->size);
-
- status = be_mcc_notify_wait(adapter);
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
- u8 loopback_type, u8 enable)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_set_lmode *req;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
-
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
- OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
- sizeof(*req));
-
- req->src_port = port_num;
- req->dest_port = port_num;
- req->loopback_type = loopback_type;
- req->loopback_state = enable;
-
- status = be_mcc_notify_wait(adapter);
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
- u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_loopback_test *req;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
-
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_LOWLEVEL_LOOPBACK_TEST);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
- OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
- req->hdr.timeout = cpu_to_le32(4);
-
- req->pattern = cpu_to_le64(pattern);
- req->src_port = cpu_to_le32(port_num);
- req->dest_port = cpu_to_le32(port_num);
- req->pkt_size = cpu_to_le32(pkt_size);
- req->num_pkts = cpu_to_le32(num_pkts);
- req->loopback_type = cpu_to_le32(loopback_type);
-
- status = be_mcc_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
- status = le32_to_cpu(resp->status);
- }
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
- u32 byte_cnt, struct be_dma_mem *cmd)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_ddrdma_test *req;
- struct be_sge *sge;
- int status;
- int i, j = 0;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = cmd->va;
- sge = nonembedded_sgl(wrb);
- be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
- OPCODE_LOWLEVEL_HOST_DDR_DMA);
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
- OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);
-
- sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
- sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
- sge->len = cpu_to_le32(cmd->size);
-
- req->pattern = cpu_to_le64(pattern);
- req->byte_count = cpu_to_le32(byte_cnt);
- for (i = 0; i < byte_cnt; i++) {
- req->snd_buff[i] = (u8)(pattern >> (j*8));
- j++;
- if (j > 7)
- j = 0;
- }
-
- status = be_mcc_notify_wait(adapter);
-
- if (!status) {
- struct be_cmd_resp_ddrdma_test *resp;
- resp = cmd->va;
- if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
- resp->snd_err) {
- status = -1;
- }
- }
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-int be_cmd_get_seeprom_data(struct be_adapter *adapter,
- struct be_dma_mem *nonemb_cmd)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_seeprom_read *req;
- struct be_sge *sge;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = nonemb_cmd->va;
- sge = nonembedded_sgl(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
- OPCODE_COMMON_SEEPROM_READ);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
-
- sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
- sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
- sge->len = cpu_to_le32(nonemb_cmd->size);
-
- status = be_mcc_notify_wait(adapter);
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-int be_cmd_get_phy_info(struct be_adapter *adapter,
- struct be_phy_info *phy_info)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_get_phy_info *req;
- struct be_sge *sge;
- struct be_dma_mem cmd;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- cmd.size = sizeof(struct be_cmd_req_get_phy_info);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
- &cmd.dma);
- if (!cmd.va) {
- dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
- status = -ENOMEM;
- goto err;
- }
-
- req = cmd.va;
- sge = nonembedded_sgl(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
- OPCODE_COMMON_GET_PHY_DETAILS);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_PHY_DETAILS,
- sizeof(*req));
-
- sge->pa_hi = cpu_to_le32(upper_32_bits(cmd.dma));
- sge->pa_lo = cpu_to_le32(cmd.dma & 0xFFFFFFFF);
- sge->len = cpu_to_le32(cmd.size);
-
- status = be_mcc_notify_wait(adapter);
- if (!status) {
- struct be_phy_info *resp_phy_info =
- cmd.va + sizeof(struct be_cmd_req_hdr);
- phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type);
- phy_info->interface_type =
- le16_to_cpu(resp_phy_info->interface_type);
- }
- pci_free_consistent(adapter->pdev, cmd.size,
- cmd.va, cmd.dma);
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_set_qos *req;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
-
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_COMMON_SET_QOS);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_QOS, sizeof(*req));
-
- req->hdr.domain = domain;
- req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
- req->max_bps_nic = cpu_to_le32(bps);
-
- status = be_mcc_notify_wait(adapter);
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
-int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_cntl_attribs *req;
- struct be_cmd_resp_cntl_attribs *resp;
- struct be_sge *sge;
- int status;
- int payload_len = max(sizeof(*req), sizeof(*resp));
- struct mgmt_controller_attrib *attribs;
- struct be_dma_mem attribs_cmd;
-
- memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
- attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
- attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
- &attribs_cmd.dma);
- if (!attribs_cmd.va) {
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure\n");
- return -ENOMEM;
- }
-
-	if (mutex_lock_interruptible(&adapter->mbox_lock)) {
-		pci_free_consistent(adapter->pdev, attribs_cmd.size,
-				    attribs_cmd.va, attribs_cmd.dma);
-		return -1;
-	}
-
- wrb = wrb_from_mbox(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = attribs_cmd.va;
- sge = nonembedded_sgl(wrb);
-
- be_wrb_hdr_prepare(wrb, payload_len, false, 1,
- OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
- sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
- sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
- sge->len = cpu_to_le32(attribs_cmd.size);
-
- status = be_mbox_notify_wait(adapter);
- if (!status) {
- attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
- adapter->hba_port_num = attribs->hba_attribs.phy_port;
- }
-
-err:
- mutex_unlock(&adapter->mbox_lock);
- pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
- attribs_cmd.dma);
- return status;
-}
-
-/* Uses mbox */
-int be_cmd_req_native_mode(struct be_adapter *adapter)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_set_func_cap *req;
- int status;
-
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
-
- wrb = wrb_from_mbox(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
-
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
- OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req));
-
- req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
- CAPABILITY_BE3_NATIVE_ERX_API);
- req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
-
- status = be_mbox_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
- adapter->be3_native = le32_to_cpu(resp->cap_flags) &
- CAPABILITY_BE3_NATIVE_ERX_API;
- }
-err:
- mutex_unlock(&adapter->mbox_lock);
- return status;
-}
+++ /dev/null
-/*
- * Copyright (C) 2005 - 2011 Emulex
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
- * Public License is included in this distribution in the file called COPYING.
- *
- * Contact Information:
- * linux-drivers@emulex.com
- *
- * Emulex
- * 3333 Susan Street
- * Costa Mesa, CA 92626
- */
-
-/*
- * The driver sends configuration and management command requests to the
- * firmware in the BE. These requests are communicated to the processor
- * using Work Request Blocks (WRBs) submitted to the MCC-WRB ring or via one
- * WRB inside a MAILBOX.
- * The commands are serviced by the ARM processor in the BladeEngine's MPU.
- */
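
Every synchronous command in be_cmds.c above follows the same MCC skeleton; a
condensed sketch of that pattern, reusing the helpers already shown
(OPCODE_COMMON_FOO is a hypothetical placeholder, not a real opcode):

static int be_cmd_foo(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);	/* serialize MCC-WRB producers */

	wrb = wrb_from_mccq(adapter);		/* claim a free WRB slot */
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	/* fill WRB dword 0 and the request header, then post and wait */
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_FOO);
	be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_FOO,
			sizeof(*req));

	status = be_mcc_notify_wait(adapter);	/* block for the completion */
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}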
-
-struct be_sge {
- u32 pa_lo;
- u32 pa_hi;
- u32 len;
-};
-
-#define MCC_WRB_EMBEDDED_MASK 1 /* bit 0 of dword 0*/
-#define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */
-#define MCC_WRB_SGE_CNT_MASK 0x1F /* bits 3 - 7 of dword 0 */
-struct be_mcc_wrb {
- u32 embedded; /* dword 0 */
- u32 payload_length; /* dword 1 */
- u32 tag0; /* dword 2 */
- u32 tag1; /* dword 3 */
- u32 rsvd; /* dword 4 */
- union {
- u8 embedded_payload[236]; /* used by embedded cmds */
- struct be_sge sgl[19]; /* used by non-embedded cmds */
- } payload;
-};
-
-#define CQE_FLAGS_VALID_MASK (1 << 31)
-#define CQE_FLAGS_ASYNC_MASK (1 << 30)
-#define CQE_FLAGS_COMPLETED_MASK (1 << 28)
-#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
-
-/* Completion Status */
-enum {
- MCC_STATUS_SUCCESS = 0,
- MCC_STATUS_FAILED = 1,
- MCC_STATUS_ILLEGAL_REQUEST = 2,
- MCC_STATUS_ILLEGAL_FIELD = 3,
- MCC_STATUS_INSUFFICIENT_BUFFER = 4,
- MCC_STATUS_UNAUTHORIZED_REQUEST = 5,
- MCC_STATUS_NOT_SUPPORTED = 66
-};
-
-#define CQE_STATUS_COMPL_MASK 0xFFFF
-#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
-#define CQE_STATUS_EXTD_MASK 0xFFFF
-#define CQE_STATUS_EXTD_SHIFT 16 /* bits 16 - 31 */
-
-struct be_mcc_compl {
- u32 status; /* dword 0 */
- u32 tag0; /* dword 1 */
- u32 tag1; /* dword 2 */
- u32 flags; /* dword 3 */
-};
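
The base and extended completion statuses share the status dword; a minimal
sketch of unpacking them with the CQE_STATUS_* shift/mask pairs above
(assuming the dword has already been converted to CPU byte order):

static inline u16 mcc_compl_base_status(struct be_mcc_compl *compl)
{
	/* bits 0 - 15: base completion status */
	return (compl->status >> CQE_STATUS_COMPL_SHIFT) &
			CQE_STATUS_COMPL_MASK;
}

static inline u16 mcc_compl_extd_status(struct be_mcc_compl *compl)
{
	/* bits 16 - 31: extended (additional) status */
	return (compl->status >> CQE_STATUS_EXTD_SHIFT) &
			CQE_STATUS_EXTD_MASK;
}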
-
-/* When the async bit of mcc_compl is set, the last 4 bytes of
- * mcc_compl are interpreted as follows:
- */
-#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
-#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
-#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16
-#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xFF
-#define ASYNC_EVENT_CODE_LINK_STATE 0x1
-#define ASYNC_EVENT_CODE_GRP_5 0x5
-#define ASYNC_EVENT_QOS_SPEED 0x1
-#define ASYNC_EVENT_COS_PRIORITY 0x2
-#define ASYNC_EVENT_PVID_STATE 0x3
-struct be_async_event_trailer {
- u32 code;
-};
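
Classifying an async completion means pulling the event code and type out of
the trailer word; a sketch using the ASYNC_TRAILER_* constants above (again
assuming the word is already in CPU byte order):

static inline u8 async_event_code(const struct be_async_event_trailer *t)
{
	return (t->code >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
			ASYNC_TRAILER_EVENT_CODE_MASK;
}

static inline u8 async_event_type(const struct be_async_event_trailer *t)
{
	return (t->code >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
			ASYNC_TRAILER_EVENT_TYPE_MASK;
}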
-
-enum {
- LINK_DOWN = 0x0,
- LINK_UP = 0x1
-};
-#define LINK_STATUS_MASK 0x1
-
-/* When the event code of an async trailer is link-state, the mcc_compl
- * must be interpreted as follows
- */
-struct be_async_event_link_state {
- u8 physical_port;
- u8 port_link_status;
- u8 port_duplex;
- u8 port_speed;
- u8 port_fault;
- u8 rsvd0[7];
- struct be_async_event_trailer trailer;
-} __packed;
-
-/* When the event code of an async trailer is GRP-5 and event_type is QOS_SPEED
- * the mcc_compl must be interpreted as follows
- */
-struct be_async_event_grp5_qos_link_speed {
- u8 physical_port;
- u8 rsvd[5];
- u16 qos_link_speed;
- u32 event_tag;
- struct be_async_event_trailer trailer;
-} __packed;
-
-/* When the event code of an async trailer is GRP5 and event type is
- * CoS-Priority, the mcc_compl must be interpreted as follows
- */
-struct be_async_event_grp5_cos_priority {
- u8 physical_port;
- u8 available_priority_bmap;
- u8 reco_default_priority;
- u8 valid;
- u8 rsvd0;
- u8 event_tag;
- struct be_async_event_trailer trailer;
-} __packed;
-
-/* When the event code of an async trailer is GRP5 and event type is
- * PVID state, the mcc_compl must be interpreted as follows
- */
-struct be_async_event_grp5_pvid_state {
- u8 enabled;
- u8 rsvd0;
- u16 tag;
- u32 event_tag;
- u32 rsvd1;
- struct be_async_event_trailer trailer;
-} __packed;
-
-struct be_mcc_mailbox {
- struct be_mcc_wrb wrb;
- struct be_mcc_compl compl;
-};
-
-#define CMD_SUBSYSTEM_COMMON 0x1
-#define CMD_SUBSYSTEM_ETH 0x3
-#define CMD_SUBSYSTEM_LOWLEVEL 0xb
-
-#define OPCODE_COMMON_NTWK_MAC_QUERY 1
-#define OPCODE_COMMON_NTWK_MAC_SET 2
-#define OPCODE_COMMON_NTWK_MULTICAST_SET 3
-#define OPCODE_COMMON_NTWK_VLAN_CONFIG 4
-#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5
-#define OPCODE_COMMON_READ_FLASHROM 6
-#define OPCODE_COMMON_WRITE_FLASHROM 7
-#define OPCODE_COMMON_CQ_CREATE 12
-#define OPCODE_COMMON_EQ_CREATE 13
-#define OPCODE_COMMON_MCC_CREATE 21
-#define OPCODE_COMMON_SET_QOS 28
-#define OPCODE_COMMON_MCC_CREATE_EXT 90
-#define OPCODE_COMMON_SEEPROM_READ 30
-#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
-#define OPCODE_COMMON_NTWK_RX_FILTER 34
-#define OPCODE_COMMON_GET_FW_VERSION 35
-#define OPCODE_COMMON_SET_FLOW_CONTROL 36
-#define OPCODE_COMMON_GET_FLOW_CONTROL 37
-#define OPCODE_COMMON_SET_FRAME_SIZE 39
-#define OPCODE_COMMON_MODIFY_EQ_DELAY 41
-#define OPCODE_COMMON_FIRMWARE_CONFIG 42
-#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50
-#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51
-#define OPCODE_COMMON_MCC_DESTROY 53
-#define OPCODE_COMMON_CQ_DESTROY 54
-#define OPCODE_COMMON_EQ_DESTROY 55
-#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
-#define OPCODE_COMMON_NTWK_PMAC_ADD 59
-#define OPCODE_COMMON_NTWK_PMAC_DEL 60
-#define OPCODE_COMMON_FUNCTION_RESET 61
-#define OPCODE_COMMON_MANAGE_FAT 68
-#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
-#define OPCODE_COMMON_GET_BEACON_STATE 70
-#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
-#define OPCODE_COMMON_GET_PHY_DETAILS 102
-#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
-#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
-#define OPCODE_COMMON_WRITE_OBJECT 172
-
-#define OPCODE_ETH_RSS_CONFIG 1
-#define OPCODE_ETH_ACPI_CONFIG 2
-#define OPCODE_ETH_PROMISCUOUS 3
-#define OPCODE_ETH_GET_STATISTICS 4
-#define OPCODE_ETH_TX_CREATE 7
-#define OPCODE_ETH_RX_CREATE 8
-#define OPCODE_ETH_TX_DESTROY 9
-#define OPCODE_ETH_RX_DESTROY 10
-#define OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG 12
-#define OPCODE_ETH_GET_PPORT_STATS 18
-
-#define OPCODE_LOWLEVEL_HOST_DDR_DMA 17
-#define OPCODE_LOWLEVEL_LOOPBACK_TEST 18
-#define OPCODE_LOWLEVEL_SET_LOOPBACK_MODE 19
-
-struct be_cmd_req_hdr {
- u8 opcode; /* dword 0 */
- u8 subsystem; /* dword 0 */
- u8 port_number; /* dword 0 */
- u8 domain; /* dword 0 */
- u32 timeout; /* dword 1 */
- u32 request_length; /* dword 2 */
- u8 version; /* dword 3 */
- u8 rsvd[3]; /* dword 3 */
-};
-
-#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
-#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */
-struct be_cmd_resp_hdr {
- u32 info; /* dword 0 */
- u32 status; /* dword 1 */
- u32 response_length; /* dword 2 */
- u32 actual_resp_len; /* dword 3 */
-};
-
-struct phys_addr {
- u32 lo;
- u32 hi;
-};
-
-/**************************
- * BE Command definitions *
- **************************/
-
-/* Pseudo amap definition in which each bit of the actual structure is defined
- * as a byte: used to calculate offset/shift/mask of each field */
-struct amap_eq_context {
- u8 cidx[13]; /* dword 0*/
- u8 rsvd0[3]; /* dword 0*/
- u8 epidx[13]; /* dword 0*/
- u8 valid; /* dword 0*/
- u8 rsvd1; /* dword 0*/
- u8 size; /* dword 0*/
- u8 pidx[13]; /* dword 1*/
- u8 rsvd2[3]; /* dword 1*/
- u8 pd[10]; /* dword 1*/
- u8 count[3]; /* dword 1*/
- u8 solevent; /* dword 1*/
- u8 stalled; /* dword 1*/
- u8 armed; /* dword 1*/
- u8 rsvd3[4]; /* dword 2*/
- u8 func[8]; /* dword 2*/
- u8 rsvd4; /* dword 2*/
- u8 delaymult[10]; /* dword 2*/
- u8 rsvd5[2]; /* dword 2*/
- u8 phase[2]; /* dword 2*/
- u8 nodelay; /* dword 2*/
- u8 rsvd6[4]; /* dword 2*/
- u8 rsvd7[32]; /* dword 3*/
-} __packed;
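
Because each field is declared as one byte per bit, offsetof()/sizeof()
arithmetic on the pseudo struct yields the bit offset and width of the real
hardware field; AMAP_SET_BITS() (from be_hw.h, used the same way in
lancer_cmd_write_object above) packs a value accordingly. A sketch for the EQ
context:

	void *ctxt = req->context;	/* req: struct be_cmd_req_eq_create */

	AMAP_SET_BITS(struct amap_eq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_eq_context, armed, ctxt, 0);	/* not armed yet */
	be_dws_cpu_to_le(ctxt, sizeof(req->context));	/* byte-swap for the hw */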
-
-struct be_cmd_req_eq_create {
- struct be_cmd_req_hdr hdr;
- u16 num_pages; /* sword */
- u16 rsvd0; /* sword */
- u8 context[sizeof(struct amap_eq_context) / 8];
- struct phys_addr pages[8];
-} __packed;
-
-struct be_cmd_resp_eq_create {
- struct be_cmd_resp_hdr resp_hdr;
- u16 eq_id; /* sword */
- u16 rsvd0; /* sword */
-} __packed;
-
-/******************** Mac query ***************************/
-enum {
- MAC_ADDRESS_TYPE_STORAGE = 0x0,
- MAC_ADDRESS_TYPE_NETWORK = 0x1,
- MAC_ADDRESS_TYPE_PD = 0x2,
- MAC_ADDRESS_TYPE_MANAGEMENT = 0x3
-};
-
-struct mac_addr {
- u16 size_of_struct;
- u8 addr[ETH_ALEN];
-} __packed;
-
-struct be_cmd_req_mac_query {
- struct be_cmd_req_hdr hdr;
- u8 type;
- u8 permanent;
- u16 if_id;
-} __packed;
-
-struct be_cmd_resp_mac_query {
- struct be_cmd_resp_hdr hdr;
- struct mac_addr mac;
-};
-
-/******************** PMac Add ***************************/
-struct be_cmd_req_pmac_add {
- struct be_cmd_req_hdr hdr;
- u32 if_id;
- u8 mac_address[ETH_ALEN];
- u8 rsvd0[2];
-} __packed;
-
-struct be_cmd_resp_pmac_add {
- struct be_cmd_resp_hdr hdr;
- u32 pmac_id;
-};
-
-/******************** PMac Del ***************************/
-struct be_cmd_req_pmac_del {
- struct be_cmd_req_hdr hdr;
- u32 if_id;
- u32 pmac_id;
-};
-
-/******************** Create CQ ***************************/
-/* Pseudo amap definition in which each bit of the actual structure is defined
- * as a byte: used to calculate offset/shift/mask of each field */
-struct amap_cq_context_be {
- u8 cidx[11]; /* dword 0*/
- u8 rsvd0; /* dword 0*/
- u8 coalescwm[2]; /* dword 0*/
- u8 nodelay; /* dword 0*/
- u8 epidx[11]; /* dword 0*/
- u8 rsvd1; /* dword 0*/
- u8 count[2]; /* dword 0*/
- u8 valid; /* dword 0*/
- u8 solevent; /* dword 0*/
- u8 eventable; /* dword 0*/
- u8 pidx[11]; /* dword 1*/
- u8 rsvd2; /* dword 1*/
- u8 pd[10]; /* dword 1*/
- u8 eqid[8]; /* dword 1*/
- u8 stalled; /* dword 1*/
- u8 armed; /* dword 1*/
- u8 rsvd3[4]; /* dword 2*/
- u8 func[8]; /* dword 2*/
- u8 rsvd4[20]; /* dword 2*/
- u8 rsvd5[32]; /* dword 3*/
-} __packed;
-
-struct amap_cq_context_lancer {
- u8 rsvd0[12]; /* dword 0*/
- u8 coalescwm[2]; /* dword 0*/
- u8 nodelay; /* dword 0*/
- u8 rsvd1[12]; /* dword 0*/
- u8 count[2]; /* dword 0*/
- u8 valid; /* dword 0*/
- u8 rsvd2; /* dword 0*/
- u8 eventable; /* dword 0*/
- u8 eqid[16]; /* dword 1*/
- u8 rsvd3[15]; /* dword 1*/
- u8 armed; /* dword 1*/
- u8 rsvd4[32]; /* dword 2*/
- u8 rsvd5[32]; /* dword 3*/
-} __packed;
-
-struct be_cmd_req_cq_create {
- struct be_cmd_req_hdr hdr;
- u16 num_pages;
- u8 page_size;
- u8 rsvd0;
- u8 context[sizeof(struct amap_cq_context_be) / 8];
- struct phys_addr pages[8];
-} __packed;
-
-
-struct be_cmd_resp_cq_create {
- struct be_cmd_resp_hdr hdr;
- u16 cq_id;
- u16 rsvd0;
-} __packed;
-
-struct be_cmd_req_get_fat {
- struct be_cmd_req_hdr hdr;
- u32 fat_operation;
- u32 read_log_offset;
- u32 read_log_length;
- u32 data_buffer_size;
- u32 data_buffer[1];
-} __packed;
-
-struct be_cmd_resp_get_fat {
- struct be_cmd_resp_hdr hdr;
- u32 log_size;
- u32 read_log_length;
- u32 rsvd[2];
- u32 data_buffer[1];
-} __packed;
-
-
-/******************** Create MCCQ ***************************/
-/* Pseudo amap definition in which each bit of the actual structure is defined
- * as a byte: used to calculate offset/shift/mask of each field */
-struct amap_mcc_context_be {
- u8 con_index[14];
- u8 rsvd0[2];
- u8 ring_size[4];
- u8 fetch_wrb;
- u8 fetch_r2t;
- u8 cq_id[10];
- u8 prod_index[14];
- u8 fid[8];
- u8 pdid[9];
- u8 valid;
- u8 rsvd1[32];
- u8 rsvd2[32];
-} __packed;
-
-struct amap_mcc_context_lancer {
- u8 async_cq_id[16];
- u8 ring_size[4];
- u8 rsvd0[12];
- u8 rsvd1[31];
- u8 valid;
- u8 async_cq_valid[1];
- u8 rsvd2[31];
- u8 rsvd3[32];
-} __packed;
-
-struct be_cmd_req_mcc_create {
- struct be_cmd_req_hdr hdr;
- u16 num_pages;
- u16 cq_id;
- u8 context[sizeof(struct amap_mcc_context_be) / 8];
- struct phys_addr pages[8];
-} __packed;
-
-struct be_cmd_req_mcc_ext_create {
- struct be_cmd_req_hdr hdr;
- u16 num_pages;
- u16 cq_id;
- u32 async_event_bitmap[1];
- u8 context[sizeof(struct amap_mcc_context_be) / 8];
- struct phys_addr pages[8];
-} __packed;
-
-struct be_cmd_resp_mcc_create {
- struct be_cmd_resp_hdr hdr;
- u16 id;
- u16 rsvd0;
-} __packed;
-
-/******************** Create TxQ ***************************/
-#define BE_ETH_TX_RING_TYPE_STANDARD 2
-#define BE_ULP1_NUM 1
-
-/* Pseudo amap definition in which each bit of the actual structure is defined
- * as a byte: used to calculate offset/shift/mask of each field */
-struct amap_tx_context {
- u8 if_id[16]; /* dword 0 */
- u8 tx_ring_size[4]; /* dword 0 */
- u8 rsvd1[26]; /* dword 0 */
- u8 pci_func_id[8]; /* dword 1 */
- u8 rsvd2[9]; /* dword 1 */
- u8 ctx_valid; /* dword 1 */
- u8 cq_id_send[16]; /* dword 2 */
- u8 rsvd3[16]; /* dword 2 */
- u8 rsvd4[32]; /* dword 3 */
- u8 rsvd5[32]; /* dword 4 */
- u8 rsvd6[32]; /* dword 5 */
- u8 rsvd7[32]; /* dword 6 */
- u8 rsvd8[32]; /* dword 7 */
- u8 rsvd9[32]; /* dword 8 */
- u8 rsvd10[32]; /* dword 9 */
- u8 rsvd11[32]; /* dword 10 */
- u8 rsvd12[32]; /* dword 11 */
- u8 rsvd13[32]; /* dword 12 */
- u8 rsvd14[32]; /* dword 13 */
- u8 rsvd15[32]; /* dword 14 */
- u8 rsvd16[32]; /* dword 15 */
-} __packed;
-
-struct be_cmd_req_eth_tx_create {
- struct be_cmd_req_hdr hdr;
- u8 num_pages;
- u8 ulp_num;
- u8 type;
- u8 bound_port;
- u8 context[sizeof(struct amap_tx_context) / 8];
- struct phys_addr pages[8];
-} __packed;
-
-struct be_cmd_resp_eth_tx_create {
- struct be_cmd_resp_hdr hdr;
- u16 cid;
- u16 rsvd0;
-} __packed;
-
-/******************** Create RxQ ***************************/
-struct be_cmd_req_eth_rx_create {
- struct be_cmd_req_hdr hdr;
- u16 cq_id;
- u8 frag_size;
- u8 num_pages;
- struct phys_addr pages[2];
- u32 interface_id;
- u16 max_frame_size;
- u16 rsvd0;
- u32 rss_queue;
-} __packed;
-
-struct be_cmd_resp_eth_rx_create {
- struct be_cmd_resp_hdr hdr;
- u16 id;
- u8 rss_id;
- u8 rsvd0;
-} __packed;
-
-/******************** Q Destroy ***************************/
-/* Type of Queue to be destroyed */
-enum {
- QTYPE_EQ = 1,
- QTYPE_CQ,
- QTYPE_TXQ,
- QTYPE_RXQ,
- QTYPE_MCCQ
-};
-
-struct be_cmd_req_q_destroy {
- struct be_cmd_req_hdr hdr;
- u16 id;
- u16 bypass_flush; /* valid only for rx q destroy */
-} __packed;
-
-/************ I/f Create (it's actually I/f Config Create) **********/
-
-/* Capability flags for the i/f */
-enum be_if_flags {
- BE_IF_FLAGS_RSS = 0x4,
- BE_IF_FLAGS_PROMISCUOUS = 0x8,
- BE_IF_FLAGS_BROADCAST = 0x10,
- BE_IF_FLAGS_UNTAGGED = 0x20,
- BE_IF_FLAGS_ULP = 0x40,
- BE_IF_FLAGS_VLAN_PROMISCUOUS = 0x80,
- BE_IF_FLAGS_VLAN = 0x100,
- BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200,
- BE_IF_FLAGS_PASS_L2_ERRORS = 0x400,
- BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800,
- BE_IF_FLAGS_MULTICAST = 0x1000
-};
-
-/* An RX interface is an object with one or more MAC addresses and
- * filtering capabilities. */
-struct be_cmd_req_if_create {
- struct be_cmd_req_hdr hdr;
- u32 version; /* ignore currently */
- u32 capability_flags;
- u32 enable_flags;
- u8 mac_addr[ETH_ALEN];
- u8 rsvd0;
- u8 pmac_invalid; /* if set, don't attach the mac addr to the i/f */
- u32 vlan_tag; /* not used currently */
-} __packed;
-
-struct be_cmd_resp_if_create {
- struct be_cmd_resp_hdr hdr;
- u32 interface_id;
- u32 pmac_id;
-};
-
-/****** I/f Destroy (it's actually I/f Config Destroy) **********/
-struct be_cmd_req_if_destroy {
- struct be_cmd_req_hdr hdr;
- u32 interface_id;
-};
-
-/*************** HW Stats Get **********************************/
-struct be_port_rxf_stats_v0 {
- u32 rx_bytes_lsd; /* dword 0*/
- u32 rx_bytes_msd; /* dword 1*/
- u32 rx_total_frames; /* dword 2*/
- u32 rx_unicast_frames; /* dword 3*/
- u32 rx_multicast_frames; /* dword 4*/
- u32 rx_broadcast_frames; /* dword 5*/
- u32 rx_crc_errors; /* dword 6*/
- u32 rx_alignment_symbol_errors; /* dword 7*/
- u32 rx_pause_frames; /* dword 8*/
- u32 rx_control_frames; /* dword 9*/
- u32 rx_in_range_errors; /* dword 10*/
- u32 rx_out_range_errors; /* dword 11*/
- u32 rx_frame_too_long; /* dword 12*/
- u32 rx_address_match_errors; /* dword 13*/
- u32 rx_vlan_mismatch; /* dword 14*/
- u32 rx_dropped_too_small; /* dword 15*/
- u32 rx_dropped_too_short; /* dword 16*/
- u32 rx_dropped_header_too_small; /* dword 17*/
- u32 rx_dropped_tcp_length; /* dword 18*/
- u32 rx_dropped_runt; /* dword 19*/
- u32 rx_64_byte_packets; /* dword 20*/
- u32 rx_65_127_byte_packets; /* dword 21*/
- u32 rx_128_256_byte_packets; /* dword 22*/
- u32 rx_256_511_byte_packets; /* dword 23*/
- u32 rx_512_1023_byte_packets; /* dword 24*/
- u32 rx_1024_1518_byte_packets; /* dword 25*/
- u32 rx_1519_2047_byte_packets; /* dword 26*/
- u32 rx_2048_4095_byte_packets; /* dword 27*/
- u32 rx_4096_8191_byte_packets; /* dword 28*/
- u32 rx_8192_9216_byte_packets; /* dword 29*/
- u32 rx_ip_checksum_errs; /* dword 30*/
- u32 rx_tcp_checksum_errs; /* dword 31*/
- u32 rx_udp_checksum_errs; /* dword 32*/
- u32 rx_non_rss_packets; /* dword 33*/
- u32 rx_ipv4_packets; /* dword 34*/
- u32 rx_ipv6_packets; /* dword 35*/
- u32 rx_ipv4_bytes_lsd; /* dword 36*/
- u32 rx_ipv4_bytes_msd; /* dword 37*/
- u32 rx_ipv6_bytes_lsd; /* dword 38*/
- u32 rx_ipv6_bytes_msd; /* dword 39*/
- u32 rx_chute1_packets; /* dword 40*/
- u32 rx_chute2_packets; /* dword 41*/
- u32 rx_chute3_packets; /* dword 42*/
- u32 rx_management_packets; /* dword 43*/
- u32 rx_switched_unicast_packets; /* dword 44*/
- u32 rx_switched_multicast_packets; /* dword 45*/
- u32 rx_switched_broadcast_packets; /* dword 46*/
- u32 tx_bytes_lsd; /* dword 47*/
- u32 tx_bytes_msd; /* dword 48*/
- u32 tx_unicastframes; /* dword 49*/
- u32 tx_multicastframes; /* dword 50*/
- u32 tx_broadcastframes; /* dword 51*/
- u32 tx_pauseframes; /* dword 52*/
- u32 tx_controlframes; /* dword 53*/
- u32 tx_64_byte_packets; /* dword 54*/
- u32 tx_65_127_byte_packets; /* dword 55*/
- u32 tx_128_256_byte_packets; /* dword 56*/
- u32 tx_256_511_byte_packets; /* dword 57*/
- u32 tx_512_1023_byte_packets; /* dword 58*/
- u32 tx_1024_1518_byte_packets; /* dword 59*/
- u32 tx_1519_2047_byte_packets; /* dword 60*/
- u32 tx_2048_4095_byte_packets; /* dword 61*/
- u32 tx_4096_8191_byte_packets; /* dword 62*/
- u32 tx_8192_9216_byte_packets; /* dword 63*/
- u32 rx_fifo_overflow; /* dword 64*/
- u32 rx_input_fifo_overflow; /* dword 65*/
-};
-
-struct be_rxf_stats_v0 {
- struct be_port_rxf_stats_v0 port[2];
- u32 rx_drops_no_pbuf; /* dword 132*/
- u32 rx_drops_no_txpb; /* dword 133*/
- u32 rx_drops_no_erx_descr; /* dword 134*/
- u32 rx_drops_no_tpre_descr; /* dword 135*/
- u32 management_rx_port_packets; /* dword 136*/
- u32 management_rx_port_bytes; /* dword 137*/
- u32 management_rx_port_pause_frames; /* dword 138*/
- u32 management_rx_port_errors; /* dword 139*/
- u32 management_tx_port_packets; /* dword 140*/
- u32 management_tx_port_bytes; /* dword 141*/
- u32 management_tx_port_pause; /* dword 142*/
- u32 management_rx_port_rxfifo_overflow; /* dword 143*/
- u32 rx_drops_too_many_frags; /* dword 144*/
- u32 rx_drops_invalid_ring; /* dword 145*/
- u32 forwarded_packets; /* dword 146*/
- u32 rx_drops_mtu; /* dword 147*/
- u32 rsvd0[7];
- u32 port0_jabber_events;
- u32 port1_jabber_events;
- u32 rsvd1[6];
-};
-
-struct be_erx_stats_v0 {
-	u32 rx_drops_no_fragments[44]; /* dwords 0 to 43 */
- u32 rsvd[4];
-};
-
-struct be_pmem_stats {
- u32 eth_red_drops;
- u32 rsvd[5];
-};
-
-struct be_hw_stats_v0 {
- struct be_rxf_stats_v0 rxf;
- u32 rsvd[48];
- struct be_erx_stats_v0 erx;
- struct be_pmem_stats pmem;
-};
-
-struct be_cmd_req_get_stats_v0 {
- struct be_cmd_req_hdr hdr;
- u8 rsvd[sizeof(struct be_hw_stats_v0)];
-};
-
-struct be_cmd_resp_get_stats_v0 {
- struct be_cmd_resp_hdr hdr;
- struct be_hw_stats_v0 hw_stats;
-};
-
-struct lancer_pport_stats {
- u32 tx_packets_lo;
- u32 tx_packets_hi;
- u32 tx_unicast_packets_lo;
- u32 tx_unicast_packets_hi;
- u32 tx_multicast_packets_lo;
- u32 tx_multicast_packets_hi;
- u32 tx_broadcast_packets_lo;
- u32 tx_broadcast_packets_hi;
- u32 tx_bytes_lo;
- u32 tx_bytes_hi;
- u32 tx_unicast_bytes_lo;
- u32 tx_unicast_bytes_hi;
- u32 tx_multicast_bytes_lo;
- u32 tx_multicast_bytes_hi;
- u32 tx_broadcast_bytes_lo;
- u32 tx_broadcast_bytes_hi;
- u32 tx_discards_lo;
- u32 tx_discards_hi;
- u32 tx_errors_lo;
- u32 tx_errors_hi;
- u32 tx_pause_frames_lo;
- u32 tx_pause_frames_hi;
- u32 tx_pause_on_frames_lo;
- u32 tx_pause_on_frames_hi;
- u32 tx_pause_off_frames_lo;
- u32 tx_pause_off_frames_hi;
- u32 tx_internal_mac_errors_lo;
- u32 tx_internal_mac_errors_hi;
- u32 tx_control_frames_lo;
- u32 tx_control_frames_hi;
- u32 tx_packets_64_bytes_lo;
- u32 tx_packets_64_bytes_hi;
- u32 tx_packets_65_to_127_bytes_lo;
- u32 tx_packets_65_to_127_bytes_hi;
- u32 tx_packets_128_to_255_bytes_lo;
- u32 tx_packets_128_to_255_bytes_hi;
- u32 tx_packets_256_to_511_bytes_lo;
- u32 tx_packets_256_to_511_bytes_hi;
- u32 tx_packets_512_to_1023_bytes_lo;
- u32 tx_packets_512_to_1023_bytes_hi;
- u32 tx_packets_1024_to_1518_bytes_lo;
- u32 tx_packets_1024_to_1518_bytes_hi;
- u32 tx_packets_1519_to_2047_bytes_lo;
- u32 tx_packets_1519_to_2047_bytes_hi;
- u32 tx_packets_2048_to_4095_bytes_lo;
- u32 tx_packets_2048_to_4095_bytes_hi;
- u32 tx_packets_4096_to_8191_bytes_lo;
- u32 tx_packets_4096_to_8191_bytes_hi;
- u32 tx_packets_8192_to_9216_bytes_lo;
- u32 tx_packets_8192_to_9216_bytes_hi;
- u32 tx_lso_packets_lo;
- u32 tx_lso_packets_hi;
- u32 rx_packets_lo;
- u32 rx_packets_hi;
- u32 rx_unicast_packets_lo;
- u32 rx_unicast_packets_hi;
- u32 rx_multicast_packets_lo;
- u32 rx_multicast_packets_hi;
- u32 rx_broadcast_packets_lo;
- u32 rx_broadcast_packets_hi;
- u32 rx_bytes_lo;
- u32 rx_bytes_hi;
- u32 rx_unicast_bytes_lo;
- u32 rx_unicast_bytes_hi;
- u32 rx_multicast_bytes_lo;
- u32 rx_multicast_bytes_hi;
- u32 rx_broadcast_bytes_lo;
- u32 rx_broadcast_bytes_hi;
- u32 rx_unknown_protos;
- u32 rsvd_69; /* Word 69 is reserved */
- u32 rx_discards_lo;
- u32 rx_discards_hi;
- u32 rx_errors_lo;
- u32 rx_errors_hi;
- u32 rx_crc_errors_lo;
- u32 rx_crc_errors_hi;
- u32 rx_alignment_errors_lo;
- u32 rx_alignment_errors_hi;
- u32 rx_symbol_errors_lo;
- u32 rx_symbol_errors_hi;
- u32 rx_pause_frames_lo;
- u32 rx_pause_frames_hi;
- u32 rx_pause_on_frames_lo;
- u32 rx_pause_on_frames_hi;
- u32 rx_pause_off_frames_lo;
- u32 rx_pause_off_frames_hi;
- u32 rx_frames_too_long_lo;
- u32 rx_frames_too_long_hi;
- u32 rx_internal_mac_errors_lo;
- u32 rx_internal_mac_errors_hi;
- u32 rx_undersize_packets;
- u32 rx_oversize_packets;
- u32 rx_fragment_packets;
- u32 rx_jabbers;
- u32 rx_control_frames_lo;
- u32 rx_control_frames_hi;
- u32 rx_control_frames_unknown_opcode_lo;
- u32 rx_control_frames_unknown_opcode_hi;
- u32 rx_in_range_errors;
- u32 rx_out_of_range_errors;
- u32 rx_address_match_errors;
- u32 rx_vlan_mismatch_errors;
- u32 rx_dropped_too_small;
- u32 rx_dropped_too_short;
- u32 rx_dropped_header_too_small;
- u32 rx_dropped_invalid_tcp_length;
- u32 rx_dropped_runt;
- u32 rx_ip_checksum_errors;
- u32 rx_tcp_checksum_errors;
- u32 rx_udp_checksum_errors;
- u32 rx_non_rss_packets;
- u32 rsvd_111;
- u32 rx_ipv4_packets_lo;
- u32 rx_ipv4_packets_hi;
- u32 rx_ipv6_packets_lo;
- u32 rx_ipv6_packets_hi;
- u32 rx_ipv4_bytes_lo;
- u32 rx_ipv4_bytes_hi;
- u32 rx_ipv6_bytes_lo;
- u32 rx_ipv6_bytes_hi;
- u32 rx_nic_packets_lo;
- u32 rx_nic_packets_hi;
- u32 rx_tcp_packets_lo;
- u32 rx_tcp_packets_hi;
- u32 rx_iscsi_packets_lo;
- u32 rx_iscsi_packets_hi;
- u32 rx_management_packets_lo;
- u32 rx_management_packets_hi;
- u32 rx_switched_unicast_packets_lo;
- u32 rx_switched_unicast_packets_hi;
- u32 rx_switched_multicast_packets_lo;
- u32 rx_switched_multicast_packets_hi;
- u32 rx_switched_broadcast_packets_lo;
- u32 rx_switched_broadcast_packets_hi;
- u32 num_forwards_lo;
- u32 num_forwards_hi;
- u32 rx_fifo_overflow;
- u32 rx_input_fifo_overflow;
- u32 rx_drops_too_many_frags_lo;
- u32 rx_drops_too_many_frags_hi;
- u32 rx_drops_invalid_queue;
- u32 rsvd_141;
- u32 rx_drops_mtu_lo;
- u32 rx_drops_mtu_hi;
- u32 rx_packets_64_bytes_lo;
- u32 rx_packets_64_bytes_hi;
- u32 rx_packets_65_to_127_bytes_lo;
- u32 rx_packets_65_to_127_bytes_hi;
- u32 rx_packets_128_to_255_bytes_lo;
- u32 rx_packets_128_to_255_bytes_hi;
- u32 rx_packets_256_to_511_bytes_lo;
- u32 rx_packets_256_to_511_bytes_hi;
- u32 rx_packets_512_to_1023_bytes_lo;
- u32 rx_packets_512_to_1023_bytes_hi;
- u32 rx_packets_1024_to_1518_bytes_lo;
- u32 rx_packets_1024_to_1518_bytes_hi;
- u32 rx_packets_1519_to_2047_bytes_lo;
- u32 rx_packets_1519_to_2047_bytes_hi;
- u32 rx_packets_2048_to_4095_bytes_lo;
- u32 rx_packets_2048_to_4095_bytes_hi;
- u32 rx_packets_4096_to_8191_bytes_lo;
- u32 rx_packets_4096_to_8191_bytes_hi;
- u32 rx_packets_8192_to_9216_bytes_lo;
- u32 rx_packets_8192_to_9216_bytes_hi;
-};
-
-struct pport_stats_params {
- u16 pport_num;
- u8 rsvd;
- u8 reset_stats;
-};
-
-struct lancer_cmd_req_pport_stats {
- struct be_cmd_req_hdr hdr;
- union {
- struct pport_stats_params params;
- u8 rsvd[sizeof(struct lancer_pport_stats)];
- } cmd_params;
-};
-
-struct lancer_cmd_resp_pport_stats {
- struct be_cmd_resp_hdr hdr;
- struct lancer_pport_stats pport_stats;
-};
-
-static inline struct lancer_pport_stats*
- pport_stats_from_cmd(struct be_adapter *adapter)
-{
- struct lancer_cmd_resp_pport_stats *cmd = adapter->stats_cmd.va;
- return &cmd->pport_stats;
-}
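
Lancer reports 64-bit counters as _lo/_hi dword pairs, so a consumer folds
each pair into a u64; a minimal sketch (values assumed already in CPU byte
order):

static inline u64 lancer_stat64(u32 lo, u32 hi)
{
	return ((u64)hi << 32) | lo;
}

/* e.g. rx_bytes = lancer_stat64(stats->rx_bytes_lo, stats->rx_bytes_hi); */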
-
-struct be_cmd_req_get_cntl_addnl_attribs {
- struct be_cmd_req_hdr hdr;
- u8 rsvd[8];
-};
-
-struct be_cmd_resp_get_cntl_addnl_attribs {
- struct be_cmd_resp_hdr hdr;
- u16 ipl_file_number;
- u8 ipl_file_version;
- u8 rsvd0;
- u8 on_die_temperature; /* in degrees centigrade*/
- u8 rsvd1[3];
-};
-
-struct be_cmd_req_vlan_config {
- struct be_cmd_req_hdr hdr;
- u8 interface_id;
- u8 promiscuous;
- u8 untagged;
- u8 num_vlan;
- u16 normal_vlan[64];
-} __packed;
-
-/******************* RX FILTER ******************************/
-#define BE_MAX_MC 64 /* set mcast promisc if > 64 */
-struct macaddr {
- u8 byte[ETH_ALEN];
-};
-
-struct be_cmd_req_rx_filter {
- struct be_cmd_req_hdr hdr;
- u32 global_flags_mask;
- u32 global_flags;
- u32 if_flags_mask;
- u32 if_flags;
- u32 if_id;
- u32 mcast_num;
- struct macaddr mcast_mac[BE_MAX_MC];
-};
-
-/******************** Link Status Query *******************/
-struct be_cmd_req_link_status {
- struct be_cmd_req_hdr hdr;
- u32 rsvd;
-};
-
-enum {
- PHY_LINK_DUPLEX_NONE = 0x0,
- PHY_LINK_DUPLEX_HALF = 0x1,
- PHY_LINK_DUPLEX_FULL = 0x2
-};
-
-enum {
- PHY_LINK_SPEED_ZERO = 0x0, /* => No link */
- PHY_LINK_SPEED_10MBPS = 0x1,
- PHY_LINK_SPEED_100MBPS = 0x2,
- PHY_LINK_SPEED_1GBPS = 0x3,
- PHY_LINK_SPEED_10GBPS = 0x4
-};
-
-struct be_cmd_resp_link_status {
- struct be_cmd_resp_hdr hdr;
- u8 physical_port;
- u8 mac_duplex;
- u8 mac_speed;
- u8 mac_fault;
- u8 mgmt_mac_duplex;
- u8 mgmt_mac_speed;
- u16 link_speed;
- u32 rsvd0;
-} __packed;
-
-/******************** Port Identification ***************************/
-/* Identifies the type of port attached to NIC */
-struct be_cmd_req_port_type {
- struct be_cmd_req_hdr hdr;
- u32 page_num;
- u32 port;
-};
-
-enum {
- TR_PAGE_A0 = 0xa0,
- TR_PAGE_A2 = 0xa2
-};
-
-struct be_cmd_resp_port_type {
- struct be_cmd_resp_hdr hdr;
- u32 page_num;
- u32 port;
- struct data {
- u8 identifier;
- u8 identifier_ext;
- u8 connector;
- u8 transceiver[8];
- u8 rsvd0[3];
- u8 length_km;
- u8 length_hm;
- u8 length_om1;
- u8 length_om2;
- u8 length_cu;
- u8 length_cu_m;
- u8 vendor_name[16];
- u8 rsvd;
- u8 vendor_oui[3];
- u8 vendor_pn[16];
- u8 vendor_rev[4];
- } data;
-};
-
-/******************** Get FW Version *******************/
-struct be_cmd_req_get_fw_version {
- struct be_cmd_req_hdr hdr;
- u8 rsvd0[FW_VER_LEN];
- u8 rsvd1[FW_VER_LEN];
-} __packed;
-
-struct be_cmd_resp_get_fw_version {
- struct be_cmd_resp_hdr hdr;
- u8 firmware_version_string[FW_VER_LEN];
- u8 fw_on_flash_version_string[FW_VER_LEN];
-} __packed;
-
-/******************** Set Flow Control *******************/
-struct be_cmd_req_set_flow_control {
- struct be_cmd_req_hdr hdr;
- u16 tx_flow_control;
- u16 rx_flow_control;
-} __packed;
-
-/******************** Get Flow Control *******************/
-struct be_cmd_req_get_flow_control {
- struct be_cmd_req_hdr hdr;
- u32 rsvd;
-};
-
-struct be_cmd_resp_get_flow_control {
- struct be_cmd_resp_hdr hdr;
- u16 tx_flow_control;
- u16 rx_flow_control;
-} __packed;
-
-/******************** Modify EQ Delay *******************/
-struct be_cmd_req_modify_eq_delay {
- struct be_cmd_req_hdr hdr;
- u32 num_eq;
- struct {
- u32 eq_id;
- u32 phase;
- u32 delay_multiplier;
- } delay[8];
-} __packed;
-
-struct be_cmd_resp_modify_eq_delay {
- struct be_cmd_resp_hdr hdr;
- u32 rsvd0;
-} __packed;
-
-/******************** Get FW Config *******************/
-#define BE_FUNCTION_CAPS_RSS 0x2
-struct be_cmd_req_query_fw_cfg {
- struct be_cmd_req_hdr hdr;
- u32 rsvd[31];
-};
-
-struct be_cmd_resp_query_fw_cfg {
- struct be_cmd_resp_hdr hdr;
- u32 be_config_number;
- u32 asic_revision;
- u32 phys_port;
- u32 function_mode;
- u32 rsvd[26];
- u32 function_caps;
-};
-
-/******************** RSS Config *******************/
-/* RSS types */
-#define RSS_ENABLE_NONE 0x0
-#define RSS_ENABLE_IPV4 0x1
-#define RSS_ENABLE_TCP_IPV4 0x2
-#define RSS_ENABLE_IPV6 0x4
-#define RSS_ENABLE_TCP_IPV6 0x8
-
-struct be_cmd_req_rss_config {
- struct be_cmd_req_hdr hdr;
- u32 if_id;
- u16 enable_rss;
- u16 cpu_table_size_log2;
- u32 hash[10];
- u8 cpu_table[128];
- u8 flush;
- u8 rsvd0[3];
-};
-
-/******************** Port Beacon ***************************/
-
-#define BEACON_STATE_ENABLED 0x1
-#define BEACON_STATE_DISABLED 0x0
-
-struct be_cmd_req_enable_disable_beacon {
- struct be_cmd_req_hdr hdr;
- u8 port_num;
- u8 beacon_state;
- u8 beacon_duration;
- u8 status_duration;
-} __packed;
-
-struct be_cmd_resp_enable_disable_beacon {
- struct be_cmd_resp_hdr resp_hdr;
- u32 rsvd0;
-} __packed;
-
-struct be_cmd_req_get_beacon_state {
- struct be_cmd_req_hdr hdr;
- u8 port_num;
- u8 rsvd0;
- u16 rsvd1;
-} __packed;
-
-struct be_cmd_resp_get_beacon_state {
- struct be_cmd_resp_hdr resp_hdr;
- u8 beacon_state;
- u8 rsvd0[3];
-} __packed;
-
-/****************** Firmware Flash ******************/
-struct flashrom_params {
- u32 op_code;
- u32 op_type;
- u32 data_buf_size;
- u32 offset;
- u8 data_buf[4];
-};
-
-struct be_cmd_write_flashrom {
- struct be_cmd_req_hdr hdr;
- struct flashrom_params params;
-};
-
-/**************** Lancer Firmware Flash ************/
-struct amap_lancer_write_obj_context {
- u8 write_length[24];
- u8 reserved1[7];
- u8 eof;
-} __packed;
-
-struct lancer_cmd_req_write_object {
- struct be_cmd_req_hdr hdr;
- u8 context[sizeof(struct amap_lancer_write_obj_context) / 8];
- u32 write_offset;
- u8 object_name[104];
- u32 descriptor_count;
- u32 buf_len;
- u32 addr_low;
- u32 addr_high;
-};
-
-struct lancer_cmd_resp_write_object {
- u8 opcode;
- u8 subsystem;
- u8 rsvd1[2];
- u8 status;
- u8 additional_status;
- u8 rsvd2[2];
- u32 resp_len;
- u32 actual_resp_len;
- u32 actual_write_len;
-};
-
-/************************ WOL *******************************/
-struct be_cmd_req_acpi_wol_magic_config {
- struct be_cmd_req_hdr hdr;
- u32 rsvd0[145];
- u8 magic_mac[6];
- u8 rsvd2[2];
-} __packed;
-
-/********************** LoopBack test *********************/
-struct be_cmd_req_loopback_test {
- struct be_cmd_req_hdr hdr;
- u32 loopback_type;
- u32 num_pkts;
- u64 pattern;
- u32 src_port;
- u32 dest_port;
- u32 pkt_size;
-};
-
-struct be_cmd_resp_loopback_test {
- struct be_cmd_resp_hdr resp_hdr;
- u32 status;
- u32 num_txfer;
- u32 num_rx;
- u32 miscomp_off;
- u32 ticks_compl;
-};
-
-struct be_cmd_req_set_lmode {
- struct be_cmd_req_hdr hdr;
- u8 src_port;
- u8 dest_port;
- u8 loopback_type;
- u8 loopback_state;
-};
-
-struct be_cmd_resp_set_lmode {
- struct be_cmd_resp_hdr resp_hdr;
- u8 rsvd0[4];
-};
-
-/********************** DDR DMA test *********************/
-struct be_cmd_req_ddrdma_test {
- struct be_cmd_req_hdr hdr;
- u64 pattern;
- u32 byte_count;
- u32 rsvd0;
- u8 snd_buff[4096];
- u8 rsvd1[4096];
-};
-
-struct be_cmd_resp_ddrdma_test {
- struct be_cmd_resp_hdr hdr;
- u64 pattern;
- u32 byte_cnt;
- u32 snd_err;
- u8 rsvd0[4096];
- u8 rcv_buff[4096];
-};
-
-/*********************** SEEPROM Read ***********************/
-
-#define BE_READ_SEEPROM_LEN 1024
-struct be_cmd_req_seeprom_read {
- struct be_cmd_req_hdr hdr;
- u8 rsvd0[BE_READ_SEEPROM_LEN];
-};
-
-struct be_cmd_resp_seeprom_read {
- struct be_cmd_req_hdr hdr;
- u8 seeprom_data[BE_READ_SEEPROM_LEN];
-};
-
-enum {
- PHY_TYPE_CX4_10GB = 0,
- PHY_TYPE_XFP_10GB,
- PHY_TYPE_SFP_1GB,
- PHY_TYPE_SFP_PLUS_10GB,
- PHY_TYPE_KR_10GB,
- PHY_TYPE_KX4_10GB,
- PHY_TYPE_BASET_10GB,
- PHY_TYPE_BASET_1GB,
- PHY_TYPE_DISABLED = 255
-};
-
-struct be_cmd_req_get_phy_info {
- struct be_cmd_req_hdr hdr;
- u8 rsvd0[24];
-};
-
-struct be_phy_info {
- u16 phy_type;
- u16 interface_type;
- u32 misc_params;
- u32 future_use[4];
-};
-
-struct be_cmd_resp_get_phy_info {
- struct be_cmd_req_hdr hdr;
- struct be_phy_info phy_info;
-};
-
-/*********************** Set QOS ***********************/
-
-#define BE_QOS_BITS_NIC 1
-
-struct be_cmd_req_set_qos {
- struct be_cmd_req_hdr hdr;
- u32 valid_bits;
- u32 max_bps_nic;
- u32 rsvd[7];
-};
-
-struct be_cmd_resp_set_qos {
- struct be_cmd_resp_hdr hdr;
- u32 rsvd;
-};
-
-/*********************** Controller Attributes ***********************/
-struct be_cmd_req_cntl_attribs {
- struct be_cmd_req_hdr hdr;
-};
-
-struct be_cmd_resp_cntl_attribs {
- struct be_cmd_resp_hdr hdr;
- struct mgmt_controller_attrib attribs;
-};
-
-/*********************** Set driver function ***********************/
-#define CAPABILITY_SW_TIMESTAMPS 2
-#define CAPABILITY_BE3_NATIVE_ERX_API 4
-
-struct be_cmd_req_set_func_cap {
- struct be_cmd_req_hdr hdr;
- u32 valid_cap_flags;
- u32 cap_flags;
- u8 rsvd[212];
-};
-
-struct be_cmd_resp_set_func_cap {
- struct be_cmd_resp_hdr hdr;
- u32 valid_cap_flags;
- u32 cap_flags;
- u8 rsvd[212];
-};
-
-/*************** HW Stats Get v1 **********************************/
-#define BE_TXP_SW_SZ 48
-struct be_port_rxf_stats_v1 {
- u32 rsvd0[12];
- u32 rx_crc_errors;
- u32 rx_alignment_symbol_errors;
- u32 rx_pause_frames;
- u32 rx_priority_pause_frames;
- u32 rx_control_frames;
- u32 rx_in_range_errors;
- u32 rx_out_range_errors;
- u32 rx_frame_too_long;
- u32 rx_address_match_errors;
- u32 rx_dropped_too_small;
- u32 rx_dropped_too_short;
- u32 rx_dropped_header_too_small;
- u32 rx_dropped_tcp_length;
- u32 rx_dropped_runt;
- u32 rsvd1[10];
- u32 rx_ip_checksum_errs;
- u32 rx_tcp_checksum_errs;
- u32 rx_udp_checksum_errs;
- u32 rsvd2[7];
- u32 rx_switched_unicast_packets;
- u32 rx_switched_multicast_packets;
- u32 rx_switched_broadcast_packets;
- u32 rsvd3[3];
- u32 tx_pauseframes;
- u32 tx_priority_pauseframes;
- u32 tx_controlframes;
- u32 rsvd4[10];
- u32 rxpp_fifo_overflow_drop;
- u32 rx_input_fifo_overflow_drop;
- u32 pmem_fifo_overflow_drop;
- u32 jabber_events;
- u32 rsvd5[3];
-};
-
-
-struct be_rxf_stats_v1 {
- struct be_port_rxf_stats_v1 port[4];
- u32 rsvd0[2];
- u32 rx_drops_no_pbuf;
- u32 rx_drops_no_txpb;
- u32 rx_drops_no_erx_descr;
- u32 rx_drops_no_tpre_descr;
- u32 rsvd1[6];
- u32 rx_drops_too_many_frags;
- u32 rx_drops_invalid_ring;
- u32 forwarded_packets;
- u32 rx_drops_mtu;
- u32 rsvd2[14];
-};
-
-struct be_erx_stats_v1 {
-	u32 rx_drops_no_fragments[68]; /* dwords 0 to 67 */
- u32 rsvd[4];
-};
-
-struct be_hw_stats_v1 {
- struct be_rxf_stats_v1 rxf;
- u32 rsvd0[BE_TXP_SW_SZ];
- struct be_erx_stats_v1 erx;
- struct be_pmem_stats pmem;
- u32 rsvd1[3];
-};
-
-struct be_cmd_req_get_stats_v1 {
- struct be_cmd_req_hdr hdr;
- u8 rsvd[sizeof(struct be_hw_stats_v1)];
-};
-
-struct be_cmd_resp_get_stats_v1 {
- struct be_cmd_resp_hdr hdr;
- struct be_hw_stats_v1 hw_stats;
-};
-
-static inline void *hw_stats_from_cmd(struct be_adapter *adapter)
-{
- if (adapter->generation == BE_GEN3) {
- struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
-
- return &cmd->hw_stats;
- } else {
- struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
-
- return &cmd->hw_stats;
- }
-}
-
-static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
-{
- if (adapter->generation == BE_GEN3) {
- struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
-
- return &hw_stats->erx;
- } else {
- struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
-
- return &hw_stats->erx;
- }
-}
-
-extern int be_pci_fnum_get(struct be_adapter *adapter);
-extern int be_cmd_POST(struct be_adapter *adapter);
-extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
- u8 type, bool permanent, u32 if_handle);
-extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
- u32 if_id, u32 *pmac_id, u32 domain);
-extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
- u32 pmac_id, u32 domain);
-extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
- u32 en_flags, u8 *mac, bool pmac_invalid,
- u32 *if_handle, u32 *pmac_id, u32 domain);
-extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
- u32 domain);
-extern int be_cmd_eq_create(struct be_adapter *adapter,
- struct be_queue_info *eq, int eq_delay);
-extern int be_cmd_cq_create(struct be_adapter *adapter,
- struct be_queue_info *cq, struct be_queue_info *eq,
- bool sol_evts, bool no_delay,
- int num_cqe_dma_coalesce);
-extern int be_cmd_mccq_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq);
-extern int be_cmd_txq_create(struct be_adapter *adapter,
- struct be_queue_info *txq,
- struct be_queue_info *cq);
-extern int be_cmd_rxq_create(struct be_adapter *adapter,
- struct be_queue_info *rxq, u16 cq_id,
- u16 frag_size, u16 max_frame_size, u32 if_id,
- u32 rss, u8 *rss_id);
-extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
- int type);
-extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
- struct be_queue_info *q);
-extern int be_cmd_link_status_query(struct be_adapter *adapter,
- u8 *mac_speed, u16 *link_speed, u32 dom);
-extern int be_cmd_reset(struct be_adapter *adapter);
-extern int be_cmd_get_stats(struct be_adapter *adapter,
- struct be_dma_mem *nonemb_cmd);
-extern int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
- struct be_dma_mem *nonemb_cmd);
-extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver);
-
-extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd);
-extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
- u16 *vtag_array, u32 num, bool untagged,
- bool promiscuous);
-extern int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
-extern int be_cmd_set_flow_control(struct be_adapter *adapter,
- u32 tx_fc, u32 rx_fc);
-extern int be_cmd_get_flow_control(struct be_adapter *adapter,
- u32 *tx_fc, u32 *rx_fc);
-extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
- u32 *port_num, u32 *function_mode, u32 *function_caps);
-extern int be_cmd_reset_function(struct be_adapter *adapter);
-extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
- u16 table_size);
-extern int be_process_mcc(struct be_adapter *adapter, int *status);
-extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
- u8 port_num, u8 beacon, u8 status, u8 state);
-extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
- u8 port_num, u32 *state);
-extern int be_cmd_write_flashrom(struct be_adapter *adapter,
- struct be_dma_mem *cmd, u32 flash_oper,
- u32 flash_opcode, u32 buf_size);
-extern int lancer_cmd_write_object(struct be_adapter *adapter,
- struct be_dma_mem *cmd,
- u32 data_size, u32 data_offset,
- const char *obj_name,
- u32 *data_written, u8 *addn_status);
-int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
- int offset);
-extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
- struct be_dma_mem *nonemb_cmd);
-extern int be_cmd_fw_init(struct be_adapter *adapter);
-extern int be_cmd_fw_clean(struct be_adapter *adapter);
-extern void be_async_mcc_enable(struct be_adapter *adapter);
-extern void be_async_mcc_disable(struct be_adapter *adapter);
-extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
- u32 loopback_type, u32 pkt_size,
- u32 num_pkts, u64 pattern);
-extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
- u32 byte_cnt, struct be_dma_mem *cmd);
-extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
- struct be_dma_mem *nonemb_cmd);
-extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
- u8 loopback_type, u8 enable);
-extern int be_cmd_get_phy_info(struct be_adapter *adapter,
- struct be_phy_info *phy_info);
-extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
-extern void be_detect_dump_ue(struct be_adapter *adapter);
-extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
-extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
-extern int be_cmd_req_native_mode(struct be_adapter *adapter);
-extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
-extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
-
+++ /dev/null
-/*
- * Copyright (C) 2005 - 2011 Emulex
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
- * Public License is included in this distribution in the file called COPYING.
- *
- * Contact Information:
- * linux-drivers@emulex.com
- *
- * Emulex
- * 3333 Susan Street
- * Costa Mesa, CA 92626
- */
-
-#include "be.h"
-#include "be_cmds.h"
-#include <linux/ethtool.h>
-
-struct be_ethtool_stat {
- char desc[ETH_GSTRING_LEN];
- int type;
- int size;
- int offset;
-};
-
-enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
-#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
- offsetof(_struct, field)
-#define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\
- FIELDINFO(struct be_tx_stats, field)
-#define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\
- FIELDINFO(struct be_rx_stats, field)
-#define DRVSTAT_INFO(field) #field, DRVSTAT,\
- FIELDINFO(struct be_drv_stats, field)
-
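Each entry in the tables below records a stat's name string, category, field size and byte offset; e.g. {DRVSTAT_INFO(tx_events)} expands roughly to the following (expansion sketch):

	{ "tx_events", DRVSTAT,
	  sizeof(((struct be_drv_stats *)0)->tx_events),	/* FIELD_SIZEOF */
	  offsetof(struct be_drv_stats, tx_events) }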
-static const struct be_ethtool_stat et_stats[] = {
- {DRVSTAT_INFO(tx_events)},
- {DRVSTAT_INFO(rx_crc_errors)},
- {DRVSTAT_INFO(rx_alignment_symbol_errors)},
- {DRVSTAT_INFO(rx_pause_frames)},
- {DRVSTAT_INFO(rx_control_frames)},
- {DRVSTAT_INFO(rx_in_range_errors)},
- {DRVSTAT_INFO(rx_out_range_errors)},
- {DRVSTAT_INFO(rx_frame_too_long)},
- {DRVSTAT_INFO(rx_address_match_errors)},
- {DRVSTAT_INFO(rx_dropped_too_small)},
- {DRVSTAT_INFO(rx_dropped_too_short)},
- {DRVSTAT_INFO(rx_dropped_header_too_small)},
- {DRVSTAT_INFO(rx_dropped_tcp_length)},
- {DRVSTAT_INFO(rx_dropped_runt)},
- {DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
- {DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
- {DRVSTAT_INFO(rx_ip_checksum_errs)},
- {DRVSTAT_INFO(rx_tcp_checksum_errs)},
- {DRVSTAT_INFO(rx_udp_checksum_errs)},
- {DRVSTAT_INFO(tx_pauseframes)},
- {DRVSTAT_INFO(tx_controlframes)},
- {DRVSTAT_INFO(rx_priority_pause_frames)},
- {DRVSTAT_INFO(pmem_fifo_overflow_drop)},
- {DRVSTAT_INFO(jabber_events)},
- {DRVSTAT_INFO(rx_drops_no_pbuf)},
- {DRVSTAT_INFO(rx_drops_no_txpb)},
- {DRVSTAT_INFO(rx_drops_no_erx_descr)},
- {DRVSTAT_INFO(rx_drops_no_tpre_descr)},
- {DRVSTAT_INFO(rx_drops_too_many_frags)},
- {DRVSTAT_INFO(rx_drops_invalid_ring)},
- {DRVSTAT_INFO(forwarded_packets)},
- {DRVSTAT_INFO(rx_drops_mtu)},
- {DRVSTAT_INFO(eth_red_drops)},
- {DRVSTAT_INFO(be_on_die_temperature)}
-};
-#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
-
-/* Stats related to multi RX queues: get_stats routine assumes bytes, pkts
- * are first and second members respectively.
- */
-static const struct be_ethtool_stat et_rx_stats[] = {
- {DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
- {DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
- {DRVSTAT_RX_INFO(rx_polls)},
- {DRVSTAT_RX_INFO(rx_events)},
- {DRVSTAT_RX_INFO(rx_compl)},
- {DRVSTAT_RX_INFO(rx_mcast_pkts)},
- {DRVSTAT_RX_INFO(rx_post_fail)},
- {DRVSTAT_RX_INFO(rx_drops_no_skbs)},
- {DRVSTAT_RX_INFO(rx_drops_no_frags)}
-};
-#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
-
-/* Stats related to multi TX queues: get_stats routine assumes compl is the
- * first member
- */
-static const struct be_ethtool_stat et_tx_stats[] = {
- {DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */
- {DRVSTAT_TX_INFO(tx_bytes)},
- {DRVSTAT_TX_INFO(tx_pkts)},
- {DRVSTAT_TX_INFO(tx_reqs)},
- {DRVSTAT_TX_INFO(tx_wrbs)},
- {DRVSTAT_TX_INFO(tx_stops)}
-};
-#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
-
-static const char et_self_tests[][ETH_GSTRING_LEN] = {
- "MAC Loopback test",
- "PHY Loopback test",
- "External Loopback test",
- "DDR DMA test",
- "Link test"
-};
-
-#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
-#define BE_MAC_LOOPBACK 0x0
-#define BE_PHY_LOOPBACK 0x1
-#define BE_ONE_PORT_EXT_LOOPBACK 0x2
-#define BE_NO_LOOPBACK 0xff
-
-static void
-be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
-
- strcpy(drvinfo->driver, DRV_NAME);
- strcpy(drvinfo->version, DRV_VER);
- strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
- strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
- drvinfo->testinfo_len = 0;
- drvinfo->regdump_len = 0;
- drvinfo->eedump_len = 0;
-}
-
-static int
-be_get_reg_len(struct net_device *netdev)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- u32 log_size = 0;
-
- if (be_physfn(adapter))
- be_cmd_get_reg_len(adapter, &log_size);
-
- return log_size;
-}
-
-static void
-be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
-
- if (be_physfn(adapter)) {
- memset(buf, 0, regs->len);
- be_cmd_get_regs(adapter, regs->len, buf);
- }
-}
-
-static int
-be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- struct be_eq_obj *rx_eq = &adapter->rx_obj[0].rx_eq;
- struct be_eq_obj *tx_eq = &adapter->tx_eq;
-
- coalesce->rx_coalesce_usecs = rx_eq->cur_eqd;
- coalesce->rx_coalesce_usecs_high = rx_eq->max_eqd;
- coalesce->rx_coalesce_usecs_low = rx_eq->min_eqd;
-
- coalesce->tx_coalesce_usecs = tx_eq->cur_eqd;
- coalesce->tx_coalesce_usecs_high = tx_eq->max_eqd;
- coalesce->tx_coalesce_usecs_low = tx_eq->min_eqd;
-
- coalesce->use_adaptive_rx_coalesce = rx_eq->enable_aic;
- coalesce->use_adaptive_tx_coalesce = tx_eq->enable_aic;
-
- return 0;
-}
-
-/* This routine sets the interrupt coalescing delay */
-static int
-be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- struct be_rx_obj *rxo;
- struct be_eq_obj *rx_eq;
- struct be_eq_obj *tx_eq = &adapter->tx_eq;
- u32 rx_max, rx_min, rx_cur;
- int status = 0, i;
- u32 tx_cur;
-
- if (coalesce->use_adaptive_tx_coalesce == 1)
- return -EINVAL;
-
- for_all_rx_queues(adapter, rxo, i) {
- rx_eq = &rxo->rx_eq;
-
- if (!rx_eq->enable_aic && coalesce->use_adaptive_rx_coalesce)
- rx_eq->cur_eqd = 0;
- rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
-
- rx_max = coalesce->rx_coalesce_usecs_high;
- rx_min = coalesce->rx_coalesce_usecs_low;
- rx_cur = coalesce->rx_coalesce_usecs;
-
- if (rx_eq->enable_aic) {
- if (rx_max > BE_MAX_EQD)
- rx_max = BE_MAX_EQD;
- if (rx_min > rx_max)
- rx_min = rx_max;
- rx_eq->max_eqd = rx_max;
- rx_eq->min_eqd = rx_min;
- if (rx_eq->cur_eqd > rx_max)
- rx_eq->cur_eqd = rx_max;
- if (rx_eq->cur_eqd < rx_min)
- rx_eq->cur_eqd = rx_min;
- } else {
- if (rx_cur > BE_MAX_EQD)
- rx_cur = BE_MAX_EQD;
- if (rx_eq->cur_eqd != rx_cur) {
- status = be_cmd_modify_eqd(adapter, rx_eq->q.id,
- rx_cur);
- if (!status)
- rx_eq->cur_eqd = rx_cur;
- }
- }
- }
-
- tx_cur = coalesce->tx_coalesce_usecs;
-
- if (tx_cur > BE_MAX_EQD)
- tx_cur = BE_MAX_EQD;
- if (tx_eq->cur_eqd != tx_cur) {
- status = be_cmd_modify_eqd(adapter, tx_eq->q.id, tx_cur);
- if (!status)
- tx_eq->cur_eqd = tx_cur;
- }
-
- return 0;
-}
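These two hooks back ethtool -c / ethtool -C; e.g. "ethtool -C eth0 adaptive-rx off rx-usecs 64" (interface name assumed) reaches be_set_coalesce() with use_adaptive_rx_coalesce clear and rx_coalesce_usecs = 64, which is clamped to BE_MAX_EQD and programmed through be_cmd_modify_eqd(). Adaptive TX coalescing is rejected outright, and all RX event queues are driven from the same ethtool values.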
-
-static void
-be_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, uint64_t *data)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- struct be_rx_obj *rxo;
- struct be_tx_obj *txo;
- void *p;
- unsigned int i, j, base = 0, start;
-
- for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
- p = (u8 *)&adapter->drv_stats + et_stats[i].offset;
- data[i] = *(u32 *)p;
- }
- base += ETHTOOL_STATS_NUM;
-
- for_all_rx_queues(adapter, rxo, j) {
- struct be_rx_stats *stats = rx_stats(rxo);
-
- do {
- start = u64_stats_fetch_begin_bh(&stats->sync);
- data[base] = stats->rx_bytes;
- data[base + 1] = stats->rx_pkts;
- } while (u64_stats_fetch_retry_bh(&stats->sync, start));
-
- for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
- p = (u8 *)stats + et_rx_stats[i].offset;
- data[base + i] = *(u32 *)p;
- }
- base += ETHTOOL_RXSTATS_NUM;
- }
-
- for_all_tx_queues(adapter, txo, j) {
- struct be_tx_stats *stats = tx_stats(txo);
-
- do {
- start = u64_stats_fetch_begin_bh(&stats->sync_compl);
- data[base] = stats->tx_compl;
- } while (u64_stats_fetch_retry_bh(&stats->sync_compl, start));
-
- do {
- start = u64_stats_fetch_begin_bh(&stats->sync);
- for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
- p = (u8 *)stats + et_tx_stats[i].offset;
- data[base + i] =
- (et_tx_stats[i].size == sizeof(u64)) ?
- *(u64 *)p : *(u32 *)p;
- }
- } while (u64_stats_fetch_retry_bh(&stats->sync, start));
- base += ETHTOOL_TXSTATS_NUM;
- }
-}
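The u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() pairs above re-read the snapshot until no writer raced with it; on 64-bit builds the underlying seqcount compiles away and each loop body runs once. Note the TX side uses two sync domains: tx_compl is protected by sync_compl (bumped in the completion path) while the remaining counters sit under sync (bumped at transmit time).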
-
-static void
-be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
- uint8_t *data)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- int i, j;
-
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
- memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
- for (i = 0; i < adapter->num_rx_qs; i++) {
- for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) {
- sprintf(data, "rxq%d: %s", i,
- et_rx_stats[j].desc);
- data += ETH_GSTRING_LEN;
- }
- }
- for (i = 0; i < adapter->num_tx_qs; i++) {
- for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) {
- sprintf(data, "txq%d: %s", i,
- et_tx_stats[j].desc);
- data += ETH_GSTRING_LEN;
- }
- }
- break;
- case ETH_SS_TEST:
- for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
- memcpy(data, et_self_tests[i], ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
- break;
- }
-}
-
-static int be_get_sset_count(struct net_device *netdev, int stringset)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
-
- switch (stringset) {
- case ETH_SS_TEST:
- return ETHTOOL_TESTS_NUM;
- case ETH_SS_STATS:
- return ETHTOOL_STATS_NUM +
- adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
- adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
- default:
- return -EINVAL;
- }
-}
-
-static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- struct be_phy_info phy_info;
- u8 mac_speed = 0;
- u16 link_speed = 0;
- int status;
-
- if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
- status = be_cmd_link_status_query(adapter, &mac_speed,
- &link_speed, 0);
-
- /* link_speed is in units of 10 Mbps */
- if (link_speed) {
- ethtool_cmd_speed_set(ecmd, link_speed*10);
- } else {
- switch (mac_speed) {
- case PHY_LINK_SPEED_10MBPS:
- ethtool_cmd_speed_set(ecmd, SPEED_10);
- break;
- case PHY_LINK_SPEED_100MBPS:
- ethtool_cmd_speed_set(ecmd, SPEED_100);
- break;
- case PHY_LINK_SPEED_1GBPS:
- ethtool_cmd_speed_set(ecmd, SPEED_1000);
- break;
- case PHY_LINK_SPEED_10GBPS:
- ethtool_cmd_speed_set(ecmd, SPEED_10000);
- break;
- case PHY_LINK_SPEED_ZERO:
- ethtool_cmd_speed_set(ecmd, 0);
- break;
- }
- }
-
- status = be_cmd_get_phy_info(adapter, &phy_info);
- if (!status) {
- switch (phy_info.interface_type) {
- case PHY_TYPE_XFP_10GB:
- case PHY_TYPE_SFP_1GB:
- case PHY_TYPE_SFP_PLUS_10GB:
- ecmd->port = PORT_FIBRE;
- break;
- default:
- ecmd->port = PORT_TP;
- break;
- }
-
- switch (phy_info.interface_type) {
- case PHY_TYPE_KR_10GB:
- case PHY_TYPE_KX4_10GB:
- ecmd->autoneg = AUTONEG_ENABLE;
- ecmd->transceiver = XCVR_INTERNAL;
- break;
- default:
- ecmd->autoneg = AUTONEG_DISABLE;
- ecmd->transceiver = XCVR_EXTERNAL;
- break;
- }
- }
-
- /* Save for future use */
- adapter->link_speed = ethtool_cmd_speed(ecmd);
- adapter->port_type = ecmd->port;
- adapter->transceiver = ecmd->transceiver;
- adapter->autoneg = ecmd->autoneg;
- } else {
- ethtool_cmd_speed_set(ecmd, adapter->link_speed);
- ecmd->port = adapter->port_type;
- ecmd->transceiver = adapter->transceiver;
- ecmd->autoneg = adapter->autoneg;
- }
-
- ecmd->duplex = DUPLEX_FULL;
- ecmd->phy_address = adapter->port_num;
- switch (ecmd->port) {
- case PORT_FIBRE:
- ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
- break;
- case PORT_TP:
- ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
- break;
- case PORT_AUI:
- ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_AUI);
- break;
- }
-
- if (ecmd->autoneg) {
- ecmd->supported |= SUPPORTED_1000baseT_Full;
- ecmd->supported |= SUPPORTED_Autoneg;
- ecmd->advertising |= (ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full);
- }
-
- return 0;
-}
-
-static void
-be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
-
- ring->rx_max_pending = adapter->rx_obj[0].q.len;
- ring->tx_max_pending = adapter->tx_obj[0].q.len;
-
- ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used);
- ring->tx_pending = atomic_read(&adapter->tx_obj[0].q.used);
-}
-
-static void
-be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
-
- be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
- ecmd->autoneg = 0;
-}
-
-static int
-be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- int status;
-
- if (ecmd->autoneg != 0)
- return -EINVAL;
- adapter->tx_fc = ecmd->tx_pause;
- adapter->rx_fc = ecmd->rx_pause;
-
- status = be_cmd_set_flow_control(adapter,
- adapter->tx_fc, adapter->rx_fc);
- if (status)
- dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
-
- return status;
-}
-
-static int
-be_set_phys_id(struct net_device *netdev,
- enum ethtool_phys_id_state state)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
-
- switch (state) {
- case ETHTOOL_ID_ACTIVE:
- be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
- &adapter->beacon_state);
- return 1; /* cycle on/off once per second */
-
- case ETHTOOL_ID_ON:
- be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
- BEACON_STATE_ENABLED);
- break;
-
- case ETHTOOL_ID_OFF:
- be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
- BEACON_STATE_DISABLED);
- break;
-
- case ETHTOOL_ID_INACTIVE:
- be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
- adapter->beacon_state);
- }
-
- return 0;
-}
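This implements ethtool -p port identification (e.g. "ethtool -p eth0 5", interface name assumed): the current beacon state is saved on ETHTOOL_ID_ACTIVE, the ethtool core then calls ON/OFF once per second, and the saved state is restored on ETHTOOL_ID_INACTIVE.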
-
-static bool
-be_is_wol_supported(struct be_adapter *adapter)
-{
- if (!be_physfn(adapter))
- return false;
- else
- return true;
-}
-
-static void
-be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
-
- if (be_is_wol_supported(adapter))
- wol->supported = WAKE_MAGIC;
-
- if (adapter->wol)
- wol->wolopts = WAKE_MAGIC;
- else
- wol->wolopts = 0;
- memset(&wol->sopass, 0, sizeof(wol->sopass));
-}
-
-static int
-be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
-
- if (wol->wolopts & ~WAKE_MAGIC)
- return -EINVAL;
-
- if ((wol->wolopts & WAKE_MAGIC) && be_is_wol_supported(adapter))
- adapter->wol = true;
- else
- adapter->wol = false;
-
- return 0;
-}
-
-static int
-be_test_ddr_dma(struct be_adapter *adapter)
-{
- int ret, i;
- struct be_dma_mem ddrdma_cmd;
- static const u64 pattern[2] = {
- 0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL
- };
-
- ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
- ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
- &ddrdma_cmd.dma, GFP_KERNEL);
- if (!ddrdma_cmd.va) {
- dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
- return -ENOMEM;
- }
-
- for (i = 0; i < 2; i++) {
- ret = be_cmd_ddr_dma_test(adapter, pattern[i],
- 4096, &ddrdma_cmd);
- if (ret != 0)
- goto err;
- }
-
-err:
- dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
- ddrdma_cmd.dma);
- return ret;
-}
-
-static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
- u64 *status)
-{
- be_cmd_set_loopback(adapter, adapter->hba_port_num,
- loopback_type, 1);
- *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
- loopback_type, 1500,
- 2, 0xabc);
- be_cmd_set_loopback(adapter, adapter->hba_port_num,
- BE_NO_LOOPBACK, 1);
- return *status;
-}
-
-static void
-be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- u8 mac_speed = 0;
- u16 qos_link_speed = 0;
-
- memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
-
- if (test->flags & ETH_TEST_FL_OFFLINE) {
- if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
- &data[0]) != 0) {
- test->flags |= ETH_TEST_FL_FAILED;
- }
- if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
- &data[1]) != 0) {
- test->flags |= ETH_TEST_FL_FAILED;
- }
- if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
- &data[2]) != 0) {
- test->flags |= ETH_TEST_FL_FAILED;
- }
- }
-
- if (be_test_ddr_dma(adapter) != 0) {
- data[3] = 1;
- test->flags |= ETH_TEST_FL_FAILED;
- }
-
- if (be_cmd_link_status_query(adapter, &mac_speed,
- &qos_link_speed, 0) != 0) {
- test->flags |= ETH_TEST_FL_FAILED;
- data[4] = -1;
- } else if (!mac_speed) {
- test->flags |= ETH_TEST_FL_FAILED;
- data[4] = 1;
- }
-}
-
-static int
-be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- char file_name[ETHTOOL_FLASH_MAX_FILENAME];
-
-	/* efl->data arrives from user space and may lack a NUL terminator */
-	efl->data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
-	strcpy(file_name, efl->data);
-
- return be_load_fw(adapter, file_name);
-}
-
-static int
-be_get_eeprom_len(struct net_device *netdev)
-{
- return BE_READ_SEEPROM_LEN;
-}
-
-static int
-be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
- uint8_t *data)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- struct be_dma_mem eeprom_cmd;
- struct be_cmd_resp_seeprom_read *resp;
- int status;
-
- if (!eeprom->len)
- return -EINVAL;
-
- eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
-
- memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
- eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
- eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
- &eeprom_cmd.dma, GFP_KERNEL);
-
- if (!eeprom_cmd.va) {
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure. Could not read eeprom\n");
- return -ENOMEM;
- }
-
- status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
-
- if (!status) {
- resp = eeprom_cmd.va;
- memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
- }
- dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
- eeprom_cmd.dma);
-
- return status;
-}
-
-const struct ethtool_ops be_ethtool_ops = {
- .get_settings = be_get_settings,
- .get_drvinfo = be_get_drvinfo,
- .get_wol = be_get_wol,
- .set_wol = be_set_wol,
- .get_link = ethtool_op_get_link,
- .get_eeprom_len = be_get_eeprom_len,
- .get_eeprom = be_read_eeprom,
- .get_coalesce = be_get_coalesce,
- .set_coalesce = be_set_coalesce,
- .get_ringparam = be_get_ringparam,
- .get_pauseparam = be_get_pauseparam,
- .set_pauseparam = be_set_pauseparam,
- .get_strings = be_get_stat_strings,
- .set_phys_id = be_set_phys_id,
- .get_sset_count = be_get_sset_count,
- .get_ethtool_stats = be_get_ethtool_stats,
- .get_regs_len = be_get_reg_len,
- .get_regs = be_get_regs,
- .flash_device = be_do_flash,
- .self_test = be_self_test,
-};
+++ /dev/null
-/*
- * Copyright (C) 2005 - 2011 Emulex
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
- * Public License is included in this distribution in the file called COPYING.
- *
- * Contact Information:
- * linux-drivers@emulex.com
- *
- * Emulex
- * 3333 Susan Street
- * Costa Mesa, CA 92626
- */
-
-/********* Mailbox door bell *************/
-/* Used for driver communication with the FW.
- * The software must write this register twice to post any command. First,
- * it writes the register with hi=1 and the upper bits of the physical address
- * for the MAILBOX structure. Software must poll the ready bit until this
- * is acknowledged. Then, software writes the register with hi=0 with the lower
- * bits in the address. It must poll the ready bit until the command is
- * complete. Upon completion, the MAILBOX will contain a valid completion
- * queue entry.
- */
-#define MPU_MAILBOX_DB_OFFSET 0x160
-#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */
-#define MPU_MAILBOX_DB_HI_MASK 0x2 /* bit 1 */
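The two-phase post described above looks roughly like this (sketch only; the real sequence lives in be_cmds.c, not shown in this hunk, and the address bit-packing is simplified: upper_addr_bits/lower_addr_bits are placeholders, not real symbols):

	u32 val = MPU_MAILBOX_DB_HI_MASK;	/* hi = 1 */
	val |= upper_addr_bits;		/* upper bits of MAILBOX phys addr */
	iowrite32(val, adapter->db + MPU_MAILBOX_DB_OFFSET);
	/* poll until MPU_MAILBOX_DB_RDY_MASK is set */
	val = lower_addr_bits;		/* hi = 0, lower address bits */
	iowrite32(val, adapter->db + MPU_MAILBOX_DB_OFFSET);
	/* poll ready again; the MAILBOX now holds a valid completion entry */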
-
-#define MPU_EP_CONTROL 0
-
-/********** MPU semaphore ******************/
-#define MPU_EP_SEMAPHORE_OFFSET 0xac
-#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET 0x400
-#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
-#define EP_SEMAPHORE_POST_ERR_MASK 0x1
-#define EP_SEMAPHORE_POST_ERR_SHIFT 31
-
-/* MPU semaphore POST stage values */
-#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting go-ahead from host */
-#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahead to FW */
-#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
-#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
-
-
-/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */
-#define SLIPORT_STATUS_OFFSET 0x404
-#define SLIPORT_CONTROL_OFFSET 0x408
-
-#define SLIPORT_STATUS_ERR_MASK 0x80000000
-#define SLIPORT_STATUS_RN_MASK 0x01000000
-#define SLIPORT_STATUS_RDY_MASK 0x00800000
-
-
-#define SLI_PORT_CONTROL_IP_MASK 0x08000000
-
-/********* Memory BAR register ************/
-#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
-/* Host Interrupt Enable: if set, interrupts are enabled even though "PCI
- * Interrupt Disable" may still globally block them. This gives the device
- * driver a mechanism to block all interrupts atomically without having to
- * arbitrate for the PCI Interrupt Disable bit with the OS.
- */
-#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */
-
-/********* Power management (WOL) **********/
-#define PCICFG_PM_CONTROL_OFFSET 0x44
-#define PCICFG_PM_CONTROL_MASK 0x108 /* bits 3 & 8 */
-
-/********* Online Control Registers *******/
-#define PCICFG_ONLINE0 0xB0
-#define PCICFG_ONLINE1 0xB4
-
-/********* UE Status and Mask Registers ***/
-#define PCICFG_UE_STATUS_LOW 0xA0
-#define PCICFG_UE_STATUS_HIGH 0xA4
-#define PCICFG_UE_STATUS_LOW_MASK 0xA8
-#define PCICFG_UE_STATUS_HI_MASK 0xAC
-
-/******** SLI_INTF ***********************/
-#define SLI_INTF_REG_OFFSET 0x58
-#define SLI_INTF_VALID_MASK 0xE0000000
-#define SLI_INTF_VALID 0xC0000000
-#define SLI_INTF_HINT2_MASK 0x1F000000
-#define SLI_INTF_HINT2_SHIFT 24
-#define SLI_INTF_HINT1_MASK 0x00FF0000
-#define SLI_INTF_HINT1_SHIFT 16
-#define SLI_INTF_FAMILY_MASK 0x00000F00
-#define SLI_INTF_FAMILY_SHIFT 8
-#define SLI_INTF_IF_TYPE_MASK 0x0000F000
-#define SLI_INTF_IF_TYPE_SHIFT 12
-#define SLI_INTF_REV_MASK 0x000000F0
-#define SLI_INTF_REV_SHIFT 4
-#define SLI_INTF_FT_MASK 0x00000001
-
-
-/* SLI family */
-#define BE_SLI_FAMILY 0x0
-#define LANCER_A0_SLI_FAMILY 0xA
-
-
-/********* ISR0 Register offset **********/
-#define CEV_ISR0_OFFSET 0xC18
-#define CEV_ISR_SIZE 4
-
-/********* Event Q door bell *************/
-#define DB_EQ_OFFSET DB_CQ_OFFSET
-#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
-#define DB_EQ_RING_ID_EXT_MASK 0x3e00 /* bits 9-13 */
-#define DB_EQ_RING_ID_EXT_MASK_SHIFT (2) /* qid bits 9-13 placing at 11-15 */
-
-/* Clear the interrupt for this eq */
-#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
-/* Must be 1 */
-#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */
-/* Number of event entries processed */
-#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
-/* Rearm bit */
-#define DB_EQ_REARM_SHIFT (29) /* bit 29 */
-
-/********* Compl Q door bell *************/
-#define DB_CQ_OFFSET 0x120
-#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
-#define DB_CQ_RING_ID_EXT_MASK 0x7C00 /* bits 10-14 */
-#define DB_CQ_RING_ID_EXT_MASK_SHIFT (1) /* qid bits 10-14
- placing at 11-15 */
-
-/* Number of event entries processed */
-#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
-/* Rearm bit */
-#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
-
-/********** TX ULP door bell *************/
-#define DB_TXULP1_OFFSET 0x60
-#define DB_TXULP_RING_ID_MASK 0x7FF /* bits 0 - 10 */
-/* Number of tx entries posted */
-#define DB_TXULP_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
-#define DB_TXULP_NUM_POSTED_MASK 0x3FFF /* bits 16 - 29 */
-
-/********** RQ(erx) door bell ************/
-#define DB_RQ_OFFSET 0x100
-#define DB_RQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
-/* Number of rx frags posted */
-#define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */
-
-/********** MCC door bell ************/
-#define DB_MCCQ_OFFSET 0x140
-#define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */
-/* Number of entries posted */
-#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
-
-/********** SRIOV VF PCICFG OFFSET ********/
-#define SRIOV_VF_PCICFG_OFFSET (4096)
-
-/********** FAT TABLE ********/
-#define RETRIEVE_FAT 0
-#define QUERY_FAT 1
-
-/* Flashrom related descriptors */
-#define IMAGE_TYPE_FIRMWARE 160
-#define IMAGE_TYPE_BOOTCODE 224
-#define IMAGE_TYPE_OPTIONROM 32
-
-#define NUM_FLASHDIR_ENTRIES 32
-
-#define IMG_TYPE_ISCSI_ACTIVE 0
-#define IMG_TYPE_REDBOOT 1
-#define IMG_TYPE_BIOS 2
-#define IMG_TYPE_PXE_BIOS 3
-#define IMG_TYPE_FCOE_BIOS 8
-#define IMG_TYPE_ISCSI_BACKUP 9
-#define IMG_TYPE_FCOE_FW_ACTIVE 10
-#define IMG_TYPE_FCOE_FW_BACKUP 11
-#define IMG_TYPE_NCSI_FW 13
-#define IMG_TYPE_PHY_FW 99
-#define TN_8022 13
-
-#define ILLEGAL_IOCTL_REQ 2
-#define FLASHROM_OPER_PHY_FLASH 9
-#define FLASHROM_OPER_PHY_SAVE 10
-#define FLASHROM_OPER_FLASH 1
-#define FLASHROM_OPER_SAVE 2
-#define FLASHROM_OPER_REPORT 4
-
-#define FLASH_IMAGE_MAX_SIZE_g2 (1310720) /* Max firmware image size */
-#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 (262144) /* Max OPTION ROM image sz */
-#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 (262144) /* Max Redboot image sz */
-#define FLASH_IMAGE_MAX_SIZE_g3 (2097152) /* Max firmware image size */
-#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 (524288) /* Max OPTION ROM image sz */
-#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 (1048576) /* Max Redboot image sz */
-#define FLASH_NCSI_IMAGE_MAX_SIZE_g3 (262144)
-#define FLASH_PHY_FW_IMAGE_MAX_SIZE_g3 262144
-
-#define FLASH_NCSI_MAGIC (0x16032009)
-#define FLASH_NCSI_DISABLED (0)
-#define FLASH_NCSI_ENABLED (1)
-
-#define FLASH_NCSI_BITFILE_HDR_OFFSET (0x600000)
-
-/* Offsets for components on Flash. */
-#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 (1048576)
-#define FLASH_iSCSI_BACKUP_IMAGE_START_g2 (2359296)
-#define FLASH_FCoE_PRIMARY_IMAGE_START_g2 (3670016)
-#define FLASH_FCoE_BACKUP_IMAGE_START_g2 (4980736)
-#define FLASH_iSCSI_BIOS_START_g2 (7340032)
-#define FLASH_PXE_BIOS_START_g2 (7864320)
-#define FLASH_FCoE_BIOS_START_g2 (524288)
-#define FLASH_REDBOOT_START_g2 (0)
-
-#define FLASH_NCSI_START_g3 (15990784)
-#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 (2097152)
-#define FLASH_iSCSI_BACKUP_IMAGE_START_g3 (4194304)
-#define FLASH_FCoE_PRIMARY_IMAGE_START_g3 (6291456)
-#define FLASH_FCoE_BACKUP_IMAGE_START_g3 (8388608)
-#define FLASH_iSCSI_BIOS_START_g3 (12582912)
-#define FLASH_PXE_BIOS_START_g3 (13107200)
-#define FLASH_FCoE_BIOS_START_g3 (13631488)
-#define FLASH_REDBOOT_START_g3 (262144)
-#define FLASH_PHY_FW_START_g3 1310720
-
-/************* Rx Packet Type Encoding **************/
-#define BE_UNICAST_PACKET 0
-#define BE_MULTICAST_PACKET 1
-#define BE_BROADCAST_PACKET 2
-#define BE_RSVD_PACKET 3
-
-/*
- * BE descriptors: host memory data structures whose formats
- * are hardwired in BE silicon.
- */
-/* Event Queue Descriptor */
-#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */
-#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */
-#define EQ_ENTRY_RES_ID_SHIFT 16
-
-struct be_eq_entry {
- u32 evt;
-};
-
-/* TX Queue Descriptor */
-#define ETH_WRB_FRAG_LEN_MASK 0xFFFF
-struct be_eth_wrb {
- u32 frag_pa_hi; /* dword 0 */
- u32 frag_pa_lo; /* dword 1 */
- u32 rsvd0; /* dword 2 */
- u32 frag_len; /* dword 3: bits 0 - 15 */
-} __packed;
-
-/* Pseudo amap definition for eth_hdr_wrb in which each bit of the
- * actual structure is defined as a byte: used to calculate
- * offset/shift/mask of each field */
-struct amap_eth_hdr_wrb {
- u8 rsvd0[32]; /* dword 0 */
- u8 rsvd1[32]; /* dword 1 */
- u8 complete; /* dword 2 */
- u8 event;
- u8 crc;
- u8 forward;
- u8 lso6;
- u8 mgmt;
- u8 ipcs;
- u8 udpcs;
- u8 tcpcs;
- u8 lso;
- u8 vlan;
- u8 gso[2];
- u8 num_wrb[5];
- u8 lso_mss[14];
- u8 len[16]; /* dword 3 */
- u8 vlan_tag[16];
-} __packed;
-
-struct be_eth_hdr_wrb {
- u32 dw[4];
-};
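In the amap convention each u8 above stands for one bit of the real descriptor, so offsetof() yields a bit position and an array length a bit width. For example (sketch):

	/* 'len' sits at byte offset 96 of the pseudo struct, i.e. bit 96
	 * of the real header: dword 3, bits 0-15, width 16.
	 */
	offsetof(struct amap_eth_hdr_wrb, len);		/* == 96 */
	sizeof(((struct amap_eth_hdr_wrb *)0)->len);	/* == 16 */

The AMAP_SET_BITS() calls later in be_main.c rely on exactly this mapping.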
-
-/* TX Compl Queue Descriptor */
-
-/* Pseudo amap definition for eth_tx_compl in which each bit of the
- * actual structure is defined as a byte: used to calculate
- * offset/shift/mask of each field */
-struct amap_eth_tx_compl {
- u8 wrb_index[16]; /* dword 0 */
- u8 ct[2]; /* dword 0 */
- u8 port[2]; /* dword 0 */
- u8 rsvd0[8]; /* dword 0 */
- u8 status[4]; /* dword 0 */
- u8 user_bytes[16]; /* dword 1 */
- u8 nwh_bytes[8]; /* dword 1 */
- u8 lso; /* dword 1 */
- u8 cast_enc[2]; /* dword 1 */
- u8 rsvd1[5]; /* dword 1 */
- u8 rsvd2[32]; /* dword 2 */
- u8 pkts[16]; /* dword 3 */
- u8 ringid[11]; /* dword 3 */
- u8 hash_val[4]; /* dword 3 */
- u8 valid; /* dword 3 */
-} __packed;
-
-struct be_eth_tx_compl {
- u32 dw[4];
-};
-
-/* RX Queue Descriptor */
-struct be_eth_rx_d {
- u32 fragpa_hi;
- u32 fragpa_lo;
-};
-
-/* RX Compl Queue Descriptor */
-
-/* Pseudo amap definition for BE2 and BE3 legacy mode eth_rx_compl in which
- * each bit of the actual structure is defined as a byte: used to calculate
- * offset/shift/mask of each field */
-struct amap_eth_rx_compl_v0 {
- u8 vlan_tag[16]; /* dword 0 */
- u8 pktsize[14]; /* dword 0 */
- u8 port; /* dword 0 */
- u8 ip_opt; /* dword 0 */
- u8 err; /* dword 1 */
- u8 rsshp; /* dword 1 */
- u8 ipf; /* dword 1 */
- u8 tcpf; /* dword 1 */
- u8 udpf; /* dword 1 */
- u8 ipcksm; /* dword 1 */
- u8 l4_cksm; /* dword 1 */
- u8 ip_version; /* dword 1 */
- u8 macdst[6]; /* dword 1 */
- u8 vtp; /* dword 1 */
- u8 rsvd0; /* dword 1 */
- u8 fragndx[10]; /* dword 1 */
- u8 ct[2]; /* dword 1 */
- u8 sw; /* dword 1 */
- u8 numfrags[3]; /* dword 1 */
- u8 rss_flush; /* dword 2 */
- u8 cast_enc[2]; /* dword 2 */
- u8 vtm; /* dword 2 */
- u8 rss_bank; /* dword 2 */
- u8 rsvd1[23]; /* dword 2 */
- u8 lro_pkt; /* dword 2 */
- u8 rsvd2[2]; /* dword 2 */
- u8 valid; /* dword 2 */
- u8 rsshash[32]; /* dword 3 */
-} __packed;
-
-/* Pseudo amap definition for BE3 native mode eth_rx_compl in which
- * each bit of the actual structure is defined as a byte: used to calculate
- * offset/shift/mask of each field */
-struct amap_eth_rx_compl_v1 {
- u8 vlan_tag[16]; /* dword 0 */
- u8 pktsize[14]; /* dword 0 */
- u8 vtp; /* dword 0 */
- u8 ip_opt; /* dword 0 */
- u8 err; /* dword 1 */
- u8 rsshp; /* dword 1 */
- u8 ipf; /* dword 1 */
- u8 tcpf; /* dword 1 */
- u8 udpf; /* dword 1 */
- u8 ipcksm; /* dword 1 */
- u8 l4_cksm; /* dword 1 */
- u8 ip_version; /* dword 1 */
- u8 macdst[7]; /* dword 1 */
- u8 rsvd0; /* dword 1 */
- u8 fragndx[10]; /* dword 1 */
- u8 ct[2]; /* dword 1 */
- u8 sw; /* dword 1 */
- u8 numfrags[3]; /* dword 1 */
- u8 rss_flush; /* dword 2 */
- u8 cast_enc[2]; /* dword 2 */
- u8 vtm; /* dword 2 */
- u8 rss_bank; /* dword 2 */
- u8 port[2]; /* dword 2 */
- u8 vntagp; /* dword 2 */
- u8 header_len[8]; /* dword 2 */
- u8 header_split[2]; /* dword 2 */
- u8 rsvd1[13]; /* dword 2 */
- u8 valid; /* dword 2 */
- u8 rsshash[32]; /* dword 3 */
-} __packed;
-
-struct be_eth_rx_compl {
- u32 dw[4];
-};
-
-struct mgmt_hba_attribs {
- u8 flashrom_version_string[32];
- u8 manufacturer_name[32];
- u32 supported_modes;
- u32 rsvd0[3];
- u8 ncsi_ver_string[12];
- u32 default_extended_timeout;
- u8 controller_model_number[32];
- u8 controller_description[64];
- u8 controller_serial_number[32];
- u8 ip_version_string[32];
- u8 firmware_version_string[32];
- u8 bios_version_string[32];
- u8 redboot_version_string[32];
- u8 driver_version_string[32];
- u8 fw_on_flash_version_string[32];
- u32 functionalities_supported;
- u16 max_cdblength;
- u8 asic_revision;
- u8 generational_guid[16];
- u8 hba_port_count;
- u16 default_link_down_timeout;
- u8 iscsi_ver_min_max;
- u8 multifunction_device;
- u8 cache_valid;
- u8 hba_status;
- u8 max_domains_supported;
- u8 phy_port;
- u32 firmware_post_status;
- u32 hba_mtu[8];
- u32 rsvd1[4];
-};
-
-struct mgmt_controller_attrib {
- struct mgmt_hba_attribs hba_attribs;
- u16 pci_vendor_id;
- u16 pci_device_id;
- u16 pci_sub_vendor_id;
- u16 pci_sub_system_id;
- u8 pci_bus_number;
- u8 pci_device_number;
- u8 pci_function_number;
- u8 interface_type;
- u64 unique_identifier;
- u32 rsvd0[5];
-};
-
-struct controller_id {
- u32 vendor;
- u32 device;
- u32 subvendor;
- u32 subdevice;
-};
-
-struct flash_comp {
- unsigned long offset;
- int optype;
- int size;
-};
-
-struct image_hdr {
- u32 imageid;
- u32 imageoffset;
- u32 imagelength;
- u32 image_checksum;
- u8 image_version[32];
-};
-struct flash_file_hdr_g2 {
- u8 sign[32];
- u32 cksum;
- u32 antidote;
- struct controller_id cont_id;
- u32 file_len;
- u32 chunk_num;
- u32 total_chunks;
- u32 num_imgs;
- u8 build[24];
-};
-
-struct flash_file_hdr_g3 {
- u8 sign[52];
- u8 ufi_version[4];
- u32 file_len;
- u32 cksum;
- u32 antidote;
- u32 num_imgs;
- u8 build[24];
- u8 rsvd[32];
-};
-
-struct flash_section_hdr {
- u32 format_rev;
- u32 cksum;
- u32 antidote;
- u32 build_no;
- u8 id_string[64];
- u32 active_entry_mask;
- u32 valid_entry_mask;
- u32 org_content_mask;
- u32 rsvd0;
- u32 rsvd1;
- u32 rsvd2;
- u32 rsvd3;
- u32 rsvd4;
-};
-
-struct flash_section_entry {
- u32 type;
- u32 offset;
- u32 pad_size;
- u32 image_size;
- u32 cksum;
- u32 entry_point;
- u32 rsvd0;
- u32 rsvd1;
- u8 ver_data[32];
-};
-
-struct flash_section_info {
- u8 cookie[32];
- struct flash_section_hdr fsec_hdr;
- struct flash_section_entry fsec_entry[32];
-};
+++ /dev/null
-/*
- * Copyright (C) 2005 - 2011 Emulex
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
- * Public License is included in this distribution in the file called COPYING.
- *
- * Contact Information:
- * linux-drivers@emulex.com
- *
- * Emulex
- * 3333 Susan Street
- * Costa Mesa, CA 92626
- */
-
-#include <linux/prefetch.h>
-#include "be.h"
-#include "be_cmds.h"
-#include <asm/div64.h>
-
-MODULE_VERSION(DRV_VER);
-MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
-MODULE_AUTHOR("ServerEngines Corporation");
-MODULE_LICENSE("GPL");
-
-static ushort rx_frag_size = 2048;
-static unsigned int num_vfs;
-module_param(rx_frag_size, ushort, S_IRUGO);
-module_param(num_vfs, uint, S_IRUGO);
-MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
-MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
-
-static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
- { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
- { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
- { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
- { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
- { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
- { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, be_dev_ids);
-/* UE Status Low CSR */
-static const char * const ue_status_low_desc[] = {
- "CEV",
- "CTX",
- "DBUF",
- "ERX",
- "Host",
- "MPU",
- "NDMA",
- "PTC ",
- "RDMA ",
- "RXF ",
- "RXIPS ",
- "RXULP0 ",
- "RXULP1 ",
- "RXULP2 ",
- "TIM ",
- "TPOST ",
- "TPRE ",
- "TXIPS ",
- "TXULP0 ",
- "TXULP1 ",
- "UC ",
- "WDMA ",
- "TXULP2 ",
- "HOST1 ",
- "P0_OB_LINK ",
- "P1_OB_LINK ",
- "HOST_GPIO ",
- "MBOX ",
- "AXGMAC0",
- "AXGMAC1",
- "JTAG",
- "MPU_INTPEND"
-};
-/* UE Status High CSR */
-static const char * const ue_status_hi_desc[] = {
- "LPCMEMHOST",
- "MGMT_MAC",
- "PCS0ONLINE",
- "MPU_IRAM",
- "PCS1ONLINE",
- "PCTL0",
- "PCTL1",
- "PMEM",
- "RR",
- "TXPB",
- "RXPP",
- "XAUI",
- "TXP",
- "ARM",
- "IPC",
- "HOST2",
- "HOST3",
- "HOST4",
- "HOST5",
- "HOST6",
- "HOST7",
- "HOST8",
- "HOST9",
- "NETC",
- "Unknown",
- "Unknown",
- "Unknown",
- "Unknown",
- "Unknown",
- "Unknown",
- "Unknown",
- "Unknown"
-};
-
-static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
-{
- struct be_dma_mem *mem = &q->dma_mem;
- if (mem->va)
- dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
- mem->dma);
-}
-
-static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
- u16 len, u16 entry_size)
-{
- struct be_dma_mem *mem = &q->dma_mem;
-
- memset(q, 0, sizeof(*q));
- q->len = len;
- q->entry_size = entry_size;
- mem->size = len * entry_size;
- mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
- GFP_KERNEL);
- if (!mem->va)
- return -1;
- memset(mem->va, 0, mem->size);
- return 0;
-}
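A typical caller pairs a ring length with its descriptor size, e.g. (as done elsewhere in be_main.c when creating an event queue):

	status = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				sizeof(struct be_eq_entry));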
-
-static void be_intr_set(struct be_adapter *adapter, bool enable)
-{
- u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
- u32 reg = ioread32(addr);
- u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
-
- if (adapter->eeh_err)
- return;
-
- if (!enabled && enable)
- reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
- else if (enabled && !enable)
- reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
- else
- return;
-
- iowrite32(reg, addr);
-}
-
-static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
-{
- u32 val = 0;
- val |= qid & DB_RQ_RING_ID_MASK;
- val |= posted << DB_RQ_NUM_POSTED_SHIFT;
-
- wmb();
- iowrite32(val, adapter->db + DB_RQ_OFFSET);
-}
-
-static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
-{
- u32 val = 0;
- val |= qid & DB_TXULP_RING_ID_MASK;
- val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
-
- wmb();
- iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
-}
-
-static void be_eq_notify(struct be_adapter *adapter, u16 qid,
- bool arm, bool clear_int, u16 num_popped)
-{
- u32 val = 0;
- val |= qid & DB_EQ_RING_ID_MASK;
- val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
- DB_EQ_RING_ID_EXT_MASK_SHIFT);
-
- if (adapter->eeh_err)
- return;
-
- if (arm)
- val |= 1 << DB_EQ_REARM_SHIFT;
- if (clear_int)
- val |= 1 << DB_EQ_CLR_SHIFT;
- val |= 1 << DB_EQ_EVNT_SHIFT;
- val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
- iowrite32(val, adapter->db + DB_EQ_OFFSET);
-}
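Worked example of the ring-id split above: for qid 0x1234, bits 0-8 (0x034) are written as-is, while bits 9-13 (0x1200, selected by DB_EQ_RING_ID_EXT_MASK) are shifted left by DB_EQ_RING_ID_EXT_MASK_SHIFT to land in doorbell bits 11-15. be_cq_notify() below applies the same scheme with a one-bit shift.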
-
-void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
-{
- u32 val = 0;
- val |= qid & DB_CQ_RING_ID_MASK;
- val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
- DB_CQ_RING_ID_EXT_MASK_SHIFT);
-
- if (adapter->eeh_err)
- return;
-
- if (arm)
- val |= 1 << DB_CQ_REARM_SHIFT;
- val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
- iowrite32(val, adapter->db + DB_CQ_OFFSET);
-}
-
-static int be_mac_addr_set(struct net_device *netdev, void *p)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- struct sockaddr *addr = p;
- int status = 0;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- /* MAC addr configuration will be done in hardware for VFs
- * by their corresponding PFs. Just copy to netdev addr here
- */
- if (!be_physfn(adapter))
- goto netdev_addr;
-
- status = be_cmd_pmac_del(adapter, adapter->if_handle,
- adapter->pmac_id, 0);
- if (status)
- return status;
-
- status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
- adapter->if_handle, &adapter->pmac_id, 0);
-netdev_addr:
- if (!status)
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-
- return status;
-}
-
-static void populate_be2_stats(struct be_adapter *adapter)
-{
- struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
- struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
- struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
- struct be_port_rxf_stats_v0 *port_stats =
- &rxf_stats->port[adapter->port_num];
- struct be_drv_stats *drvs = &adapter->drv_stats;
-
- be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
- drvs->rx_pause_frames = port_stats->rx_pause_frames;
- drvs->rx_crc_errors = port_stats->rx_crc_errors;
- drvs->rx_control_frames = port_stats->rx_control_frames;
- drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
- drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
- drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
- drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
- drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
- drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
- drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
- drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
- drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
- drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
- drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
- drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
- drvs->rx_dropped_header_too_small =
- port_stats->rx_dropped_header_too_small;
- drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
- drvs->rx_alignment_symbol_errors =
- port_stats->rx_alignment_symbol_errors;
-
- drvs->tx_pauseframes = port_stats->tx_pauseframes;
- drvs->tx_controlframes = port_stats->tx_controlframes;
-
- if (adapter->port_num)
- drvs->jabber_events = rxf_stats->port1_jabber_events;
- else
- drvs->jabber_events = rxf_stats->port0_jabber_events;
- drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
- drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
- drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
- drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
- drvs->forwarded_packets = rxf_stats->forwarded_packets;
- drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
- drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
- drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
- adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
-}
-
-static void populate_be3_stats(struct be_adapter *adapter)
-{
- struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
- struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
- struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
- struct be_port_rxf_stats_v1 *port_stats =
- &rxf_stats->port[adapter->port_num];
- struct be_drv_stats *drvs = &adapter->drv_stats;
-
- be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
- drvs->rx_pause_frames = port_stats->rx_pause_frames;
- drvs->rx_crc_errors = port_stats->rx_crc_errors;
- drvs->rx_control_frames = port_stats->rx_control_frames;
- drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
- drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
- drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
- drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
- drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
- drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
- drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
- drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
- drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
- drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
- drvs->rx_dropped_header_too_small =
- port_stats->rx_dropped_header_too_small;
- drvs->rx_input_fifo_overflow_drop =
- port_stats->rx_input_fifo_overflow_drop;
- drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
- drvs->rx_alignment_symbol_errors =
- port_stats->rx_alignment_symbol_errors;
- drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
- drvs->tx_pauseframes = port_stats->tx_pauseframes;
- drvs->tx_controlframes = port_stats->tx_controlframes;
- drvs->jabber_events = port_stats->jabber_events;
- drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
- drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
- drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
- drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
- drvs->forwarded_packets = rxf_stats->forwarded_packets;
- drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
- drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
- drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
- adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
-}
-
-static void populate_lancer_stats(struct be_adapter *adapter)
-{
- struct be_drv_stats *drvs = &adapter->drv_stats;
- struct lancer_pport_stats *pport_stats =
- pport_stats_from_cmd(adapter);
-
- be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
- drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
- drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
- drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
- drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
- drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
- drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
- drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
- drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
- drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
- drvs->rx_dropped_tcp_length =
- pport_stats->rx_dropped_invalid_tcp_length;
- drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
- drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
- drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
- drvs->rx_dropped_header_too_small =
- pport_stats->rx_dropped_header_too_small;
- drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
- drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
- drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
- drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
- drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
- drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
- drvs->jabber_events = pport_stats->rx_jabbers;
- drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
- drvs->forwarded_packets = pport_stats->num_forwards_lo;
- drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
- drvs->rx_drops_too_many_frags =
- pport_stats->rx_drops_too_many_frags_lo;
-}
-
-void be_parse_stats(struct be_adapter *adapter)
-{
- struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
- struct be_rx_obj *rxo;
- int i;
-
- if (adapter->generation == BE_GEN3) {
- if (lancer_chip(adapter))
- populate_lancer_stats(adapter);
- else
- populate_be3_stats(adapter);
- } else {
- populate_be2_stats(adapter);
- }
-
- /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
- for_all_rx_queues(adapter, rxo, i)
- rx_stats(rxo)->rx_drops_no_frags =
- erx->rx_drops_no_fragments[rxo->q.id];
-}
-
-static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- struct be_drv_stats *drvs = &adapter->drv_stats;
- struct be_rx_obj *rxo;
- struct be_tx_obj *txo;
- u64 pkts, bytes;
- unsigned int start;
- int i;
-
- for_all_rx_queues(adapter, rxo, i) {
- const struct be_rx_stats *rx_stats = rx_stats(rxo);
- do {
- start = u64_stats_fetch_begin_bh(&rx_stats->sync);
- pkts = rx_stats(rxo)->rx_pkts;
- bytes = rx_stats(rxo)->rx_bytes;
- } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
- stats->rx_packets += pkts;
- stats->rx_bytes += bytes;
- stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
- stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
- rx_stats(rxo)->rx_drops_no_frags;
- }
-
- for_all_tx_queues(adapter, txo, i) {
- const struct be_tx_stats *tx_stats = tx_stats(txo);
- do {
- start = u64_stats_fetch_begin_bh(&tx_stats->sync);
- pkts = tx_stats(txo)->tx_pkts;
- bytes = tx_stats(txo)->tx_bytes;
- } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
- stats->tx_packets += pkts;
- stats->tx_bytes += bytes;
- }
-
- /* bad pkts received */
- stats->rx_errors = drvs->rx_crc_errors +
- drvs->rx_alignment_symbol_errors +
- drvs->rx_in_range_errors +
- drvs->rx_out_range_errors +
- drvs->rx_frame_too_long +
- drvs->rx_dropped_too_small +
- drvs->rx_dropped_too_short +
- drvs->rx_dropped_header_too_small +
- drvs->rx_dropped_tcp_length +
- drvs->rx_dropped_runt;
-
- /* detailed rx errors */
- stats->rx_length_errors = drvs->rx_in_range_errors +
- drvs->rx_out_range_errors +
- drvs->rx_frame_too_long;
-
- stats->rx_crc_errors = drvs->rx_crc_errors;
-
- /* frame alignment errors */
- stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
-
- /* receiver fifo overrun */
-	/* drops_no_pbuf is not per i/f, it's per BE card */
- stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
- drvs->rx_input_fifo_overflow_drop +
- drvs->rx_drops_no_pbuf;
- return stats;
-}
-
-void be_link_status_update(struct be_adapter *adapter, u32 link_status)
-{
- struct net_device *netdev = adapter->netdev;
-
- /* when link status changes, link speed must be re-queried from card */
- adapter->link_speed = -1;
- if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
- netif_carrier_on(netdev);
- dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
- } else {
- netif_carrier_off(netdev);
- dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
- }
-}
-
-static void be_tx_stats_update(struct be_tx_obj *txo,
- u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
-{
- struct be_tx_stats *stats = tx_stats(txo);
-
- u64_stats_update_begin(&stats->sync);
- stats->tx_reqs++;
- stats->tx_wrbs += wrb_cnt;
- stats->tx_bytes += copied;
- stats->tx_pkts += (gso_segs ? gso_segs : 1);
- if (stopped)
- stats->tx_stops++;
- u64_stats_update_end(&stats->sync);
-}
-
-/* Determine number of WRB entries needed to xmit data in an skb */
-static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
- bool *dummy)
-{
- int cnt = (skb->len > skb->data_len);
-
- cnt += skb_shinfo(skb)->nr_frags;
-
- /* to account for hdr wrb */
- cnt++;
- if (lancer_chip(adapter) || !(cnt & 1)) {
- *dummy = false;
- } else {
- /* add a dummy to make it an even num */
- cnt++;
- *dummy = true;
- }
- BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
- return cnt;
-}
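For example, an skb with a linear head and two page frags needs 1 + 2 data WRBs plus the header WRB, i.e. 4 in total; that is even, so no dummy is needed. With a single frag the count would be 3, and on BE2/BE3 a dummy WRB is appended to make it even; Lancer has no even-count requirement, so the dummy is skipped there.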
-
-static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
-{
- wrb->frag_pa_hi = upper_32_bits(addr);
- wrb->frag_pa_lo = addr & 0xFFFFFFFF;
- wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
-}
-
-static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
- struct sk_buff *skb, u32 wrb_cnt, u32 len)
-{
- u8 vlan_prio = 0;
- u16 vlan_tag = 0;
-
- memset(hdr, 0, sizeof(*hdr));
-
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
-
- if (skb_is_gso(skb)) {
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
- hdr, skb_shinfo(skb)->gso_size);
- if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
- if (lancer_chip(adapter) && adapter->sli_family ==
- LANCER_A0_SLI_FAMILY) {
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
- if (is_tcp_pkt(skb))
- AMAP_SET_BITS(struct amap_eth_hdr_wrb,
- tcpcs, hdr, 1);
- else if (is_udp_pkt(skb))
- AMAP_SET_BITS(struct amap_eth_hdr_wrb,
- udpcs, hdr, 1);
- }
- } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (is_tcp_pkt(skb))
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
- else if (is_udp_pkt(skb))
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
- }
-
- if (vlan_tx_tag_present(skb)) {
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
- vlan_tag = vlan_tx_tag_get(skb);
- vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
- /* If vlan priority provided by OS is NOT in available bmap */
- if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
- vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
- adapter->recommended_prio;
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
- }
-
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
- AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
-}
-
-static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
- bool unmap_single)
-{
- dma_addr_t dma;
-
- be_dws_le_to_cpu(wrb, sizeof(*wrb));
-
- dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
- if (wrb->frag_len) {
- if (unmap_single)
- dma_unmap_single(dev, dma, wrb->frag_len,
- DMA_TO_DEVICE);
- else
- dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
- }
-}
-
-static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
- struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
-{
- dma_addr_t busaddr;
- int i, copied = 0;
- struct device *dev = &adapter->pdev->dev;
- struct sk_buff *first_skb = skb;
- struct be_eth_wrb *wrb;
- struct be_eth_hdr_wrb *hdr;
- bool map_single = false;
- u16 map_head;
-
- hdr = queue_head_node(txq);
- queue_head_inc(txq);
- map_head = txq->head;
-
- if (skb->len > skb->data_len) {
- int len = skb_headlen(skb);
- busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, busaddr))
- goto dma_err;
- map_single = true;
- wrb = queue_head_node(txq);
- wrb_fill(wrb, busaddr, len);
- be_dws_cpu_to_le(wrb, sizeof(*wrb));
- queue_head_inc(txq);
- copied += len;
- }
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- struct skb_frag_struct *frag =
- &skb_shinfo(skb)->frags[i];
- busaddr = dma_map_page(dev, frag->page, frag->page_offset,
- frag->size, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, busaddr))
- goto dma_err;
- wrb = queue_head_node(txq);
- wrb_fill(wrb, busaddr, frag->size);
- be_dws_cpu_to_le(wrb, sizeof(*wrb));
- queue_head_inc(txq);
- copied += frag->size;
- }
-
- if (dummy_wrb) {
- wrb = queue_head_node(txq);
- wrb_fill(wrb, 0, 0);
- be_dws_cpu_to_le(wrb, sizeof(*wrb));
- queue_head_inc(txq);
- }
-
- wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
- be_dws_cpu_to_le(hdr, sizeof(*hdr));
-
- return copied;
-dma_err:
- txq->head = map_head;
- while (copied) {
- wrb = queue_head_node(txq);
- unmap_tx_frag(dev, wrb, map_single);
- map_single = false;
- copied -= wrb->frag_len;
- queue_head_inc(txq);
- }
- return 0;
-}
-
-static netdev_tx_t be_xmit(struct sk_buff *skb,
- struct net_device *netdev)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
- struct be_queue_info *txq = &txo->q;
- u32 wrb_cnt = 0, copied = 0;
- u32 start = txq->head;
- bool dummy_wrb, stopped = false;
-
- wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
-
- copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
- if (copied) {
- /* record the sent skb in the sent_skb table */
- BUG_ON(txo->sent_skb_list[start]);
- txo->sent_skb_list[start] = skb;
-
-		/* Ensure txq has space for the next skb; else stop the queue
-		 * *BEFORE* ringing the tx doorbell, so that we serialize the
-		 * tx compls of the current transmit, which will wake up the queue
-		 */
- atomic_add(wrb_cnt, &txq->used);
- if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
- txq->len) {
- netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
- stopped = true;
- }
-
- be_txq_notify(adapter, txq->id, wrb_cnt);
-
- be_tx_stats_update(txo, wrb_cnt, copied,
- skb_shinfo(skb)->gso_segs, stopped);
- } else {
- txq->head = start;
- dev_kfree_skb_any(skb);
- }
- return NETDEV_TX_OK;
-}
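The stop check above keeps BE_MAX_TX_FRAG_COUNT entries of headroom: the subqueue is stopped as soon as fewer than that many slots remain free, so the next skb (which needs at most BE_MAX_TX_FRAG_COUNT WRBs, enforced by the BUG_ON in wrb_cnt_for_skb()) is always guaranteed to fit before a TX completion wakes the queue again.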
-
-static int be_change_mtu(struct net_device *netdev, int new_mtu)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- if (new_mtu < BE_MIN_MTU ||
- new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
- (ETH_HLEN + ETH_FCS_LEN))) {
- dev_info(&adapter->pdev->dev,
- "MTU must be between %d and %d bytes\n",
- BE_MIN_MTU,
- (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
- return -EINVAL;
- }
- dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
- netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
- return 0;
-}
-
-/*
- * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
- * If the user configures more, place BE in vlan promiscuous mode.
- */
-static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
-{
- u16 vtag[BE_NUM_VLANS_SUPPORTED];
- u16 ntags = 0, i;
- int status = 0;
- u32 if_handle;
-
- if (vf) {
- if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
- vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
- status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
- }
-
- /* No need to further configure vids if in promiscuous mode */
- if (adapter->promiscuous)
- return 0;
-
- if (adapter->vlans_added <= adapter->max_vlans) {
- /* Construct VLAN Table to give to HW */
- for (i = 0; i < VLAN_N_VID; i++) {
- if (adapter->vlan_tag[i]) {
- vtag[ntags] = cpu_to_le16(i);
- ntags++;
- }
- }
- status = be_cmd_vlan_config(adapter, adapter->if_handle,
- vtag, ntags, 1, 0);
- } else {
- status = be_cmd_vlan_config(adapter, adapter->if_handle,
- NULL, 0, 1, 1);
- }
-
- return status;
-}
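-
-/* Illustrative example: with only VIDs 5 and 100 configured, the loop above
- * builds vtag = { cpu_to_le16(5), cpu_to_le16(100) } and ntags = 2. Once
- * vlans_added exceeds max_vlans, the NULL-table call takes over and, per
- * the comment above, places BE in vlan promiscuous mode; no table is
- * programmed at all.
- */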
-
-static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
-
- adapter->vlans_added++;
- if (!be_physfn(adapter))
- return;
-
- adapter->vlan_tag[vid] = 1;
- if (adapter->vlans_added <= (adapter->max_vlans + 1))
- be_vid_config(adapter, false, 0);
-}
-
-static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
-
- adapter->vlans_added--;
-
- if (!be_physfn(adapter))
- return;
-
- adapter->vlan_tag[vid] = 0;
- if (adapter->vlans_added <= adapter->max_vlans)
- be_vid_config(adapter, false, 0);
-}
-
-static void be_set_multicast_list(struct net_device *netdev)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
-
- if (netdev->flags & IFF_PROMISC) {
- be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
- adapter->promiscuous = true;
- goto done;
- }
-
- /* BE was previously in promiscuous mode; disable it */
- if (adapter->promiscuous) {
- adapter->promiscuous = false;
- be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
-
- if (adapter->vlans_added)
- be_vid_config(adapter, false, 0);
- }
-
- /* Enable multicast promisc if num configured exceeds what we support */
- if (netdev->flags & IFF_ALLMULTI ||
- netdev_mc_count(netdev) > BE_MAX_MC) {
- be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
- goto done;
- }
-
- be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
-done:
- return;
-}
-
-static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- int status;
-
- if (!adapter->sriov_enabled)
- return -EPERM;
-
- if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
- return -EINVAL;
-
- if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
- status = be_cmd_pmac_del(adapter,
- adapter->vf_cfg[vf].vf_if_handle,
- adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
-
- status = be_cmd_pmac_add(adapter, mac,
- adapter->vf_cfg[vf].vf_if_handle,
- &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
-
- if (status)
- dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
- mac, vf);
- else
- memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
-
- return status;
-}
-
-static int be_get_vf_config(struct net_device *netdev, int vf,
- struct ifla_vf_info *vi)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
-
- if (!adapter->sriov_enabled)
- return -EPERM;
-
- if (vf >= num_vfs)
- return -EINVAL;
-
- vi->vf = vf;
- vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
- vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
- vi->qos = 0;
- memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
-
- return 0;
-}
-
-static int be_set_vf_vlan(struct net_device *netdev,
- int vf, u16 vlan, u8 qos)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- int status = 0;
-
- if (!adapter->sriov_enabled)
- return -EPERM;
-
- if ((vf >= num_vfs) || (vlan > 4095))
- return -EINVAL;
-
- if (vlan) {
- adapter->vf_cfg[vf].vf_vlan_tag = vlan;
- adapter->vlans_added++;
- } else {
- adapter->vf_cfg[vf].vf_vlan_tag = 0;
- adapter->vlans_added--;
- }
-
- status = be_vid_config(adapter, true, vf);
-
- if (status)
- dev_info(&adapter->pdev->dev,
- "VLAN %d config on VF %d failed\n", vlan, vf);
- return status;
-}
-
-static int be_set_vf_tx_rate(struct net_device *netdev,
- int vf, int rate)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- int status = 0;
-
- if (!adapter->sriov_enabled)
- return -EPERM;
-
- if ((vf >= num_vfs) || (rate < 0))
- return -EINVAL;
-
- if (rate > 10000)
- rate = 10000;
-
- adapter->vf_cfg[vf].vf_tx_rate = rate;
- status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
-
- if (status)
- dev_info(&adapter->pdev->dev,
- "tx rate %d on VF %d failed\n", rate, vf);
- return status;
-}
-
-static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
-{
- struct be_eq_obj *rx_eq = &rxo->rx_eq;
- struct be_rx_stats *stats = rx_stats(rxo);
- ulong now = jiffies;
- ulong delta = now - stats->rx_jiffies;
- u64 pkts;
- unsigned int start, eqd;
-
- if (!rx_eq->enable_aic)
- return;
-
- /* Wrapped around */
- if (time_before(now, stats->rx_jiffies)) {
- stats->rx_jiffies = now;
- return;
- }
-
- /* Update once a second */
- if (delta < HZ)
- return;
-
- do {
- start = u64_stats_fetch_begin_bh(&stats->sync);
- pkts = stats->rx_pkts;
- } while (u64_stats_fetch_retry_bh(&stats->sync, start));
-
- stats->rx_pps = (pkts - stats->rx_pkts_prev) / (delta / HZ);
- stats->rx_pkts_prev = pkts;
- stats->rx_jiffies = now;
- eqd = stats->rx_pps / 110000;
- eqd = eqd << 3;
- if (eqd > rx_eq->max_eqd)
- eqd = rx_eq->max_eqd;
- if (eqd < rx_eq->min_eqd)
- eqd = rx_eq->min_eqd;
- if (eqd < 10)
- eqd = 0;
- if (eqd != rx_eq->cur_eqd) {
- be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
- rx_eq->cur_eqd = eqd;
- }
-}
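-
-/* Worked example of the adaptive-EQD arithmetic above (rates illustrative):
- * at ~440K pkts/s, eqd = (440000 / 110000) << 3 = 32 delay units; below
- * ~220K pkts/s the computed value falls under 10 and is forced to 0, i.e.
- * no moderation. The value is clamped to [min_eqd, max_eqd] first, and the
- * MCC command is issued only when the result actually changes.
- */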
-
-static void be_rx_stats_update(struct be_rx_obj *rxo,
- struct be_rx_compl_info *rxcp)
-{
- struct be_rx_stats *stats = rx_stats(rxo);
-
- u64_stats_update_begin(&stats->sync);
- stats->rx_compl++;
- stats->rx_bytes += rxcp->pkt_size;
- stats->rx_pkts++;
- if (rxcp->pkt_type == BE_MULTICAST_PACKET)
- stats->rx_mcast_pkts++;
- if (rxcp->err)
- stats->rx_compl_err++;
- u64_stats_update_end(&stats->sync);
-}
-
-static inline bool csum_passed(struct be_rx_compl_info *rxcp)
-{
- /* L4 checksum is not reliable for non TCP/UDP packets.
- * Also ignore ipcksm for ipv6 pkts */
- return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
- (rxcp->ip_csum || rxcp->ipv6);
-}
-
-static struct be_rx_page_info *
-get_rx_page_info(struct be_adapter *adapter,
- struct be_rx_obj *rxo,
- u16 frag_idx)
-{
- struct be_rx_page_info *rx_page_info;
- struct be_queue_info *rxq = &rxo->q;
-
- rx_page_info = &rxo->page_info_tbl[frag_idx];
- BUG_ON(!rx_page_info->page);
-
- if (rx_page_info->last_page_user) {
- dma_unmap_page(&adapter->pdev->dev,
- dma_unmap_addr(rx_page_info, bus),
- adapter->big_page_size, DMA_FROM_DEVICE);
- rx_page_info->last_page_user = false;
- }
-
- atomic_dec(&rxq->used);
- return rx_page_info;
-}
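-
-/* Receive buffers are fragments carved out of one large mapped page (see
- * be_post_rx_frags()), so the DMA unmap is deferred until the fragment
- * flagged last_page_user is reaped; earlier fragments of the same page only
- * decrement rxq->used here, and dropping the page reference is left to the
- * caller via put_page().
- */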
-
-/* Throw away the data in the Rx completion */
-static void be_rx_compl_discard(struct be_adapter *adapter,
- struct be_rx_obj *rxo,
- struct be_rx_compl_info *rxcp)
-{
- struct be_queue_info *rxq = &rxo->q;
- struct be_rx_page_info *page_info;
- u16 i, num_rcvd = rxcp->num_rcvd;
-
- for (i = 0; i < num_rcvd; i++) {
- page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
- put_page(page_info->page);
- memset(page_info, 0, sizeof(*page_info));
- index_inc(&rxcp->rxq_idx, rxq->len);
- }
-}
-
-/*
- * skb_fill_rx_data forms a complete skb for an ether frame
- * indicated by rxcp.
- */
-static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
- struct sk_buff *skb, struct be_rx_compl_info *rxcp)
-{
- struct be_queue_info *rxq = &rxo->q;
- struct be_rx_page_info *page_info;
- u16 i, j;
- u16 hdr_len, curr_frag_len, remaining;
- u8 *start;
-
- page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
- start = page_address(page_info->page) + page_info->page_offset;
- prefetch(start);
-
- /* Copy data in the first descriptor of this completion */
- curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
-
- /* Copy the header portion into skb_data */
- hdr_len = min(BE_HDR_LEN, curr_frag_len);
- memcpy(skb->data, start, hdr_len);
- skb->len = curr_frag_len;
- if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
- /* Complete packet has now been moved to data */
- put_page(page_info->page);
- skb->data_len = 0;
- skb->tail += curr_frag_len;
- } else {
- skb_shinfo(skb)->nr_frags = 1;
- skb_shinfo(skb)->frags[0].page = page_info->page;
- skb_shinfo(skb)->frags[0].page_offset =
- page_info->page_offset + hdr_len;
- skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
- skb->data_len = curr_frag_len - hdr_len;
- skb->tail += hdr_len;
- }
- page_info->page = NULL;
-
- if (rxcp->pkt_size <= rx_frag_size) {
- BUG_ON(rxcp->num_rcvd != 1);
- return;
- }
-
- /* More frags present for this completion */
- index_inc(&rxcp->rxq_idx, rxq->len);
- remaining = rxcp->pkt_size - curr_frag_len;
- for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
- page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
- curr_frag_len = min(remaining, rx_frag_size);
-
- /* Coalesce all frags from the same physical page in one slot */
- if (page_info->page_offset == 0) {
- /* Fresh page */
- j++;
- skb_shinfo(skb)->frags[j].page = page_info->page;
- skb_shinfo(skb)->frags[j].page_offset =
- page_info->page_offset;
- skb_shinfo(skb)->frags[j].size = 0;
- skb_shinfo(skb)->nr_frags++;
- } else {
- put_page(page_info->page);
- }
-
- skb_shinfo(skb)->frags[j].size += curr_frag_len;
- skb->len += curr_frag_len;
- skb->data_len += curr_frag_len;
-
- remaining -= curr_frag_len;
- index_inc(&rxcp->rxq_idx, rxq->len);
- page_info->page = NULL;
- }
- BUG_ON(j > MAX_SKB_FRAGS);
-}
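-
-/* Illustrative example, assuming a 2048-byte rx_frag_size: a 3000-byte
- * frame arrives as two fragments. Of the first, 64 bytes (BE_HDR_LEN) are
- * copied into skb->data and the remaining 1984 bytes become page frag[0];
- * the 952-byte second fragment then either starts frag[1] (fresh page) or,
- * if it shares the same big page, is folded into frag[0] by the coalescing
- * branch above.
- */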
-
-/* Process the RX completion indicated by rxcp when GRO is disabled */
-static void be_rx_compl_process(struct be_adapter *adapter,
- struct be_rx_obj *rxo,
- struct be_rx_compl_info *rxcp)
-{
- struct net_device *netdev = adapter->netdev;
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
- if (unlikely(!skb)) {
- rx_stats(rxo)->rx_drops_no_skbs++;
- be_rx_compl_discard(adapter, rxo, rxcp);
- return;
- }
-
- skb_fill_rx_data(adapter, rxo, skb, rxcp);
-
- if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb_checksum_none_assert(skb);
-
- skb->truesize = skb->len + sizeof(struct sk_buff);
- skb->protocol = eth_type_trans(skb, netdev);
- if (adapter->netdev->features & NETIF_F_RXHASH)
- skb->rxhash = rxcp->rss_hash;
-
- if (unlikely(rxcp->vlanf))
- __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
-
- netif_receive_skb(skb);
-}
-
-/* Process the RX completion indicated by rxcp when GRO is enabled */
-static void be_rx_compl_process_gro(struct be_adapter *adapter,
- struct be_rx_obj *rxo,
- struct be_rx_compl_info *rxcp)
-{
- struct be_rx_page_info *page_info;
- struct sk_buff *skb = NULL;
- struct be_queue_info *rxq = &rxo->q;
- struct be_eq_obj *eq_obj = &rxo->rx_eq;
- u16 remaining, curr_frag_len;
- u16 i, j;
-
- skb = napi_get_frags(&eq_obj->napi);
- if (!skb) {
- be_rx_compl_discard(adapter, rxo, rxcp);
- return;
- }
-
- remaining = rxcp->pkt_size;
- for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
- page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
-
- curr_frag_len = min(remaining, rx_frag_size);
-
- /* Coalesce all frags from the same physical page in one slot */
- if (i == 0 || page_info->page_offset == 0) {
- /* First frag or Fresh page */
- j++;
- skb_shinfo(skb)->frags[j].page = page_info->page;
- skb_shinfo(skb)->frags[j].page_offset =
- page_info->page_offset;
- skb_shinfo(skb)->frags[j].size = 0;
- } else {
- put_page(page_info->page);
- }
- skb_shinfo(skb)->frags[j].size += curr_frag_len;
-
- remaining -= curr_frag_len;
- index_inc(&rxcp->rxq_idx, rxq->len);
- memset(page_info, 0, sizeof(*page_info));
- }
- BUG_ON(j > MAX_SKB_FRAGS);
-
- skb_shinfo(skb)->nr_frags = j + 1;
- skb->len = rxcp->pkt_size;
- skb->data_len = rxcp->pkt_size;
- skb->truesize += rxcp->pkt_size;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (adapter->netdev->features & NETIF_F_RXHASH)
- skb->rxhash = rxcp->rss_hash;
-
- if (unlikely(rxcp->vlanf))
- __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
-
- napi_gro_frags(&eq_obj->napi);
-}
-
-static void be_parse_rx_compl_v1(struct be_adapter *adapter,
- struct be_eth_rx_compl *compl,
- struct be_rx_compl_info *rxcp)
-{
- rxcp->pkt_size =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
- rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
- rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
- rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
- rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
- rxcp->ip_csum =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
- rxcp->l4_csum =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
- rxcp->ipv6 =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
- rxcp->rxq_idx =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
- rxcp->num_rcvd =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
- rxcp->pkt_type =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
- rxcp->rss_hash =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
- if (rxcp->vlanf) {
- rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
- compl);
- rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
- compl);
- }
- rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
-}
-
-static void be_parse_rx_compl_v0(struct be_adapter *adapter,
- struct be_eth_rx_compl *compl,
- struct be_rx_compl_info *rxcp)
-{
- rxcp->pkt_size =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
- rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
- rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
- rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
- rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
- rxcp->ip_csum =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
- rxcp->l4_csum =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
- rxcp->ipv6 =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
- rxcp->rxq_idx =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
- rxcp->num_rcvd =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
- rxcp->pkt_type =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
- rxcp->rss_hash =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
- if (rxcp->vlanf) {
- rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
- compl);
- rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
- compl);
- }
- rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
-}
-
-static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
-{
- struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
- struct be_rx_compl_info *rxcp = &rxo->rxcp;
- struct be_adapter *adapter = rxo->adapter;
-
- /* For checking the valid bit it is Ok to use either definition as the
- * valid bit is at the same position in both v0 and v1 Rx compl */
- if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
- return NULL;
-
- rmb();
- be_dws_le_to_cpu(compl, sizeof(*compl));
-
- if (adapter->be3_native)
- be_parse_rx_compl_v1(adapter, compl, rxcp);
- else
- be_parse_rx_compl_v0(adapter, compl, rxcp);
-
- if (rxcp->vlanf) {
- /* vlanf could be wrongly set in some cards.
- * ignore if vtm is not set */
- if ((adapter->function_mode & 0x400) && !rxcp->vtm)
- rxcp->vlanf = 0;
-
- if (!lancer_chip(adapter))
- rxcp->vlan_tag = swab16(rxcp->vlan_tag);
-
- if (((adapter->pvid & VLAN_VID_MASK) ==
- (rxcp->vlan_tag & VLAN_VID_MASK)) &&
- !adapter->vlan_tag[rxcp->vlan_tag])
- rxcp->vlanf = 0;
- }
-
- /* As the compl has been parsed, reset it; we won't touch it again */
- compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
-
- queue_tail_inc(&rxo->cq);
- return rxcp;
-}
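-
-/* Ordering note: the valid bit is tested first, and rmb() is issued before
- * the descriptor is byte-swapped and parsed, so the CPU cannot use stale
- * completion words fetched ahead of the valid-bit read. Zeroing the valid
- * dword afterwards lets this ring slot be seen as empty again on the next
- * wrap of the CQ.
- */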
-
-static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
-{
- u32 order = get_order(size);
-
- if (order > 0)
- gfp |= __GFP_COMP;
- return alloc_pages(gfp, order);
-}
-
-/*
- * Allocate a page, split it to fragments of size rx_frag_size and post as
- * receive buffers to BE
- */
-static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
-{
- struct be_adapter *adapter = rxo->adapter;
- struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
- struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
- struct be_queue_info *rxq = &rxo->q;
- struct page *pagep = NULL;
- struct be_eth_rx_d *rxd;
- u64 page_dmaaddr = 0, frag_dmaaddr;
- u32 posted, page_offset = 0;
-
- page_info = &rxo->page_info_tbl[rxq->head];
- for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
- if (!pagep) {
- pagep = be_alloc_pages(adapter->big_page_size, gfp);
- if (unlikely(!pagep)) {
- rx_stats(rxo)->rx_post_fail++;
- break;
- }
- page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
- 0, adapter->big_page_size,
- DMA_FROM_DEVICE);
- page_info->page_offset = 0;
- } else {
- get_page(pagep);
- page_info->page_offset = page_offset + rx_frag_size;
- }
- page_offset = page_info->page_offset;
- page_info->page = pagep;
- dma_unmap_addr_set(page_info, bus, page_dmaaddr);
- frag_dmaaddr = page_dmaaddr + page_info->page_offset;
-
- rxd = queue_head_node(rxq);
- rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
- rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
-
- /* Any space left in the current big page for another frag? */
- if ((page_offset + rx_frag_size + rx_frag_size) >
- adapter->big_page_size) {
- pagep = NULL;
- page_info->last_page_user = true;
- }
-
- prev_page_info = page_info;
- queue_head_inc(rxq);
- page_info = &page_info_tbl[rxq->head];
- }
- if (pagep)
- prev_page_info->last_page_user = true;
-
- if (posted) {
- atomic_add(posted, &rxq->used);
- be_rxq_notify(adapter, rxq->id, posted);
- } else if (atomic_read(&rxq->used) == 0) {
- /* Let be_worker replenish when memory is available */
- rxo->rx_post_starved = true;
- }
-}
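-
-/* Illustrative example, assuming 4K pages and a 2048-byte rx_frag_size:
- * big_page_size works out to 4096, so each allocated page yields two rx
- * fragments. The first fragment maps the page and takes offset 0; the
- * second takes a get_page() reference at offset 2048 and is marked
- * last_page_user, so the unmap in get_rx_page_info() happens exactly once
- * per page.
- */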
-
-static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
-{
- struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
-
- if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
- return NULL;
-
- rmb();
- be_dws_le_to_cpu(txcp, sizeof(*txcp));
-
- txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
-
- queue_tail_inc(tx_cq);
- return txcp;
-}
-
-static u16 be_tx_compl_process(struct be_adapter *adapter,
- struct be_tx_obj *txo, u16 last_index)
-{
- struct be_queue_info *txq = &txo->q;
- struct be_eth_wrb *wrb;
- struct sk_buff **sent_skbs = txo->sent_skb_list;
- struct sk_buff *sent_skb;
- u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
- bool unmap_skb_hdr = true;
-
- sent_skb = sent_skbs[txq->tail];
- BUG_ON(!sent_skb);
- sent_skbs[txq->tail] = NULL;
-
- /* skip header wrb */
- queue_tail_inc(txq);
-
- do {
- cur_index = txq->tail;
- wrb = queue_tail_node(txq);
- unmap_tx_frag(&adapter->pdev->dev, wrb,
- (unmap_skb_hdr && skb_headlen(sent_skb)));
- unmap_skb_hdr = false;
-
- num_wrbs++;
- queue_tail_inc(txq);
- } while (cur_index != last_index);
-
- kfree_skb(sent_skb);
- return num_wrbs;
-}
-
-static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
-{
- struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
-
- if (!eqe->evt)
- return NULL;
-
- rmb();
- eqe->evt = le32_to_cpu(eqe->evt);
- queue_tail_inc(&eq_obj->q);
- return eqe;
-}
-
-static int event_handle(struct be_adapter *adapter,
- struct be_eq_obj *eq_obj,
- bool rearm)
-{
- struct be_eq_entry *eqe;
- u16 num = 0;
-
- while ((eqe = event_get(eq_obj)) != NULL) {
- eqe->evt = 0;
- num++;
- }
-
- /* Deal with any spurious interrupts that come
- * without events
- */
- if (!num)
- rearm = true;
-
- be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
- if (num)
- napi_schedule(&eq_obj->napi);
-
- return num;
-}
-
-/* Just read and notify events without processing them.
- * Used at the time of destroying event queues */
-static void be_eq_clean(struct be_adapter *adapter,
- struct be_eq_obj *eq_obj)
-{
- struct be_eq_entry *eqe;
- u16 num = 0;
-
- while ((eqe = event_get(eq_obj)) != NULL) {
- eqe->evt = 0;
- num++;
- }
-
- if (num)
- be_eq_notify(adapter, eq_obj->q.id, false, true, num);
-}
-
-static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
-{
- struct be_rx_page_info *page_info;
- struct be_queue_info *rxq = &rxo->q;
- struct be_queue_info *rx_cq = &rxo->cq;
- struct be_rx_compl_info *rxcp;
- u16 tail;
-
- /* First clean up pending rx completions */
- while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
- be_rx_compl_discard(adapter, rxo, rxcp);
- be_cq_notify(adapter, rx_cq->id, false, 1);
- }
-
- /* Then free the posted rx buffers that were not used */
- tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
- for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
- page_info = get_rx_page_info(adapter, rxo, tail);
- put_page(page_info->page);
- memset(page_info, 0, sizeof(*page_info));
- }
- BUG_ON(atomic_read(&rxq->used));
- rxq->tail = rxq->head = 0;
-}
-
-static void be_tx_compl_clean(struct be_adapter *adapter,
- struct be_tx_obj *txo)
-{
- struct be_queue_info *tx_cq = &txo->cq;
- struct be_queue_info *txq = &txo->q;
- struct be_eth_tx_compl *txcp;
- u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
- struct sk_buff **sent_skbs = txo->sent_skb_list;
- struct sk_buff *sent_skb;
- bool dummy_wrb;
-
- /* Wait for a max of 200ms for all the tx-completions to arrive. */
- do {
- while ((txcp = be_tx_compl_get(tx_cq))) {
- end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
- wrb_index, txcp);
- num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
- cmpl++;
- }
- if (cmpl) {
- be_cq_notify(adapter, tx_cq->id, false, cmpl);
- atomic_sub(num_wrbs, &txq->used);
- cmpl = 0;
- num_wrbs = 0;
- }
-
- if (atomic_read(&txq->used) == 0 || ++timeo > 200)
- break;
-
- mdelay(1);
- } while (true);
-
- if (atomic_read(&txq->used))
- dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
- atomic_read(&txq->used));
-
- /* free posted tx for which compls will never arrive */
- while (atomic_read(&txq->used)) {
- sent_skb = sent_skbs[txq->tail];
- end_idx = txq->tail;
- index_adv(&end_idx,
- wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
- txq->len);
- num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
- atomic_sub(num_wrbs, &txq->used);
- }
-}
-
-static void be_mcc_queues_destroy(struct be_adapter *adapter)
-{
- struct be_queue_info *q;
-
- q = &adapter->mcc_obj.q;
- if (q->created)
- be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
- be_queue_free(adapter, q);
-
- q = &adapter->mcc_obj.cq;
- if (q->created)
- be_cmd_q_destroy(adapter, q, QTYPE_CQ);
- be_queue_free(adapter, q);
-}
-
-/* Must be called only after TX qs are created as MCC shares TX EQ */
-static int be_mcc_queues_create(struct be_adapter *adapter)
-{
- struct be_queue_info *q, *cq;
-
- /* Alloc MCC compl queue */
- cq = &adapter->mcc_obj.cq;
- if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
- sizeof(struct be_mcc_compl)))
- goto err;
-
- /* Ask BE to create MCC compl queue; share TX's eq */
- if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
- goto mcc_cq_free;
-
- /* Alloc MCC queue */
- q = &adapter->mcc_obj.q;
- if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
- goto mcc_cq_destroy;
-
- /* Ask BE to create MCC queue */
- if (be_cmd_mccq_create(adapter, q, cq))
- goto mcc_q_free;
-
- return 0;
-
-mcc_q_free:
- be_queue_free(adapter, q);
-mcc_cq_destroy:
- be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
-mcc_cq_free:
- be_queue_free(adapter, cq);
-err:
- return -1;
-}
-
-static void be_tx_queues_destroy(struct be_adapter *adapter)
-{
- struct be_queue_info *q;
- struct be_tx_obj *txo;
- u8 i;
-
- for_all_tx_queues(adapter, txo, i) {
- q = &txo->q;
- if (q->created)
- be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
- be_queue_free(adapter, q);
-
- q = &txo->cq;
- if (q->created)
- be_cmd_q_destroy(adapter, q, QTYPE_CQ);
- be_queue_free(adapter, q);
- }
-
- /* Clear any residual events */
- be_eq_clean(adapter, &adapter->tx_eq);
-
- q = &adapter->tx_eq.q;
- if (q->created)
- be_cmd_q_destroy(adapter, q, QTYPE_EQ);
- be_queue_free(adapter, q);
-}
-
-/* One TX event queue is shared by all TX compl qs */
-static int be_tx_queues_create(struct be_adapter *adapter)
-{
- struct be_queue_info *eq, *q, *cq;
- struct be_tx_obj *txo;
- u8 i;
-
- adapter->tx_eq.max_eqd = 0;
- adapter->tx_eq.min_eqd = 0;
- adapter->tx_eq.cur_eqd = 96;
- adapter->tx_eq.enable_aic = false;
-
- eq = &adapter->tx_eq.q;
- if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
- sizeof(struct be_eq_entry)))
- return -1;
-
- if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
- goto err;
- adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
-
- for_all_tx_queues(adapter, txo, i) {
- cq = &txo->cq;
- if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
- sizeof(struct be_eth_tx_compl)))
- goto err;
-
- if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
- goto err;
-
- q = &txo->q;
- if (be_queue_alloc(adapter, q, TX_Q_LEN,
- sizeof(struct be_eth_wrb)))
- goto err;
-
- if (be_cmd_txq_create(adapter, q, cq))
- goto err;
- }
- return 0;
-
-err:
- be_tx_queues_destroy(adapter);
- return -1;
-}
-
-static void be_rx_queues_destroy(struct be_adapter *adapter)
-{
- struct be_queue_info *q;
- struct be_rx_obj *rxo;
- int i;
-
- for_all_rx_queues(adapter, rxo, i) {
- be_queue_free(adapter, &rxo->q);
-
- q = &rxo->cq;
- if (q->created)
- be_cmd_q_destroy(adapter, q, QTYPE_CQ);
- be_queue_free(adapter, q);
-
- q = &rxo->rx_eq.q;
- if (q->created)
- be_cmd_q_destroy(adapter, q, QTYPE_EQ);
- be_queue_free(adapter, q);
- }
-}
-
-static u32 be_num_rxqs_want(struct be_adapter *adapter)
-{
- if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
- !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
- return 1 + MAX_RSS_QS; /* one default non-RSS queue */
- } else {
- dev_warn(&adapter->pdev->dev,
- "No support for multiple RX queues\n");
- return 1;
- }
-}
-
-static int be_rx_queues_create(struct be_adapter *adapter)
-{
- struct be_queue_info *eq, *q, *cq;
- struct be_rx_obj *rxo;
- int rc, i;
-
- adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
- msix_enabled(adapter) ?
- adapter->num_msix_vec - 1 : 1);
- if (adapter->num_rx_qs != MAX_RX_QS)
- dev_warn(&adapter->pdev->dev,
- "Can create only %d RX queues", adapter->num_rx_qs);
-
- adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
- for_all_rx_queues(adapter, rxo, i) {
- rxo->adapter = adapter;
- rxo->rx_eq.max_eqd = BE_MAX_EQD;
- rxo->rx_eq.enable_aic = true;
-
- /* EQ */
- eq = &rxo->rx_eq.q;
- rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
- sizeof(struct be_eq_entry));
- if (rc)
- goto err;
-
- rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
- if (rc)
- goto err;
-
- rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
-
- /* CQ */
- cq = &rxo->cq;
- rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
- sizeof(struct be_eth_rx_compl));
- if (rc)
- goto err;
-
- rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
- if (rc)
- goto err;
-
- /* Rx Q - will be created in be_open() */
- q = &rxo->q;
- rc = be_queue_alloc(adapter, q, RX_Q_LEN,
- sizeof(struct be_eth_rx_d));
- if (rc)
- goto err;
- }
-
- return 0;
-err:
- be_rx_queues_destroy(adapter);
- return -1;
-}
-
-static bool event_peek(struct be_eq_obj *eq_obj)
-{
- struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
-
- return eqe->evt != 0;
-}
-
-static irqreturn_t be_intx(int irq, void *dev)
-{
- struct be_adapter *adapter = dev;
- struct be_rx_obj *rxo;
- int isr, i, tx = 0, rx = 0;
-
- if (lancer_chip(adapter)) {
- if (event_peek(&adapter->tx_eq))
- tx = event_handle(adapter, &adapter->tx_eq, false);
- for_all_rx_queues(adapter, rxo, i) {
- if (event_peek(&rxo->rx_eq))
- rx |= event_handle(adapter, &rxo->rx_eq, true);
- }
-
- if (!(tx || rx))
- return IRQ_NONE;
-
- } else {
- isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
- (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
- if (!isr)
- return IRQ_NONE;
-
- if ((1 << adapter->tx_eq.eq_idx & isr))
- event_handle(adapter, &adapter->tx_eq, false);
-
- for_all_rx_queues(adapter, rxo, i) {
- if ((1 << rxo->rx_eq.eq_idx & isr))
- event_handle(adapter, &rxo->rx_eq, true);
- }
- }
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t be_msix_rx(int irq, void *dev)
-{
- struct be_rx_obj *rxo = dev;
- struct be_adapter *adapter = rxo->adapter;
-
- event_handle(adapter, &rxo->rx_eq, true);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
-{
- struct be_adapter *adapter = dev;
-
- event_handle(adapter, &adapter->tx_eq, false);
-
- return IRQ_HANDLED;
-}
-
-static inline bool do_gro(struct be_rx_compl_info *rxcp)
-{
- return rxcp->tcpf && !rxcp->err;
-}
-
-static int be_poll_rx(struct napi_struct *napi, int budget)
-{
- struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
- struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
- struct be_adapter *adapter = rxo->adapter;
- struct be_queue_info *rx_cq = &rxo->cq;
- struct be_rx_compl_info *rxcp;
- u32 work_done;
-
- rx_stats(rxo)->rx_polls++;
- for (work_done = 0; work_done < budget; work_done++) {
- rxcp = be_rx_compl_get(rxo);
- if (!rxcp)
- break;
-
- /* Is it a flush compl that has no data? */
- if (unlikely(rxcp->num_rcvd == 0))
- goto loop_continue;
-
- /* On Lancer B0, discard compls that indicate a partial DMA */
- if (unlikely(!rxcp->pkt_size)) {
- be_rx_compl_discard(adapter, rxo, rxcp);
- goto loop_continue;
- }
-
- /* On BE drop pkts that arrive due to imperfect filtering in
- * promiscuous mode on some SKUs
- */
- if (unlikely(rxcp->port != adapter->port_num &&
- !lancer_chip(adapter))) {
- be_rx_compl_discard(adapter, rxo, rxcp);
- goto loop_continue;
- }
-
- if (do_gro(rxcp))
- be_rx_compl_process_gro(adapter, rxo, rxcp);
- else
- be_rx_compl_process(adapter, rxo, rxcp);
-loop_continue:
- be_rx_stats_update(rxo, rxcp);
- }
-
- /* Refill the queue */
- if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
- be_post_rx_frags(rxo, GFP_ATOMIC);
-
- /* All consumed */
- if (work_done < budget) {
- napi_complete(napi);
- be_cq_notify(adapter, rx_cq->id, true, work_done);
- } else {
- /* More to be consumed; continue with interrupts disabled */
- be_cq_notify(adapter, rx_cq->id, false, work_done);
- }
- return work_done;
-}
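-
-/* NAPI contract as applied above: when work_done < budget the poll is done,
- * so napi_complete() is called and the CQ is re-armed (arm == true) to
- * raise the next interrupt; when the budget is exhausted the CQ is notified
- * without re-arming, and the kernel will invoke be_poll_rx() again on the
- * next softirq pass.
- */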
-
-/* As TX and MCC share the same EQ check for both TX and MCC completions.
- * For TX/MCC we don't honour budget; consume everything
- */
-static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
-{
- struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
- struct be_adapter *adapter =
- container_of(tx_eq, struct be_adapter, tx_eq);
- struct be_tx_obj *txo;
- struct be_eth_tx_compl *txcp;
- int tx_compl, mcc_compl, status = 0;
- u8 i;
- u16 num_wrbs;
-
- for_all_tx_queues(adapter, txo, i) {
- tx_compl = 0;
- num_wrbs = 0;
- while ((txcp = be_tx_compl_get(&txo->cq))) {
- num_wrbs += be_tx_compl_process(adapter, txo,
- AMAP_GET_BITS(struct amap_eth_tx_compl,
- wrb_index, txcp));
- tx_compl++;
- }
- if (tx_compl) {
- be_cq_notify(adapter, txo->cq.id, true, tx_compl);
-
- atomic_sub(num_wrbs, &txo->q.used);
-
- /* As Tx wrbs have been freed up, wake up netdev queue
- * if it was stopped due to lack of tx wrbs. */
- if (__netif_subqueue_stopped(adapter->netdev, i) &&
- atomic_read(&txo->q.used) < txo->q.len / 2) {
- netif_wake_subqueue(adapter->netdev, i);
- }
-
- u64_stats_update_begin(&tx_stats(txo)->sync_compl);
- tx_stats(txo)->tx_compl += tx_compl;
- u64_stats_update_end(&tx_stats(txo)->sync_compl);
- }
- }
-
- mcc_compl = be_process_mcc(adapter, &status);
-
- if (mcc_compl) {
- struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
- be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
- }
-
- napi_complete(napi);
-
- be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
- adapter->drv_stats.tx_events++;
- return 1;
-}
-
-void be_detect_dump_ue(struct be_adapter *adapter)
-{
- u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
- u32 i;
-
- pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_LOW, &ue_status_lo);
- pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_HIGH, &ue_status_hi);
- pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
- pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
-
- ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
- ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
-
- if (ue_status_lo || ue_status_hi) {
- adapter->ue_detected = true;
- adapter->eeh_err = true;
- dev_err(&adapter->pdev->dev, "UE Detected!!\n");
- }
-
- if (ue_status_lo) {
- for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
- if (ue_status_lo & 1)
- dev_err(&adapter->pdev->dev,
- "UE: %s bit set\n", ue_status_low_desc[i]);
- }
- }
- if (ue_status_hi) {
- for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
- if (ue_status_hi & 1)
- dev_err(&adapter->pdev->dev,
- "UE: %s bit set\n", ue_status_hi_desc[i]);
- }
- }
-}
-
-static void be_worker(struct work_struct *work)
-{
- struct be_adapter *adapter =
- container_of(work, struct be_adapter, work.work);
- struct be_rx_obj *rxo;
- int i;
-
- if (!adapter->ue_detected && !lancer_chip(adapter))
- be_detect_dump_ue(adapter);
-
- /* when interrupts are not yet enabled, just reap any pending
- * mcc completions */
- if (!netif_running(adapter->netdev)) {
- int mcc_compl, status = 0;
-
- mcc_compl = be_process_mcc(adapter, &status);
-
- if (mcc_compl) {
- struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
- be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
- }
-
- goto reschedule;
- }
-
- if (!adapter->stats_cmd_sent) {
- if (lancer_chip(adapter))
- lancer_cmd_get_pport_stats(adapter,
- &adapter->stats_cmd);
- else
- be_cmd_get_stats(adapter, &adapter->stats_cmd);
- }
-
- for_all_rx_queues(adapter, rxo, i) {
- be_rx_eqd_update(adapter, rxo);
-
- if (rxo->rx_post_starved) {
- rxo->rx_post_starved = false;
- be_post_rx_frags(rxo, GFP_KERNEL);
- }
- }
-
-reschedule:
- adapter->work_counter++;
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
-}
-
-static void be_msix_disable(struct be_adapter *adapter)
-{
- if (msix_enabled(adapter)) {
- pci_disable_msix(adapter->pdev);
- adapter->num_msix_vec = 0;
- }
-}
-
-static void be_msix_enable(struct be_adapter *adapter)
-{
-#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
- int i, status, num_vec;
-
- num_vec = be_num_rxqs_want(adapter) + 1;
-
- for (i = 0; i < num_vec; i++)
- adapter->msix_entries[i].entry = i;
-
- status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
- if (status == 0) {
- goto done;
- } else if (status >= BE_MIN_MSIX_VECTORS) {
- num_vec = status;
- if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
- num_vec) == 0)
- goto done;
- }
- return;
-done:
- adapter->num_msix_vec = num_vec;
- return;
-}
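-
-/* pci_enable_msix() returns 0 on success or, when fewer vectors are
- * available than requested, a positive count of how many could be
- * allocated; the retry above shrinks num_vec to that count as long as it
- * still covers BE_MIN_MSIX_VECTORS. If MSI-X cannot be enabled at all,
- * num_msix_vec stays 0 and be_irq_register() falls back to INTx.
- */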
-
-static void be_sriov_enable(struct be_adapter *adapter)
-{
- be_check_sriov_fn_type(adapter);
-#ifdef CONFIG_PCI_IOV
- if (be_physfn(adapter) && num_vfs) {
- int status, pos;
- u16 nvfs;
-
- pos = pci_find_ext_capability(adapter->pdev,
- PCI_EXT_CAP_ID_SRIOV);
- pci_read_config_word(adapter->pdev,
- pos + PCI_SRIOV_TOTAL_VF, &nvfs);
-
- if (num_vfs > nvfs) {
- dev_info(&adapter->pdev->dev,
- "Device supports %d VFs and not %d\n",
- nvfs, num_vfs);
- num_vfs = nvfs;
- }
-
- status = pci_enable_sriov(adapter->pdev, num_vfs);
- adapter->sriov_enabled = !status;
- }
-#endif
-}
-
-static void be_sriov_disable(struct be_adapter *adapter)
-{
-#ifdef CONFIG_PCI_IOV
- if (adapter->sriov_enabled) {
- pci_disable_sriov(adapter->pdev);
- adapter->sriov_enabled = false;
- }
-#endif
-}
-
-static inline int be_msix_vec_get(struct be_adapter *adapter,
- struct be_eq_obj *eq_obj)
-{
- return adapter->msix_entries[eq_obj->eq_idx].vector;
-}
-
-static int be_request_irq(struct be_adapter *adapter,
- struct be_eq_obj *eq_obj,
- void *handler, char *desc, void *context)
-{
- struct net_device *netdev = adapter->netdev;
- int vec;
-
- sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
- vec = be_msix_vec_get(adapter, eq_obj);
- return request_irq(vec, handler, 0, eq_obj->desc, context);
-}
-
-static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
- void *context)
-{
- int vec = be_msix_vec_get(adapter, eq_obj);
- free_irq(vec, context);
-}
-
-static int be_msix_register(struct be_adapter *adapter)
-{
- struct be_rx_obj *rxo;
- int status, i;
- char qname[10];
-
- status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
- adapter);
- if (status)
- goto err;
-
- for_all_rx_queues(adapter, rxo, i) {
- sprintf(qname, "rxq%d", i);
- status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
- qname, rxo);
- if (status)
- goto err_msix;
- }
-
- return 0;
-
-err_msix:
- be_free_irq(adapter, &adapter->tx_eq, adapter);
-
- for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
- be_free_irq(adapter, &rxo->rx_eq, rxo);
-
-err:
- dev_warn(&adapter->pdev->dev,
- "MSIX Request IRQ failed - err %d\n", status);
- be_msix_disable(adapter);
- return status;
-}
-
-static int be_irq_register(struct be_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- int status;
-
- if (msix_enabled(adapter)) {
- status = be_msix_register(adapter);
- if (status == 0)
- goto done;
- /* INTx is not supported for VF */
- if (!be_physfn(adapter))
- return status;
- }
-
- /* INTx */
- netdev->irq = adapter->pdev->irq;
- status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
- adapter);
- if (status) {
- dev_err(&adapter->pdev->dev,
- "INTx request IRQ failed - err %d\n", status);
- return status;
- }
-done:
- adapter->isr_registered = true;
- return 0;
-}
-
-static void be_irq_unregister(struct be_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct be_rx_obj *rxo;
- int i;
-
- if (!adapter->isr_registered)
- return;
-
- /* INTx */
- if (!msix_enabled(adapter)) {
- free_irq(netdev->irq, adapter);
- goto done;
- }
-
- /* MSIx */
- be_free_irq(adapter, &adapter->tx_eq, adapter);
-
- for_all_rx_queues(adapter, rxo, i)
- be_free_irq(adapter, &rxo->rx_eq, rxo);
-
-done:
- adapter->isr_registered = false;
-}
-
-static void be_rx_queues_clear(struct be_adapter *adapter)
-{
- struct be_queue_info *q;
- struct be_rx_obj *rxo;
- int i;
-
- for_all_rx_queues(adapter, rxo, i) {
- q = &rxo->q;
- if (q->created) {
- be_cmd_rxq_destroy(adapter, q);
- /* After the rxq is invalidated, wait for a grace time
- * of 1ms for all dma to end and the flush compl to
- * arrive
- */
- mdelay(1);
- be_rx_q_clean(adapter, rxo);
- }
-
- /* Clear any residual events */
- q = &rxo->rx_eq.q;
- if (q->created)
- be_eq_clean(adapter, &rxo->rx_eq);
- }
-}
-
-static int be_close(struct net_device *netdev)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- struct be_rx_obj *rxo;
- struct be_tx_obj *txo;
- struct be_eq_obj *tx_eq = &adapter->tx_eq;
- int vec, i;
-
- be_async_mcc_disable(adapter);
-
- if (!lancer_chip(adapter))
- be_intr_set(adapter, false);
-
- for_all_rx_queues(adapter, rxo, i)
- napi_disable(&rxo->rx_eq.napi);
-
- napi_disable(&tx_eq->napi);
-
- if (lancer_chip(adapter)) {
- be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
- for_all_rx_queues(adapter, rxo, i)
- be_cq_notify(adapter, rxo->cq.id, false, 0);
- for_all_tx_queues(adapter, txo, i)
- be_cq_notify(adapter, txo->cq.id, false, 0);
- }
-
- if (msix_enabled(adapter)) {
- vec = be_msix_vec_get(adapter, tx_eq);
- synchronize_irq(vec);
-
- for_all_rx_queues(adapter, rxo, i) {
- vec = be_msix_vec_get(adapter, &rxo->rx_eq);
- synchronize_irq(vec);
- }
- } else {
- synchronize_irq(netdev->irq);
- }
- be_irq_unregister(adapter);
-
- /* Wait for all pending tx completions to arrive so that
- * all tx skbs are freed.
- */
- for_all_tx_queues(adapter, txo, i)
- be_tx_compl_clean(adapter, txo);
-
- be_rx_queues_clear(adapter);
- return 0;
-}
-
-static int be_rx_queues_setup(struct be_adapter *adapter)
-{
- struct be_rx_obj *rxo;
- int rc, i;
- u8 rsstable[MAX_RSS_QS];
-
- for_all_rx_queues(adapter, rxo, i) {
- rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
- rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
- adapter->if_handle,
- (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
- if (rc)
- return rc;
- }
-
- if (be_multi_rxq(adapter)) {
- for_all_rss_queues(adapter, rxo, i)
- rsstable[i] = rxo->rss_id;
-
- rc = be_cmd_rss_config(adapter, rsstable,
- adapter->num_rx_qs - 1);
- if (rc)
- return rc;
- }
-
- /* First time posting */
- for_all_rx_queues(adapter, rxo, i) {
- be_post_rx_frags(rxo, GFP_KERNEL);
- napi_enable(&rxo->rx_eq.napi);
- }
- return 0;
-}
-
-static int be_open(struct net_device *netdev)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- struct be_eq_obj *tx_eq = &adapter->tx_eq;
- struct be_rx_obj *rxo;
- int status, i;
-
- status = be_rx_queues_setup(adapter);
- if (status)
- goto err;
-
- napi_enable(&tx_eq->napi);
-
- be_irq_register(adapter);
-
- if (!lancer_chip(adapter))
- be_intr_set(adapter, true);
-
- /* The evt queues are created in unarmed state; arm them */
- for_all_rx_queues(adapter, rxo, i) {
- be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
- be_cq_notify(adapter, rxo->cq.id, true, 0);
- }
- be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
-
- /* Now that interrupts are on we can process async mcc */
- be_async_mcc_enable(adapter);
-
- if (be_physfn(adapter)) {
- status = be_vid_config(adapter, false, 0);
- if (status)
- goto err;
-
- status = be_cmd_set_flow_control(adapter,
- adapter->tx_fc, adapter->rx_fc);
- if (status)
- goto err;
- }
-
- return 0;
-err:
- be_close(adapter->netdev);
- return -EIO;
-}
-
-static int be_setup_wol(struct be_adapter *adapter, bool enable)
-{
- struct be_dma_mem cmd;
- int status = 0;
- u8 mac[ETH_ALEN];
-
- memset(mac, 0, ETH_ALEN);
-
- cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
- cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_KERNEL);
- if (cmd.va == NULL)
- return -1;
- memset(cmd.va, 0, cmd.size);
-
- if (enable) {
- status = pci_write_config_dword(adapter->pdev,
- PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
- if (status) {
- dev_err(&adapter->pdev->dev,
- "Could not enable Wake-on-lan\n");
- dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
- cmd.dma);
- return status;
- }
- status = be_cmd_enable_magic_wol(adapter,
- adapter->netdev->dev_addr, &cmd);
- pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
- pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
- } else {
- status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
- pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
- pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
- }
-
- dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
- return status;
-}
-
-/*
- * Generate a seed MAC address from the PF MAC Address using jhash.
- * MAC addresses for VFs are assigned incrementally, starting from the seed.
- * These addresses are programmed in the ASIC by the PF and the VF driver
- * queries for the MAC address during its probe.
- */
-static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
-{
- u32 vf = 0;
- int status = 0;
- u8 mac[ETH_ALEN];
-
- be_vf_eth_addr_generate(adapter, mac);
-
- for (vf = 0; vf < num_vfs; vf++) {
- status = be_cmd_pmac_add(adapter, mac,
- adapter->vf_cfg[vf].vf_if_handle,
- &adapter->vf_cfg[vf].vf_pmac_id,
- vf + 1);
- if (status)
- dev_err(&adapter->pdev->dev,
- "Mac address add failed for VF %d\n", vf);
- else
- memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
-
- mac[5] += 1;
- }
- return status;
-}
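-
-/* Illustrative example: if the generated seed were 02:aa:bb:cc:dd:00, VF 0
- * is given ...dd:00, VF 1 gets ...dd:01, and so on. Only mac[5] is
- * incremented, so the scheme assumes num_vfs stays below 256.
- */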
-
-static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
-{
- u32 vf;
-
- for (vf = 0; vf < num_vfs; vf++) {
- if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
- be_cmd_pmac_del(adapter,
- adapter->vf_cfg[vf].vf_if_handle,
- adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
- }
-}
-
-static int be_setup(struct be_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- u32 cap_flags, en_flags, vf = 0;
- int status;
- u8 mac[ETH_ALEN];
-
- be_cmd_req_native_mode(adapter);
-
- cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
- BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST;
-
- if (be_physfn(adapter)) {
- cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
- BE_IF_FLAGS_PROMISCUOUS |
- BE_IF_FLAGS_PASS_L3L4_ERRORS;
- en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
-
- if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
- cap_flags |= BE_IF_FLAGS_RSS;
- en_flags |= BE_IF_FLAGS_RSS;
- }
- }
-
- status = be_cmd_if_create(adapter, cap_flags, en_flags,
- netdev->dev_addr, false/* pmac_invalid */,
- &adapter->if_handle, &adapter->pmac_id, 0);
- if (status != 0)
- goto do_none;
-
- if (be_physfn(adapter)) {
- if (adapter->sriov_enabled) {
- while (vf < num_vfs) {
- cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
- BE_IF_FLAGS_BROADCAST;
- status = be_cmd_if_create(adapter, cap_flags,
- en_flags, mac, true,
- &adapter->vf_cfg[vf].vf_if_handle,
- NULL, vf+1);
- if (status) {
- dev_err(&adapter->pdev->dev,
- "Interface Create failed for VF %d\n",
- vf);
- goto if_destroy;
- }
- adapter->vf_cfg[vf].vf_pmac_id =
- BE_INVALID_PMAC_ID;
- vf++;
- }
- }
- } else {
- status = be_cmd_mac_addr_query(adapter, mac,
- MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
- if (!status) {
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
- memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
- }
- }
-
- status = be_tx_queues_create(adapter);
- if (status != 0)
- goto if_destroy;
-
- status = be_rx_queues_create(adapter);
- if (status != 0)
- goto tx_qs_destroy;
-
- /* Allow all priorities by default. A GRP5 evt may modify this */
- adapter->vlan_prio_bmap = 0xff;
-
- status = be_mcc_queues_create(adapter);
- if (status != 0)
- goto rx_qs_destroy;
-
- adapter->link_speed = -1;
-
- return 0;
-
-rx_qs_destroy:
- be_rx_queues_destroy(adapter);
-tx_qs_destroy:
- be_tx_queues_destroy(adapter);
-if_destroy:
- if (be_physfn(adapter) && adapter->sriov_enabled)
- for (vf = 0; vf < num_vfs; vf++)
- if (adapter->vf_cfg[vf].vf_if_handle)
- be_cmd_if_destroy(adapter,
- adapter->vf_cfg[vf].vf_if_handle,
- vf + 1);
- be_cmd_if_destroy(adapter, adapter->if_handle, 0);
-do_none:
- return status;
-}
-
-static int be_clear(struct be_adapter *adapter)
-{
- int vf;
-
- if (be_physfn(adapter) && adapter->sriov_enabled)
- be_vf_eth_addr_rem(adapter);
-
- be_mcc_queues_destroy(adapter);
- be_rx_queues_destroy(adapter);
- be_tx_queues_destroy(adapter);
- adapter->eq_next_idx = 0;
-
- if (be_physfn(adapter) && adapter->sriov_enabled)
- for (vf = 0; vf < num_vfs; vf++)
- if (adapter->vf_cfg[vf].vf_if_handle)
- be_cmd_if_destroy(adapter,
- adapter->vf_cfg[vf].vf_if_handle,
- vf + 1);
-
- be_cmd_if_destroy(adapter, adapter->if_handle, 0);
-
- adapter->be3_native = 0;
-
- /* tell fw we're done with firing cmds */
- be_cmd_fw_clean(adapter);
- return 0;
-}
-
-#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
-static bool be_flash_redboot(struct be_adapter *adapter,
- const u8 *p, u32 img_start, int image_size,
- int hdr_size)
-{
- u32 crc_offset;
- u8 flashed_crc[4];
- int status;
-
- crc_offset = hdr_size + img_start + image_size - 4;
-
- p += crc_offset;
-
- status = be_cmd_get_flash_crc(adapter, flashed_crc,
- (image_size - 4));
- if (status) {
- dev_err(&adapter->pdev->dev,
- "could not get crc from flash, not flashing redboot\n");
- return false;
- }
-
- /* update redboot only if crc does not match */
- if (!memcmp(flashed_crc, p, 4))
- return false;
- else
- return true;
-}
-
-static bool phy_flashing_required(struct be_adapter *adapter)
-{
- int status = 0;
- struct be_phy_info phy_info;
-
- status = be_cmd_get_phy_info(adapter, &phy_info);
- if (status)
- return false;
- if ((phy_info.phy_type == TN_8022) &&
- (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
- return true;
- }
- return false;
-}
-
-static int be_flash_data(struct be_adapter *adapter,
- const struct firmware *fw,
- struct be_dma_mem *flash_cmd, int num_of_images)
-{
- int status = 0, i, filehdr_size = 0;
- u32 total_bytes = 0, flash_op;
- int num_bytes;
- const u8 *p = fw->data;
- struct be_cmd_write_flashrom *req = flash_cmd->va;
- const struct flash_comp *pflashcomp;
- int num_comp;
-
- static const struct flash_comp gen3_flash_types[10] = {
- { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
- FLASH_IMAGE_MAX_SIZE_g3},
- { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
- FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
- { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
- FLASH_BIOS_IMAGE_MAX_SIZE_g3},
- { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
- FLASH_BIOS_IMAGE_MAX_SIZE_g3},
- { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
- FLASH_BIOS_IMAGE_MAX_SIZE_g3},
- { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
- FLASH_IMAGE_MAX_SIZE_g3},
- { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
- FLASH_IMAGE_MAX_SIZE_g3},
- { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
- FLASH_IMAGE_MAX_SIZE_g3},
- { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
- FLASH_NCSI_IMAGE_MAX_SIZE_g3},
- { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
- FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
- };
- static const struct flash_comp gen2_flash_types[8] = {
- { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
- FLASH_IMAGE_MAX_SIZE_g2},
- { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
- FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
- { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
- FLASH_BIOS_IMAGE_MAX_SIZE_g2},
- { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
- FLASH_BIOS_IMAGE_MAX_SIZE_g2},
- { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
- FLASH_BIOS_IMAGE_MAX_SIZE_g2},
- { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
- FLASH_IMAGE_MAX_SIZE_g2},
- { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
- FLASH_IMAGE_MAX_SIZE_g2},
- { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
- FLASH_IMAGE_MAX_SIZE_g2}
- };
-
- if (adapter->generation == BE_GEN3) {
- pflashcomp = gen3_flash_types;
- filehdr_size = sizeof(struct flash_file_hdr_g3);
- num_comp = ARRAY_SIZE(gen3_flash_types);
- } else {
- pflashcomp = gen2_flash_types;
- filehdr_size = sizeof(struct flash_file_hdr_g2);
- num_comp = ARRAY_SIZE(gen2_flash_types);
- }
- for (i = 0; i < num_comp; i++) {
- if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
- memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
- continue;
- if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
- if (!phy_flashing_required(adapter))
- continue;
- }
- if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
- (!be_flash_redboot(adapter, fw->data,
- pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
- (num_of_images * sizeof(struct image_hdr)))))
- continue;
- p = fw->data;
- p += filehdr_size + pflashcomp[i].offset
- + (num_of_images * sizeof(struct image_hdr));
- if (p + pflashcomp[i].size > fw->data + fw->size)
- return -1;
- total_bytes = pflashcomp[i].size;
- while (total_bytes) {
- if (total_bytes > 32*1024)
- num_bytes = 32*1024;
- else
- num_bytes = total_bytes;
- total_bytes -= num_bytes;
- if (!total_bytes) {
- if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
- flash_op = FLASHROM_OPER_PHY_FLASH;
- else
- flash_op = FLASHROM_OPER_FLASH;
- } else {
- if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
- flash_op = FLASHROM_OPER_PHY_SAVE;
- else
- flash_op = FLASHROM_OPER_SAVE;
- }
- memcpy(req->params.data_buf, p, num_bytes);
- p += num_bytes;
- status = be_cmd_write_flashrom(adapter, flash_cmd,
- pflashcomp[i].optype, flash_op, num_bytes);
- if (status) {
- if ((status == ILLEGAL_IOCTL_REQ) &&
- (pflashcomp[i].optype ==
- IMG_TYPE_PHY_FW))
- break;
- dev_err(&adapter->pdev->dev,
- "cmd to write to flash rom failed.\n");
- return -1;
- }
- }
- }
- return 0;
-}
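-
-/* Flashing protocol sketch: each component is streamed down in 32KB pieces.
- * Intermediate pieces use a SAVE opcode (presumably staged by the firmware)
- * and the final piece switches to a FLASH opcode, committing the image to
- * the flash part. PHY firmware uses its own PHY_SAVE/PHY_FLASH pair, and an
- * ILLEGAL_IOCTL_REQ status there is treated as "nothing to flash" rather
- * than an error.
- */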
-
-static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
-{
- if (fhdr == NULL)
- return 0;
- if (fhdr->build[0] == '3')
- return BE_GEN3;
- else if (fhdr->build[0] == '2')
- return BE_GEN2;
- else
- return 0;
-}
-
-static int lancer_fw_download(struct be_adapter *adapter,
- const struct firmware *fw)
-{
-#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
-#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
- struct be_dma_mem flash_cmd;
- const u8 *data_ptr = NULL;
- u8 *dest_image_ptr = NULL;
- size_t image_size = 0;
- u32 chunk_size = 0;
- u32 data_written = 0;
- u32 offset = 0;
- int status = 0;
- u8 add_status = 0;
-
- if (!IS_ALIGNED(fw->size, sizeof(u32))) {
- dev_err(&adapter->pdev->dev,
- "FW Image not properly aligned. "
- "Length must be 4 byte aligned.\n");
- status = -EINVAL;
- goto lancer_fw_exit;
- }
-
- flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
- + LANCER_FW_DOWNLOAD_CHUNK;
- flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
- &flash_cmd.dma, GFP_KERNEL);
- if (!flash_cmd.va) {
- status = -ENOMEM;
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure while flashing\n");
- goto lancer_fw_exit;
- }
-
- dest_image_ptr = flash_cmd.va +
- sizeof(struct lancer_cmd_req_write_object);
- image_size = fw->size;
- data_ptr = fw->data;
-
- while (image_size) {
- chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
-
- /* Copy the image chunk content. */
- memcpy(dest_image_ptr, data_ptr, chunk_size);
-
- status = lancer_cmd_write_object(adapter, &flash_cmd,
- chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
- &data_written, &add_status);
-
- if (status)
- break;
-
- offset += data_written;
- data_ptr += data_written;
- image_size -= data_written;
- }
-
- if (!status) {
- /* Commit the FW image written so far */
- status = lancer_cmd_write_object(adapter, &flash_cmd,
- 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
- &data_written, &add_status);
- }
-
- dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
- flash_cmd.dma);
- if (status) {
- dev_err(&adapter->pdev->dev,
- "Firmware load error. "
- "Status code: 0x%x Additional Status: 0x%x\n",
- status, add_status);
- goto lancer_fw_exit;
- }
-
- dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
-lancer_fw_exit:
- return status;
-}
-
-static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
-{
- struct flash_file_hdr_g2 *fhdr;
- struct flash_file_hdr_g3 *fhdr3;
- struct image_hdr *img_hdr_ptr = NULL;
- struct be_dma_mem flash_cmd;
- const u8 *p;
- int status = 0, i = 0, num_imgs = 0;
-
- p = fw->data;
- fhdr = (struct flash_file_hdr_g2 *) p;
-
- flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
- flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
- &flash_cmd.dma, GFP_KERNEL);
- if (!flash_cmd.va) {
- status = -ENOMEM;
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure while flashing\n");
- goto be_fw_exit;
- }
-
- if ((adapter->generation == BE_GEN3) &&
- (get_ufigen_type(fhdr) == BE_GEN3)) {
- fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
- num_imgs = le32_to_cpu(fhdr3->num_imgs);
- for (i = 0; i < num_imgs; i++) {
- img_hdr_ptr = (struct image_hdr *) (fw->data +
- (sizeof(struct flash_file_hdr_g3) +
- i * sizeof(struct image_hdr)));
- if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
- status = be_flash_data(adapter, fw, &flash_cmd,
- num_imgs);
- }
- } else if ((adapter->generation == BE_GEN2) &&
- (get_ufigen_type(fhdr) == BE_GEN2)) {
- status = be_flash_data(adapter, fw, &flash_cmd, 0);
- } else {
- dev_err(&adapter->pdev->dev,
- "UFI and Interface are not compatible for flashing\n");
- status = -1;
- }
-
- dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
- flash_cmd.dma);
- if (status) {
- dev_err(&adapter->pdev->dev, "Firmware load error\n");
- goto be_fw_exit;
- }
-
- dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
-
-be_fw_exit:
- return status;
-}
-
-int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
-{
- const struct firmware *fw;
- int status;
-
- if (!netif_running(adapter->netdev)) {
- dev_err(&adapter->pdev->dev,
- "Firmware load not allowed (interface is down)\n");
- return -1;
- }
-
- status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
- if (status)
- goto fw_exit;
-
- dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
-
- if (lancer_chip(adapter))
- status = lancer_fw_download(adapter, fw);
- else
- status = be_fw_download(adapter, fw);
-
-fw_exit:
- release_firmware(fw);
- return status;
-}
-
-static struct net_device_ops be_netdev_ops = {
- .ndo_open = be_open,
- .ndo_stop = be_close,
- .ndo_start_xmit = be_xmit,
- .ndo_set_rx_mode = be_set_multicast_list,
- .ndo_set_mac_address = be_mac_addr_set,
- .ndo_change_mtu = be_change_mtu,
- .ndo_get_stats64 = be_get_stats64,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_vlan_rx_add_vid = be_vlan_add_vid,
- .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
- .ndo_set_vf_mac = be_set_vf_mac,
- .ndo_set_vf_vlan = be_set_vf_vlan,
- .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
- .ndo_get_vf_config = be_get_vf_config
-};
-
-static void be_netdev_init(struct net_device *netdev)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- struct be_rx_obj *rxo;
- int i;
-
- netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_TX;
- if (be_multi_rxq(adapter))
- netdev->hw_features |= NETIF_F_RXHASH;
-
- netdev->features |= netdev->hw_features |
- NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
-
- netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-
- netdev->flags |= IFF_MULTICAST;
-
- /* Default settings for Rx and Tx flow control */
- adapter->rx_fc = true;
- adapter->tx_fc = true;
-
- netif_set_gso_max_size(netdev, 65535);
-
- BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
-
- SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
-
- for_all_rx_queues(adapter, rxo, i)
- netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
- BE_NAPI_WEIGHT);
-
- netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
- BE_NAPI_WEIGHT);
-}
-
-static void be_unmap_pci_bars(struct be_adapter *adapter)
-{
- if (adapter->csr)
- iounmap(adapter->csr);
- if (adapter->db)
- iounmap(adapter->db);
- if (adapter->pcicfg && be_physfn(adapter))
- iounmap(adapter->pcicfg);
-}
-
-static int be_map_pci_bars(struct be_adapter *adapter)
-{
- u8 __iomem *addr;
- int pcicfg_reg, db_reg;
-
- if (lancer_chip(adapter)) {
- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
- pci_resource_len(adapter->pdev, 0));
- if (addr == NULL)
- return -ENOMEM;
- adapter->db = addr;
- return 0;
- }
-
- if (be_physfn(adapter)) {
- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
- pci_resource_len(adapter->pdev, 2));
- if (addr == NULL)
- return -ENOMEM;
- adapter->csr = addr;
- }
-
- if (adapter->generation == BE_GEN2) {
- pcicfg_reg = 1;
- db_reg = 4;
- } else {
- pcicfg_reg = 0;
- if (be_physfn(adapter))
- db_reg = 4;
- else
- db_reg = 0;
- }
- addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
- pci_resource_len(adapter->pdev, db_reg));
- if (addr == NULL)
- goto pci_map_err;
- adapter->db = addr;
-
- if (be_physfn(adapter)) {
- addr = ioremap_nocache(
- pci_resource_start(adapter->pdev, pcicfg_reg),
- pci_resource_len(adapter->pdev, pcicfg_reg));
- if (addr == NULL)
- goto pci_map_err;
- adapter->pcicfg = addr;
- } else
- adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
-
- return 0;
-pci_map_err:
- be_unmap_pci_bars(adapter);
- return -ENOMEM;
-}
-
-
-static void be_ctrl_cleanup(struct be_adapter *adapter)
-{
- struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
-
- be_unmap_pci_bars(adapter);
-
- if (mem->va)
- dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
- mem->dma);
-
- mem = &adapter->rx_filter;
- if (mem->va)
- dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
- mem->dma);
-}
-
-static int be_ctrl_init(struct be_adapter *adapter)
-{
- struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
- struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
- struct be_dma_mem *rx_filter = &adapter->rx_filter;
- int status;
-
- status = be_map_pci_bars(adapter);
- if (status)
- goto done;
-
- mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
- mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
- mbox_mem_alloc->size,
- &mbox_mem_alloc->dma,
- GFP_KERNEL);
- if (!mbox_mem_alloc->va) {
- status = -ENOMEM;
- goto unmap_pci_bars;
- }
- mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
- mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
- mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
- memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
-
- rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
- rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
- &rx_filter->dma, GFP_KERNEL);
- if (rx_filter->va == NULL) {
- status = -ENOMEM;
- goto free_mbox;
- }
- memset(rx_filter->va, 0, rx_filter->size);
-
- mutex_init(&adapter->mbox_lock);
- spin_lock_init(&adapter->mcc_lock);
- spin_lock_init(&adapter->mcc_cq_lock);
-
- init_completion(&adapter->flash_compl);
- pci_save_state(adapter->pdev);
- return 0;
-
-free_mbox:
- dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
- mbox_mem_alloc->va, mbox_mem_alloc->dma);
-
-unmap_pci_bars:
- be_unmap_pci_bars(adapter);
-
-done:
- return status;
-}
-
-static void be_stats_cleanup(struct be_adapter *adapter)
-{
- struct be_dma_mem *cmd = &adapter->stats_cmd;
-
- if (cmd->va)
- dma_free_coherent(&adapter->pdev->dev, cmd->size,
- cmd->va, cmd->dma);
-}
-
-static int be_stats_init(struct be_adapter *adapter)
-{
- struct be_dma_mem *cmd = &adapter->stats_cmd;
-
- if (adapter->generation == BE_GEN2) {
- cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
- } else {
- if (lancer_chip(adapter))
- cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
- else
- cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
- }
- cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
- GFP_KERNEL);
- if (cmd->va == NULL)
- return -1;
- memset(cmd->va, 0, cmd->size);
- return 0;
-}
-
-static void __devexit be_remove(struct pci_dev *pdev)
-{
- struct be_adapter *adapter = pci_get_drvdata(pdev);
-
- if (!adapter)
- return;
-
- cancel_delayed_work_sync(&adapter->work);
-
- unregister_netdev(adapter->netdev);
-
- be_clear(adapter);
-
- be_stats_cleanup(adapter);
-
- be_ctrl_cleanup(adapter);
-
- kfree(adapter->vf_cfg);
- be_sriov_disable(adapter);
-
- be_msix_disable(adapter);
-
- pci_set_drvdata(pdev, NULL);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-
- free_netdev(adapter->netdev);
-}
-
-static int be_get_config(struct be_adapter *adapter)
-{
- int status;
- u8 mac[ETH_ALEN];
-
- status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
- if (status)
- return status;
-
- status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
- &adapter->function_mode, &adapter->function_caps);
- if (status)
- return status;
-
- memset(mac, 0, ETH_ALEN);
-
- /* A default permanent address is given to each VF for Lancer*/
- if (be_physfn(adapter) || lancer_chip(adapter)) {
- status = be_cmd_mac_addr_query(adapter, mac,
- MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
-
- if (status)
- return status;
-
- if (!is_valid_ether_addr(mac))
- return -EADDRNOTAVAIL;
-
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
- memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
- }
-
- if (adapter->function_mode & 0x400)
- adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
- else
- adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
-
- status = be_cmd_get_cntl_attributes(adapter);
- if (status)
- return status;
-
- if ((num_vfs && adapter->sriov_enabled) ||
- (adapter->function_mode & 0x400) ||
- lancer_chip(adapter) || !be_physfn(adapter)) {
- adapter->num_tx_qs = 1;
- netif_set_real_num_tx_queues(adapter->netdev,
- adapter->num_tx_qs);
- } else {
- adapter->num_tx_qs = MAX_TX_QS;
- }
-
- return 0;
-}
-
-static int be_dev_family_check(struct be_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
- u32 sli_intf = 0, if_type;
-
- switch (pdev->device) {
- case BE_DEVICE_ID1:
- case OC_DEVICE_ID1:
- adapter->generation = BE_GEN2;
- break;
- case BE_DEVICE_ID2:
- case OC_DEVICE_ID2:
- adapter->generation = BE_GEN3;
- break;
- case OC_DEVICE_ID3:
- case OC_DEVICE_ID4:
- pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
- if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
- SLI_INTF_IF_TYPE_SHIFT;
-
- if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
- if_type != 0x02) {
- dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
- return -EINVAL;
- }
- adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
- SLI_INTF_FAMILY_SHIFT);
- adapter->generation = BE_GEN3;
- break;
- default:
- adapter->generation = 0;
- }
- return 0;
-}
-
-static int lancer_wait_ready(struct be_adapter *adapter)
-{
-#define SLIPORT_READY_TIMEOUT 500
- u32 sliport_status;
- int status = 0, i;
-
- for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
- sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
- if (sliport_status & SLIPORT_STATUS_RDY_MASK)
- break;
-
- msleep(20);
- }
-
- if (i == SLIPORT_READY_TIMEOUT)
- status = -1;
-
- return status;
-}
-
-static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
-{
- int status;
- u32 sliport_status, err, reset_needed;
- status = lancer_wait_ready(adapter);
- if (!status) {
- sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
- err = sliport_status & SLIPORT_STATUS_ERR_MASK;
- reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
- if (err && reset_needed) {
- iowrite32(SLI_PORT_CONTROL_IP_MASK,
- adapter->db + SLIPORT_CONTROL_OFFSET);
-
- /* check adapter has corrected the error */
- status = lancer_wait_ready(adapter);
- sliport_status = ioread32(adapter->db +
- SLIPORT_STATUS_OFFSET);
- sliport_status &= (SLIPORT_STATUS_ERR_MASK |
- SLIPORT_STATUS_RN_MASK);
- if (status || sliport_status)
- status = -1;
- } else if (err || reset_needed) {
- status = -1;
- }
- }
- return status;
-}
-
-static int __devinit be_probe(struct pci_dev *pdev,
- const struct pci_device_id *pdev_id)
-{
- int status = 0;
- struct be_adapter *adapter;
- struct net_device *netdev;
-
- status = pci_enable_device(pdev);
- if (status)
- goto do_none;
-
- status = pci_request_regions(pdev, DRV_NAME);
- if (status)
- goto disable_dev;
- pci_set_master(pdev);
-
- netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
- if (netdev == NULL) {
- status = -ENOMEM;
- goto rel_reg;
- }
- adapter = netdev_priv(netdev);
- adapter->pdev = pdev;
- pci_set_drvdata(pdev, adapter);
-
- status = be_dev_family_check(adapter);
- if (status)
- goto free_netdev;
-
- adapter->netdev = netdev;
- SET_NETDEV_DEV(netdev, &pdev->dev);
-
- status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (!status) {
- netdev->features |= NETIF_F_HIGHDMA;
- } else {
- status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (status) {
- dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
- goto free_netdev;
- }
- }
-
- be_sriov_enable(adapter);
- if (adapter->sriov_enabled) {
- adapter->vf_cfg = kcalloc(num_vfs,
- sizeof(struct be_vf_cfg), GFP_KERNEL);
-
- if (!adapter->vf_cfg)
- goto free_netdev;
- }
-
- status = be_ctrl_init(adapter);
- if (status)
- goto free_vf_cfg;
-
- if (lancer_chip(adapter)) {
- status = lancer_test_and_set_rdy_state(adapter);
- if (status) {
- dev_err(&pdev->dev, "Adapter in non recoverable error\n");
- goto ctrl_clean;
- }
- }
-
- /* sync up with fw's ready state */
- if (be_physfn(adapter)) {
- status = be_cmd_POST(adapter);
- if (status)
- goto ctrl_clean;
- }
-
- /* tell fw we're ready to fire cmds */
- status = be_cmd_fw_init(adapter);
- if (status)
- goto ctrl_clean;
-
- status = be_cmd_reset_function(adapter);
- if (status)
- goto ctrl_clean;
-
- status = be_stats_init(adapter);
- if (status)
- goto ctrl_clean;
-
- status = be_get_config(adapter);
- if (status)
- goto stats_clean;
-
- /* The INTR bit may be set in the card when probed by a kdump kernel
- * after a crash.
- */
- if (!lancer_chip(adapter))
- be_intr_set(adapter, false);
-
- be_msix_enable(adapter);
-
- INIT_DELAYED_WORK(&adapter->work, be_worker);
-
- status = be_setup(adapter);
- if (status)
- goto msix_disable;
-
- be_netdev_init(netdev);
- status = register_netdev(netdev);
- if (status != 0)
- goto unsetup;
-
- if (be_physfn(adapter) && adapter->sriov_enabled) {
- u8 mac_speed;
- u16 vf, lnk_speed;
-
- if (!lancer_chip(adapter)) {
- status = be_vf_eth_addr_config(adapter);
- if (status)
- goto unreg_netdev;
- }
-
- for (vf = 0; vf < num_vfs; vf++) {
- status = be_cmd_link_status_query(adapter, &mac_speed,
- &lnk_speed, vf + 1);
- if (!status)
- adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
- else
- goto unreg_netdev;
- }
- }
-
- dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
-
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
- return 0;
-
-unreg_netdev:
- unregister_netdev(netdev);
-unsetup:
- be_clear(adapter);
-msix_disable:
- be_msix_disable(adapter);
-stats_clean:
- be_stats_cleanup(adapter);
-ctrl_clean:
- be_ctrl_cleanup(adapter);
-free_vf_cfg:
- kfree(adapter->vf_cfg);
-free_netdev:
- be_sriov_disable(adapter);
- free_netdev(netdev);
- pci_set_drvdata(pdev, NULL);
-rel_reg:
- pci_release_regions(pdev);
-disable_dev:
- pci_disable_device(pdev);
-do_none:
- dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
- return status;
-}
-
-static int be_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- struct be_adapter *adapter = pci_get_drvdata(pdev);
- struct net_device *netdev = adapter->netdev;
-
- cancel_delayed_work_sync(&adapter->work);
- if (adapter->wol)
- be_setup_wol(adapter, true);
-
- netif_device_detach(netdev);
- if (netif_running(netdev)) {
- rtnl_lock();
- be_close(netdev);
- rtnl_unlock();
- }
- be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
- be_clear(adapter);
-
- be_msix_disable(adapter);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
- return 0;
-}
-
-static int be_resume(struct pci_dev *pdev)
-{
- int status = 0;
- struct be_adapter *adapter = pci_get_drvdata(pdev);
- struct net_device *netdev = adapter->netdev;
-
- netif_device_detach(netdev);
-
- status = pci_enable_device(pdev);
- if (status)
- return status;
-
- pci_set_power_state(pdev, 0);
- pci_restore_state(pdev);
-
- be_msix_enable(adapter);
- /* tell fw we're ready to fire cmds */
- status = be_cmd_fw_init(adapter);
- if (status)
- return status;
-
- be_setup(adapter);
- if (netif_running(netdev)) {
- rtnl_lock();
- be_open(netdev);
- rtnl_unlock();
- }
- netif_device_attach(netdev);
-
- if (adapter->wol)
- be_setup_wol(adapter, false);
-
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
- return 0;
-}
-
-/*
- * An FLR will stop BE from DMAing any data.
- */
-static void be_shutdown(struct pci_dev *pdev)
-{
- struct be_adapter *adapter = pci_get_drvdata(pdev);
-
- if (!adapter)
- return;
-
- cancel_delayed_work_sync(&adapter->work);
-
- netif_device_detach(adapter->netdev);
-
- if (adapter->wol)
- be_setup_wol(adapter, true);
-
- be_cmd_reset_function(adapter);
-
- pci_disable_device(pdev);
-}
-
-static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- struct be_adapter *adapter = pci_get_drvdata(pdev);
- struct net_device *netdev = adapter->netdev;
-
- dev_err(&adapter->pdev->dev, "EEH error detected\n");
-
- adapter->eeh_err = true;
-
- netif_device_detach(netdev);
-
- if (netif_running(netdev)) {
- rtnl_lock();
- be_close(netdev);
- rtnl_unlock();
- }
- be_clear(adapter);
-
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- pci_disable_device(pdev);
-
- return PCI_ERS_RESULT_NEED_RESET;
-}
-
-static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
-{
- struct be_adapter *adapter = pci_get_drvdata(pdev);
- int status;
-
- dev_info(&adapter->pdev->dev, "EEH reset\n");
- adapter->eeh_err = false;
-
- status = pci_enable_device(pdev);
- if (status)
- return PCI_ERS_RESULT_DISCONNECT;
-
- pci_set_master(pdev);
- pci_set_power_state(pdev, 0);
- pci_restore_state(pdev);
-
- /* Check if card is ok and fw is ready */
- status = be_cmd_POST(adapter);
- if (status)
- return PCI_ERS_RESULT_DISCONNECT;
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-static void be_eeh_resume(struct pci_dev *pdev)
-{
- int status = 0;
- struct be_adapter *adapter = pci_get_drvdata(pdev);
- struct net_device *netdev = adapter->netdev;
-
- dev_info(&adapter->pdev->dev, "EEH resume\n");
-
- pci_save_state(pdev);
-
- /* tell fw we're ready to fire cmds */
- status = be_cmd_fw_init(adapter);
- if (status)
- goto err;
-
- status = be_setup(adapter);
- if (status)
- goto err;
-
- if (netif_running(netdev)) {
- status = be_open(netdev);
- if (status)
- goto err;
- }
- netif_device_attach(netdev);
- return;
-err:
- dev_err(&adapter->pdev->dev, "EEH resume failed\n");
-}
-
-static struct pci_error_handlers be_eeh_handlers = {
- .error_detected = be_eeh_err_detected,
- .slot_reset = be_eeh_reset,
- .resume = be_eeh_resume,
-};
-
-static struct pci_driver be_driver = {
- .name = DRV_NAME,
- .id_table = be_dev_ids,
- .probe = be_probe,
- .remove = be_remove,
- .suspend = be_suspend,
- .resume = be_resume,
- .shutdown = be_shutdown,
- .err_handler = &be_eeh_handlers
-};
-
-static int __init be_init_module(void)
-{
- if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
- rx_frag_size != 2048) {
- printk(KERN_WARNING DRV_NAME
- " : Module param rx_frag_size must be 2048/4096/8192."
- " Using 2048\n");
- rx_frag_size = 2048;
- }
-
- return pci_register_driver(&be_driver);
-}
-module_init(be_init_module);
-
-static void __exit be_exit_module(void)
-{
- pci_unregister_driver(&be_driver);
-}
-module_exit(be_exit_module);
source "drivers/net/ethernet/amd/Kconfig"
source "drivers/net/ethernet/broadcom/Kconfig"
source "drivers/net/ethernet/chelsio/Kconfig"
+source "drivers/net/ethernet/emulex/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/qlogic/Kconfig"
obj-$(CONFIG_NET_VENDOR_AMD) += amd/
obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
+obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/
obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/
obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
--- /dev/null
+#
+# Emulex driver configuration
+#
+
+config NET_VENDOR_EMULEX
+ bool "Emulex devices"
+ depends on PCI && INET
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Emulex cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_EMULEX
+
+source "drivers/net/ethernet/emulex/benet/Kconfig"
+
+endif # NET_VENDOR_EMULEX
--- /dev/null
+#
+# Makefile for the Emulex device drivers.
+#
+
+obj-$(CONFIG_BE2NET) += benet/
--- /dev/null
+config BE2NET
+ tristate "ServerEngines' 10Gbps NIC - BladeEngine"
+ depends on PCI && INET
+ ---help---
+ This driver implements the NIC functionality for ServerEngines'
+ 10Gbps network adapter - BladeEngine.
--- /dev/null
+#
+# Makefile to build the network driver for ServerEngines' BladeEngine.
+#
+
+obj-$(CONFIG_BE2NET) += be2net.o
+
+be2net-y := be_main.o be_cmds.o be_ethtool.o
--- /dev/null
+/*
+ * Copyright (C) 2005 - 2011 Emulex
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#ifndef BE_H
+#define BE_H
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <net/tcp.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <linux/if_vlan.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/u64_stats_sync.h>
+
+#include "be_hw.h"
+
+#define DRV_VER "4.0.100u"
+#define DRV_NAME "be2net"
+#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
+#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
+#define OC_NAME "Emulex OneConnect 10Gbps NIC"
+#define OC_NAME_BE OC_NAME "(be3)"
+#define OC_NAME_LANCER OC_NAME "(Lancer)"
+#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
+
+#define BE_VENDOR_ID 0x19a2
+#define EMULEX_VENDOR_ID 0x10df
+#define BE_DEVICE_ID1 0x211
+#define BE_DEVICE_ID2 0x221
+#define OC_DEVICE_ID1 0x700 /* Device Id for BE2 cards */
+#define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */
+#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
+#define OC_DEVICE_ID4 0xe228 /* Device id for VF in Lancer */
+
+static inline char *nic_name(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case OC_DEVICE_ID1:
+ return OC_NAME;
+ case OC_DEVICE_ID2:
+ return OC_NAME_BE;
+ case OC_DEVICE_ID3:
+ case OC_DEVICE_ID4:
+ return OC_NAME_LANCER;
+ case BE_DEVICE_ID2:
+ return BE3_NAME;
+ default:
+ return BE_NAME;
+ }
+}
+
+/* Number of bytes of an RX frame that are copied to skb->data */
+#define BE_HDR_LEN ((u16) 64)
+#define BE_MAX_JUMBO_FRAME_SIZE 9018
+#define BE_MIN_MTU 256
+
+#define BE_NUM_VLANS_SUPPORTED 64
+#define BE_MAX_EQD 96
+#define BE_MAX_TX_FRAG_COUNT 30
+
+#define EVNT_Q_LEN 1024
+#define TX_Q_LEN 2048
+#define TX_CQ_LEN 1024
+#define RX_Q_LEN 1024 /* Does not support any other value */
+#define RX_CQ_LEN 1024
+#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
+#define MCC_CQ_LEN 256
+
+#define MAX_RSS_QS 4 /* BE limit is 4 queues/port */
+#define MAX_RX_QS (MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */
+#define MAX_TX_QS 8
+#define BE_MAX_MSIX_VECTORS (MAX_RX_QS + 1)/* RX + TX */
+#define BE_NAPI_WEIGHT 64
+#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
+#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
+
+#define FW_VER_LEN 32
+
+struct be_dma_mem {
+ void *va;
+ dma_addr_t dma;
+ u32 size;
+};
+
+struct be_queue_info {
+ struct be_dma_mem dma_mem;
+ u16 len;
+ u16 entry_size; /* Size of an element in the queue */
+ u16 id;
+ u16 tail, head;
+ bool created;
+ atomic_t used; /* Number of valid elements in the queue */
+};
+
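+/* Fast modulo for power-of-2 ring sizes: val & (limit - 1) == val % limit
+ * when limit is a power of 2; e.g. MODULO(1025, 1024) == 1. The BUG_ON
+ * below rejects limits for which the masking trick does not hold.
+ */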
+static inline u32 MODULO(u16 val, u16 limit)
+{
+ BUG_ON(limit & (limit - 1));
+ return val & (limit - 1);
+}
+
+static inline void index_adv(u16 *index, u16 val, u16 limit)
+{
+ *index = MODULO((*index + val), limit);
+}
+
+static inline void index_inc(u16 *index, u16 limit)
+{
+ *index = MODULO((*index + 1), limit);
+}
+
+static inline void *queue_head_node(struct be_queue_info *q)
+{
+ return q->dma_mem.va + q->head * q->entry_size;
+}
+
+static inline void *queue_tail_node(struct be_queue_info *q)
+{
+ return q->dma_mem.va + q->tail * q->entry_size;
+}
+
+static inline void queue_head_inc(struct be_queue_info *q)
+{
+ index_inc(&q->head, q->len);
+}
+
+static inline void queue_tail_inc(struct be_queue_info *q)
+{
+ index_inc(&q->tail, q->len);
+}
+
+struct be_eq_obj {
+ struct be_queue_info q;
+ char desc[32];
+
+ /* Adaptive interrupt coalescing (AIC) info */
+ bool enable_aic;
+ u16 min_eqd; /* in usecs */
+ u16 max_eqd; /* in usecs */
+ u16 cur_eqd; /* in usecs */
+ u8 eq_idx;
+
+ struct napi_struct napi;
+};
+
+struct be_mcc_obj {
+ struct be_queue_info q;
+ struct be_queue_info cq;
+ bool rearm_cq;
+};
+
+struct be_tx_stats {
+ u64 tx_bytes;
+ u64 tx_pkts;
+ u64 tx_reqs;
+ u64 tx_wrbs;
+ u64 tx_compl;
+ ulong tx_jiffies;
+ u32 tx_stops;
+ struct u64_stats_sync sync;
+ struct u64_stats_sync sync_compl;
+};
+
+struct be_tx_obj {
+ struct be_queue_info q;
+ struct be_queue_info cq;
+ /* Remember the skbs that were transmitted */
+ struct sk_buff *sent_skb_list[TX_Q_LEN];
+ struct be_tx_stats stats;
+};
+
+/* Struct to remember the pages posted for rx frags */
+struct be_rx_page_info {
+ struct page *page;
+ DEFINE_DMA_UNMAP_ADDR(bus);
+ u16 page_offset;
+ bool last_page_user;
+};
+
+struct be_rx_stats {
+ u64 rx_bytes;
+ u64 rx_pkts;
+ u64 rx_pkts_prev;
+ ulong rx_jiffies;
+ u32 rx_drops_no_skbs; /* skb allocation errors */
+ u32 rx_drops_no_frags; /* HW has no fetched frags */
+ u32 rx_post_fail; /* page post alloc failures */
+ u32 rx_polls; /* NAPI calls */
+ u32 rx_events;
+ u32 rx_compl;
+ u32 rx_mcast_pkts;
+ u32 rx_compl_err; /* completions with err set */
+ u32 rx_pps; /* pkts per second */
+ struct u64_stats_sync sync;
+};
+
+struct be_rx_compl_info {
+ u32 rss_hash;
+ u16 vlan_tag;
+ u16 pkt_size;
+ u16 rxq_idx;
+ u16 port;
+ u8 vlanf;
+ u8 num_rcvd;
+ u8 err;
+ u8 ipf;
+ u8 tcpf;
+ u8 udpf;
+ u8 ip_csum;
+ u8 l4_csum;
+ u8 ipv6;
+ u8 vtm;
+ u8 pkt_type;
+};
+
+struct be_rx_obj {
+ struct be_adapter *adapter;
+ struct be_queue_info q;
+ struct be_queue_info cq;
+ struct be_rx_compl_info rxcp;
+ struct be_rx_page_info page_info_tbl[RX_Q_LEN];
+ struct be_eq_obj rx_eq;
+ struct be_rx_stats stats;
+ u8 rss_id;
+ bool rx_post_starved; /* Zero rx frags have been posted to BE */
+ u32 cache_line_barrier[16];
+};
+
+struct be_drv_stats {
+ u8 be_on_die_temperature;
+ u32 tx_events;
+ u32 eth_red_drops;
+ u32 rx_drops_no_pbuf;
+ u32 rx_drops_no_txpb;
+ u32 rx_drops_no_erx_descr;
+ u32 rx_drops_no_tpre_descr;
+ u32 rx_drops_too_many_frags;
+ u32 rx_drops_invalid_ring;
+ u32 forwarded_packets;
+ u32 rx_drops_mtu;
+ u32 rx_crc_errors;
+ u32 rx_alignment_symbol_errors;
+ u32 rx_pause_frames;
+ u32 rx_priority_pause_frames;
+ u32 rx_control_frames;
+ u32 rx_in_range_errors;
+ u32 rx_out_range_errors;
+ u32 rx_frame_too_long;
+ u32 rx_address_match_errors;
+ u32 rx_dropped_too_small;
+ u32 rx_dropped_too_short;
+ u32 rx_dropped_header_too_small;
+ u32 rx_dropped_tcp_length;
+ u32 rx_dropped_runt;
+ u32 rx_ip_checksum_errs;
+ u32 rx_tcp_checksum_errs;
+ u32 rx_udp_checksum_errs;
+ u32 tx_pauseframes;
+ u32 tx_priority_pauseframes;
+ u32 tx_controlframes;
+ u32 rxpp_fifo_overflow_drop;
+ u32 rx_input_fifo_overflow_drop;
+ u32 pmem_fifo_overflow_drop;
+ u32 jabber_events;
+};
+
+struct be_vf_cfg {
+ unsigned char vf_mac_addr[ETH_ALEN];
+ u32 vf_if_handle;
+ u32 vf_pmac_id;
+ u16 vf_vlan_tag;
+ u32 vf_tx_rate;
+};
+
+#define BE_INVALID_PMAC_ID 0xffffffff
+
+struct be_adapter {
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+
+ u8 __iomem *csr;
+ u8 __iomem *db; /* Door Bell */
+ u8 __iomem *pcicfg; /* PCI config space */
+
+ struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
+ struct be_dma_mem mbox_mem;
+	/* Mbox mem is adjusted to align to 16 bytes. The allocated addr
+	 * is stored for freeing purposes */
+ struct be_dma_mem mbox_mem_alloced;
+
+ struct be_mcc_obj mcc_obj;
+ spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
+ spinlock_t mcc_cq_lock;
+
+ struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
+ u32 num_msix_vec;
+ bool isr_registered;
+
+ /* TX Rings */
+ struct be_eq_obj tx_eq;
+ struct be_tx_obj tx_obj[MAX_TX_QS];
+ u8 num_tx_qs;
+
+ u32 cache_line_break[8];
+
+ /* Rx rings */
+ struct be_rx_obj rx_obj[MAX_RX_QS];
+ u32 num_rx_qs;
+ u32 big_page_size; /* Compounded page size shared by rx wrbs */
+
+ u8 eq_next_idx;
+ struct be_drv_stats drv_stats;
+
+ u16 vlans_added;
+ u16 max_vlans; /* Number of vlans supported */
+ u8 vlan_tag[VLAN_N_VID];
+ u8 vlan_prio_bmap; /* Available Priority BitMap */
+ u16 recommended_prio; /* Recommended Priority */
+ struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
+
+ struct be_dma_mem stats_cmd;
+ /* Work queue used to perform periodic tasks like getting statistics */
+ struct delayed_work work;
+ u16 work_counter;
+
+ /* Ethtool knobs and info */
+ char fw_ver[FW_VER_LEN];
+ u32 if_handle; /* Used to configure filtering */
+ u32 pmac_id; /* MAC addr handle used by BE card */
+ u32 beacon_state; /* for set_phys_id */
+
+ bool eeh_err;
+ bool link_up;
+ u32 port_num;
+ bool promiscuous;
+ bool wol;
+ u32 function_mode;
+ u32 function_caps;
+ u32 rx_fc; /* Rx flow control */
+ u32 tx_fc; /* Tx flow control */
+ bool ue_detected;
+ bool stats_cmd_sent;
+ int link_speed;
+ u8 port_type;
+ u8 transceiver;
+ u8 autoneg;
+ u8 generation; /* BladeEngine ASIC generation */
+ u32 flash_status;
+ struct completion flash_compl;
+
+ bool be3_native;
+ bool sriov_enabled;
+ struct be_vf_cfg *vf_cfg;
+ u8 is_virtfn;
+ u32 sli_family;
+ u8 hba_port_num;
+ u16 pvid;
+};
+
+#define be_physfn(adapter) (!adapter->is_virtfn)
+
+/* BladeEngine Generation numbers */
+#define BE_GEN2 2
+#define BE_GEN3 3
+
+#define ON 1
+#define OFF 0
+#define lancer_chip(adapter) ((adapter->pdev->device == OC_DEVICE_ID3) || \
+ (adapter->pdev->device == OC_DEVICE_ID4))
+
+extern const struct ethtool_ops be_ethtool_ops;
+
+#define msix_enabled(adapter) (adapter->num_msix_vec > 0)
+#define tx_stats(txo) (&txo->stats)
+#define rx_stats(rxo) (&rxo->stats)
+
+#define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops)
+
+#define for_all_rx_queues(adapter, rxo, i) \
+ for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs; \
+ i++, rxo++)
+
+/* Just skip the first default non-rss queue */
+#define for_all_rss_queues(adapter, rxo, i) \
+ for (i = 0, rxo = &adapter->rx_obj[i+1]; i < (adapter->num_rx_qs - 1);\
+ i++, rxo++)
+
+#define for_all_tx_queues(adapter, txo, i) \
+ for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs; \
+ i++, txo++)
+
+#define PAGE_SHIFT_4K 12
+#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
+
+/* Returns number of pages spanned by the data starting at the given addr */
+#define PAGES_4K_SPANNED(_address, size) \
+ ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
+ (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
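+/* e.g. 8200 bytes starting 100 bytes into a 4K page span 3 pages:
+ * (100 + 8200 + 4095) >> 12 == 3
+ */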
+
+/* Byte offset into the page corresponding to given address */
+#define OFFSET_IN_PAGE(addr) \
+ ((size_t)(addr) & (PAGE_SIZE_4K-1))
+
+/* Returns bit offset within a DWORD of a bitfield */
+#define AMAP_BIT_OFFSET(_struct, field) \
+ (((size_t)&(((_struct *)0)->field))%32)
+
+/* Returns the bit mask of the field that is NOT shifted into location. */
+static inline u32 amap_mask(u32 bitsize)
+{
+ return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
+}
+
+static inline void
+amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
+{
+ u32 *dw = (u32 *) ptr + dw_offset;
+ *dw &= ~(mask << offset);
+ *dw |= (mask & value) << offset;
+}
+
+#define AMAP_SET_BITS(_struct, field, ptr, val) \
+ amap_set(ptr, \
+ offsetof(_struct, field)/32, \
+ amap_mask(sizeof(((_struct *)0)->field)), \
+ AMAP_BIT_OFFSET(_struct, field), \
+ val)
+
+static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
+{
+ u32 *dw = (u32 *) ptr;
+ return mask & (*(dw + dw_offset) >> offset);
+}
+
+#define AMAP_GET_BITS(_struct, field, ptr) \
+ amap_get(ptr, \
+ offsetof(_struct, field)/32, \
+ amap_mask(sizeof(((_struct *)0)->field)), \
+ AMAP_BIT_OFFSET(_struct, field))
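+
+/* In the amap structs (be_hw.h), each field is a u8 array whose length is
+ * the field's width in bits, so its byte offset doubles as a bit offset:
+ * offsetof()/32 gives the dword index, AMAP_BIT_OFFSET() the bit position
+ * within that dword, and sizeof() the bit width. E.g.
+ * AMAP_SET_BITS(struct amap_eq_context, valid, ctxt, 1) masks out the old
+ * field bits and ORs in the new value; AMAP_GET_BITS is the inverse.
+ */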
+
+#define be_dws_cpu_to_le(wrb, len) swap_dws(wrb, len)
+#define be_dws_le_to_cpu(wrb, len) swap_dws(wrb, len)
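+/* On little-endian hosts both macros compile away to nothing; on big-endian
+ * hosts each 32-bit word of the wrb is byte-swapped in place. len must be
+ * a multiple of 4.
+ */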
+static inline void swap_dws(void *wrb, int len)
+{
+#ifdef __BIG_ENDIAN
+ u32 *dw = wrb;
+ BUG_ON(len % 4);
+ do {
+ *dw = cpu_to_le32(*dw);
+ dw++;
+ len -= 4;
+ } while (len);
+#endif /* __BIG_ENDIAN */
+}
+
+static inline u8 is_tcp_pkt(struct sk_buff *skb)
+{
+ u8 val = 0;
+
+ if (ip_hdr(skb)->version == 4)
+ val = (ip_hdr(skb)->protocol == IPPROTO_TCP);
+ else if (ip_hdr(skb)->version == 6)
+ val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP);
+
+ return val;
+}
+
+static inline u8 is_udp_pkt(struct sk_buff *skb)
+{
+ u8 val = 0;
+
+ if (ip_hdr(skb)->version == 4)
+ val = (ip_hdr(skb)->protocol == IPPROTO_UDP);
+ else if (ip_hdr(skb)->version == 6)
+ val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP);
+
+ return val;
+}
+
+static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
+{
+ u32 sli_intf;
+
+ pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
+ adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
+}
+
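+/* Build a VF MAC address: keep the 3-byte OUI of the PF's current MAC and
+ * derive the remaining 3 bytes from a jhash of the full PF MAC, giving a
+ * deterministic, adapter-specific address.
+ */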
+static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
+{
+ u32 addr;
+
+ addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
+
+ mac[5] = (u8)(addr & 0xFF);
+ mac[4] = (u8)((addr >> 8) & 0xFF);
+ mac[3] = (u8)((addr >> 16) & 0xFF);
+ /* Use the OUI from the current MAC address */
+ memcpy(mac, adapter->netdev->dev_addr, 3);
+}
+
+static inline bool be_multi_rxq(const struct be_adapter *adapter)
+{
+ return adapter->num_rx_qs > 1;
+}
+
+extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
+ u16 num_popped);
+extern void be_link_status_update(struct be_adapter *adapter, u32 link_status);
+extern void be_parse_stats(struct be_adapter *adapter);
+extern int be_load_fw(struct be_adapter *adapter, u8 *func);
+#endif /* BE_H */
--- /dev/null
+/*
+ * Copyright (C) 2005 - 2011 Emulex
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#include "be.h"
+#include "be_cmds.h"
+
+/* Must be a power of 2 or else MODULO will BUG_ON */
+static int be_get_temp_freq = 32;
+
+static void be_mcc_notify(struct be_adapter *adapter)
+{
+ struct be_queue_info *mccq = &adapter->mcc_obj.q;
+ u32 val = 0;
+
+ if (adapter->eeh_err) {
+ dev_info(&adapter->pdev->dev,
+			"Error detected in card! Cannot issue commands\n");
+ return;
+ }
+
+ val |= mccq->id & DB_MCCQ_RING_ID_MASK;
+ val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
+
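+	/* Ensure the posted WRB contents are visible in memory before the
+	 * doorbell write below tells the hw to process them. */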
+ wmb();
+ iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
+}
+
+/* To check if valid bit is set, check the entire word as we don't know
+ * the endianness of the data (old entry is host endian while a new entry is
+ * little endian) */
+static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
+{
+ if (compl->flags != 0) {
+ compl->flags = le32_to_cpu(compl->flags);
+ BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+/* Need to reset the entire word that houses the valid bit */
+static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
+{
+ compl->flags = 0;
+}
+
+static int be_mcc_compl_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl)
+{
+ u16 compl_status, extd_status;
+
+ /* Just swap the status to host endian; mcc tag is opaquely copied
+ * from mcc_wrb */
+ be_dws_le_to_cpu(compl, 4);
+
+ compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
+ CQE_STATUS_COMPL_MASK;
+
+ if (((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) ||
+ (compl->tag0 == OPCODE_COMMON_WRITE_OBJECT)) &&
+ (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
+ adapter->flash_status = compl_status;
+ complete(&adapter->flash_compl);
+ }
+
+ if (compl_status == MCC_STATUS_SUCCESS) {
+ if (((compl->tag0 == OPCODE_ETH_GET_STATISTICS) ||
+ (compl->tag0 == OPCODE_ETH_GET_PPORT_STATS)) &&
+ (compl->tag1 == CMD_SUBSYSTEM_ETH)) {
+ be_parse_stats(adapter);
+ adapter->stats_cmd_sent = false;
+ }
+ } else {
+ if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
+ compl_status == MCC_STATUS_ILLEGAL_REQUEST)
+ goto done;
+
+ if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
+			dev_warn(&adapter->pdev->dev, "This domain (VM) is not "
+ "permitted to execute this cmd (opcode %d)\n",
+ compl->tag0);
+ } else {
+ extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
+ CQE_STATUS_EXTD_MASK;
+ dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:"
+ "status %d, extd-status %d\n",
+ compl->tag0, compl_status, extd_status);
+ }
+ }
+done:
+ return compl_status;
+}
+
+/* Link state evt is a string of bytes; no need for endian swapping */
+static void be_async_link_state_process(struct be_adapter *adapter,
+ struct be_async_event_link_state *evt)
+{
+ be_link_status_update(adapter, evt->port_link_status);
+}
+
+/* Grp5 CoS Priority evt */
+static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
+ struct be_async_event_grp5_cos_priority *evt)
+{
+ if (evt->valid) {
+ adapter->vlan_prio_bmap = evt->available_priority_bmap;
+ adapter->recommended_prio &= ~VLAN_PRIO_MASK;
+ adapter->recommended_prio =
+ evt->reco_default_priority << VLAN_PRIO_SHIFT;
+ }
+}
+
+/* Grp5 QOS Speed evt */
+static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
+ struct be_async_event_grp5_qos_link_speed *evt)
+{
+ if (evt->physical_port == adapter->port_num) {
+ /* qos_link_speed is in units of 10 Mbps */
+ adapter->link_speed = evt->qos_link_speed * 10;
+ }
+}
+
+/* Grp5 PVID evt */
+static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
+ struct be_async_event_grp5_pvid_state *evt)
+{
+ if (evt->enabled)
+ adapter->pvid = le16_to_cpu(evt->tag);
+ else
+ adapter->pvid = 0;
+}
+
+static void be_async_grp5_evt_process(struct be_adapter *adapter,
+ u32 trailer, struct be_mcc_compl *evt)
+{
+ u8 event_type = 0;
+
+ event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
+ ASYNC_TRAILER_EVENT_TYPE_MASK;
+
+ switch (event_type) {
+ case ASYNC_EVENT_COS_PRIORITY:
+ be_async_grp5_cos_priority_process(adapter,
+ (struct be_async_event_grp5_cos_priority *)evt);
+ break;
+ case ASYNC_EVENT_QOS_SPEED:
+ be_async_grp5_qos_speed_process(adapter,
+ (struct be_async_event_grp5_qos_link_speed *)evt);
+ break;
+ case ASYNC_EVENT_PVID_STATE:
+ be_async_grp5_pvid_state_process(adapter,
+ (struct be_async_event_grp5_pvid_state *)evt);
+ break;
+ default:
+ dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
+ break;
+ }
+}
+
+static inline bool is_link_state_evt(u32 trailer)
+{
+ return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
+ ASYNC_TRAILER_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_LINK_STATE;
+}
+
+static inline bool is_grp5_evt(u32 trailer)
+{
+ return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
+ ASYNC_TRAILER_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_GRP_5);
+}
+
+static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
+{
+ struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
+ struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
+
+ if (be_mcc_compl_is_new(compl)) {
+ queue_tail_inc(mcc_cq);
+ return compl;
+ }
+ return NULL;
+}
+
+void be_async_mcc_enable(struct be_adapter *adapter)
+{
+ spin_lock_bh(&adapter->mcc_cq_lock);
+
+ be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
+ adapter->mcc_obj.rearm_cq = true;
+
+ spin_unlock_bh(&adapter->mcc_cq_lock);
+}
+
+void be_async_mcc_disable(struct be_adapter *adapter)
+{
+ adapter->mcc_obj.rearm_cq = false;
+}
+
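+/* Drain the MCC completion queue: async entries (link state / grp5 events)
+ * are dispatched to their handlers, while synchronous command completions
+ * update *status and free up a queue slot. Returns the number of CQ
+ * entries consumed so the caller can re-arm the CQ with that count.
+ */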
+int be_process_mcc(struct be_adapter *adapter, int *status)
+{
+ struct be_mcc_compl *compl;
+ int num = 0;
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+
+ spin_lock_bh(&adapter->mcc_cq_lock);
+ while ((compl = be_mcc_compl_get(adapter))) {
+ if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
+ /* Interpret flags as an async trailer */
+ if (is_link_state_evt(compl->flags))
+ be_async_link_state_process(adapter,
+ (struct be_async_event_link_state *) compl);
+ else if (is_grp5_evt(compl->flags))
+ be_async_grp5_evt_process(adapter,
+ compl->flags, compl);
+ } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
+ *status = be_mcc_compl_process(adapter, compl);
+ atomic_dec(&mcc_obj->q.used);
+ }
+ be_mcc_compl_use(compl);
+ num++;
+ }
+
+ spin_unlock_bh(&adapter->mcc_cq_lock);
+ return num;
+}
+
+/* Wait till no more pending mcc requests are present */
+static int be_mcc_wait_compl(struct be_adapter *adapter)
+{
+#define mcc_timeout 120000 /* 12s timeout */
+ int i, num, status = 0;
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+
+ if (adapter->eeh_err)
+ return -EIO;
+
+ for (i = 0; i < mcc_timeout; i++) {
+ num = be_process_mcc(adapter, &status);
+ if (num)
+ be_cq_notify(adapter, mcc_obj->cq.id,
+ mcc_obj->rearm_cq, num);
+
+ if (atomic_read(&mcc_obj->q.used) == 0)
+ break;
+ udelay(100);
+ }
+ if (i == mcc_timeout) {
+ dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
+ return -1;
+ }
+ return status;
+}
+
+/* Notify MCC requests and wait for completion */
+static int be_mcc_notify_wait(struct be_adapter *adapter)
+{
+ be_mcc_notify(adapter);
+ return be_mcc_wait_compl(adapter);
+}
+
+static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
+{
+ int msecs = 0;
+ u32 ready;
+
+ if (adapter->eeh_err) {
+ dev_err(&adapter->pdev->dev,
+			"Error detected in card! Cannot issue commands\n");
+ return -EIO;
+ }
+
+ do {
+ ready = ioread32(db);
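+		/* An all-ones read typically means the device has dropped
+		 * off the bus (surprise removal or PCI error isolation). */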
+ if (ready == 0xffffffff) {
+ dev_err(&adapter->pdev->dev,
+ "pci slot disconnected\n");
+ return -1;
+ }
+
+ ready &= MPU_MAILBOX_DB_RDY_MASK;
+ if (ready)
+ break;
+
+ if (msecs > 4000) {
+ dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
+ if (!lancer_chip(adapter))
+ be_detect_dump_ue(adapter);
+ return -1;
+ }
+
+ msleep(1);
+ msecs++;
+ } while (true);
+
+ return 0;
+}
+
+/*
+ * Inserts the mailbox address into the doorbell in two steps and polls
+ * on the mbox doorbell till a command completion (or a timeout) occurs
+ */
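+/* A note on the address split (see the two doorbell writes below): bits
+ * 63:34 of the mailbox DMA address go in the first write and bits 33:4 in
+ * the second, so bits 3:0 are implicitly zero; this is why the mailbox
+ * memory is 16-byte aligned at allocation time.
+ */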
+static int be_mbox_notify_wait(struct be_adapter *adapter)
+{
+ int status;
+ u32 val = 0;
+ void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
+ struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
+ struct be_mcc_mailbox *mbox = mbox_mem->va;
+ struct be_mcc_compl *compl = &mbox->compl;
+
+ /* wait for ready to be set */
+ status = be_mbox_db_ready_wait(adapter, db);
+ if (status != 0)
+ return status;
+
+ val |= MPU_MAILBOX_DB_HI_MASK;
+ /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
+ val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
+ iowrite32(val, db);
+
+ /* wait for ready to be set */
+ status = be_mbox_db_ready_wait(adapter, db);
+ if (status != 0)
+ return status;
+
+ val = 0;
+ /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
+ val |= (u32)(mbox_mem->dma >> 4) << 2;
+ iowrite32(val, db);
+
+ status = be_mbox_db_ready_wait(adapter, db);
+ if (status != 0)
+ return status;
+
+ /* A cq entry has been made now */
+ if (be_mcc_compl_is_new(compl)) {
+ status = be_mcc_compl_process(adapter, &mbox->compl);
+ be_mcc_compl_use(compl);
+ if (status)
+ return status;
+ } else {
+ dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
+{
+ u32 sem;
+
+ if (lancer_chip(adapter))
+ sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
+ else
+ sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
+
+ *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
+ if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
+ return -1;
+ else
+ return 0;
+}
+
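+/* Poll the POST stage semaphore until fw reports ARMFW_RDY, sleeping 2s
+ * between polls for a total timeout of 40s. The sleep is interruptible so
+ * a signal (e.g. ^C during modprobe) aborts the wait with -EINTR.
+ */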
+int be_cmd_POST(struct be_adapter *adapter)
+{
+ u16 stage;
+ int status, timeout = 0;
+ struct device *dev = &adapter->pdev->dev;
+
+ do {
+ status = be_POST_stage_get(adapter, &stage);
+ if (status) {
+ dev_err(dev, "POST error; stage=0x%x\n", stage);
+ return -1;
+ } else if (stage != POST_STAGE_ARMFW_RDY) {
+ if (msleep_interruptible(2000)) {
+ dev_err(dev, "Waiting for POST aborted\n");
+ return -EINTR;
+ }
+ timeout += 2;
+ } else {
+ return 0;
+ }
+ } while (timeout < 40);
+
+ dev_err(dev, "POST timeout; stage=0x%x\n", stage);
+ return -1;
+}
+
+static inline void *embedded_payload(struct be_mcc_wrb *wrb)
+{
+ return wrb->payload.embedded_payload;
+}
+
+static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
+{
+ return &wrb->payload.sgl[0];
+}
+
+/* Don't touch the hdr after it's prepared */
+static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
+ bool embedded, u8 sge_cnt, u32 opcode)
+{
+ if (embedded)
+ wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
+ else
+ wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
+ MCC_WRB_SGE_CNT_SHIFT;
+ wrb->payload_length = payload_len;
+ wrb->tag0 = opcode;
+ be_dws_cpu_to_le(wrb, 8);
+}
+
+/* Don't touch the hdr after it's prepared */
+static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
+ u8 subsystem, u8 opcode, int cmd_len)
+{
+ req_hdr->opcode = opcode;
+ req_hdr->subsystem = subsystem;
+ req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
+ req_hdr->version = 0;
+}
+
+static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
+ struct be_dma_mem *mem)
+{
+ int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
+ u64 dma = (u64)mem->dma;
+
+ for (i = 0; i < buf_pages; i++) {
+ pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
+ pages[i].hi = cpu_to_le32(upper_32_bits(dma));
+ dma += PAGE_SIZE_4K;
+ }
+}
+
+/* Converts interrupt delay in microseconds to multiplier value */
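+/* e.g. usec_delay = 8 gives interrupt_rate = 125000/s and a rounded
+ * multiplier of 4; usec_delay = 0 disables coalescing (multiplier 0) and
+ * very large delays saturate at the hw maximum of 1023.
+ */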
+static u32 eq_delay_to_mult(u32 usec_delay)
+{
+#define MAX_INTR_RATE 651042
+ const u32 round = 10;
+ u32 multiplier;
+
+ if (usec_delay == 0)
+ multiplier = 0;
+ else {
+ u32 interrupt_rate = 1000000 / usec_delay;
+ /* Max delay, corresponding to the lowest interrupt rate */
+ if (interrupt_rate == 0)
+ multiplier = 1023;
+ else {
+ multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
+ multiplier /= interrupt_rate;
+ /* Round the multiplier to the closest value.*/
+ multiplier = (multiplier + round/2) / round;
+ multiplier = min(multiplier, (u32)1023);
+ }
+ }
+ return multiplier;
+}
+
+static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
+{
+ struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
+ struct be_mcc_wrb *wrb
+ = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
+ memset(wrb, 0, sizeof(*wrb));
+ return wrb;
+}
+
+static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
+{
+ struct be_queue_info *mccq = &adapter->mcc_obj.q;
+ struct be_mcc_wrb *wrb;
+
+ if (atomic_read(&mccq->used) >= mccq->len) {
+ dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
+ return NULL;
+ }
+
+ wrb = queue_head_node(mccq);
+ queue_head_inc(mccq);
+ atomic_inc(&mccq->used);
+ memset(wrb, 0, sizeof(*wrb));
+ return wrb;
+}
+
+/* Tell fw we're about to start firing cmds by writing a
+ * special pattern across the wrb hdr; uses mbox
+ */
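+/* The signature written below (0xFF 12 34 FF FF 56 78 FF) is a fixed
+ * handshake pattern; be_cmd_fw_clean() writes the complementary
+ * 0xFF AA BB FF FF CC DD FF pattern at teardown.
+ */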
+int be_cmd_fw_init(struct be_adapter *adapter)
+{
+ u8 *wrb;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = (u8 *)wrb_from_mbox(adapter);
+ *wrb++ = 0xFF;
+ *wrb++ = 0x12;
+ *wrb++ = 0x34;
+ *wrb++ = 0xFF;
+ *wrb++ = 0xFF;
+ *wrb++ = 0x56;
+ *wrb++ = 0x78;
+ *wrb = 0xFF;
+
+ status = be_mbox_notify_wait(adapter);
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+/* Tell fw we're done with firing cmds by writing a
+ * special pattern across the wrb hdr; uses mbox
+ */
+int be_cmd_fw_clean(struct be_adapter *adapter)
+{
+ u8 *wrb;
+ int status;
+
+ if (adapter->eeh_err)
+ return -EIO;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = (u8 *)wrb_from_mbox(adapter);
+ *wrb++ = 0xFF;
+ *wrb++ = 0xAA;
+ *wrb++ = 0xBB;
+ *wrb++ = 0xFF;
+ *wrb++ = 0xFF;
+ *wrb++ = 0xCC;
+ *wrb++ = 0xDD;
+ *wrb = 0xFF;
+
+ status = be_mbox_notify_wait(adapter);
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+int be_cmd_eq_create(struct be_adapter *adapter,
+ struct be_queue_info *eq, int eq_delay)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_eq_create *req;
+ struct be_dma_mem *q_mem = &eq->dma_mem;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_EQ_CREATE, sizeof(*req));
+
+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+
+ AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
+	/* 4 byte eqe */
+ AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
+ AMAP_SET_BITS(struct amap_eq_context, count, req->context,
+ __ilog2_u32(eq->len/256));
+ AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
+ eq_delay_to_mult(eq_delay));
+ be_dws_cpu_to_le(req->context, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
+ eq->id = le16_to_cpu(resp->eq_id);
+ eq->created = true;
+ }
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+/* Uses mbox */
+int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
+ u8 type, bool permanent, u32 if_handle)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_mac_query *req;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_NTWK_MAC_QUERY);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));
+
+ req->type = type;
+ if (permanent) {
+ req->permanent = 1;
+ } else {
+ req->if_id = cpu_to_le16((u16) if_handle);
+ req->permanent = 0;
+ }
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
+ memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
+ }
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+/* Uses synchronous MCCQ */
+int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
+ u32 if_id, u32 *pmac_id, u32 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_pmac_add *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_NTWK_PMAC_ADD);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
+
+ req->hdr.domain = domain;
+ req->if_id = cpu_to_le32(if_id);
+ memcpy(req->mac_address, mac_addr, ETH_ALEN);
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
+ *pmac_id = le32_to_cpu(resp->pmac_id);
+ }
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses synchronous MCCQ */
+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_pmac_del *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_NTWK_PMAC_DEL);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
+
+ req->hdr.domain = dom;
+ req->if_id = cpu_to_le32(if_id);
+ req->pmac_id = cpu_to_le32(pmac_id);
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses Mbox */
+int be_cmd_cq_create(struct be_adapter *adapter,
+ struct be_queue_info *cq, struct be_queue_info *eq,
+ bool sol_evts, bool no_delay, int coalesce_wm)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_cq_create *req;
+ struct be_dma_mem *q_mem = &cq->dma_mem;
+ void *ctxt;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+ ctxt = &req->context;
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_CQ_CREATE);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_CQ_CREATE, sizeof(*req));
+
+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+ if (lancer_chip(adapter)) {
+ req->hdr.version = 2;
+ req->page_size = 1; /* 1 for 4K */
+ AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
+ no_delay);
+ AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
+ __ilog2_u32(cq->len/256));
+ AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
+ ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
+ ctxt, eq->id);
+ AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
+ } else {
+ AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
+ coalesce_wm);
+ AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
+ ctxt, no_delay);
+ AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
+ __ilog2_u32(cq->len/256));
+ AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_be, solevent,
+ ctxt, sol_evts);
+ AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
+ AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
+ }
+
+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
+ cq->id = le16_to_cpu(resp->cq_id);
+ cq->created = true;
+ }
+
+ mutex_unlock(&adapter->mbox_lock);
+
+ return status;
+}
+
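+/* Ring lengths are given to hw as log2(len) + 1, with the value 16
+ * (i.e. a 32K-entry ring) wrapping to 0; e.g. a 256-entry ring encodes
+ * as 9.
+ */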
+static u32 be_encoded_q_len(int q_len)
+{
+ u32 len_encoded = fls(q_len); /* log2(len) + 1 */
+ if (len_encoded == 16)
+ len_encoded = 0;
+ return len_encoded;
+}
+
+int be_cmd_mccq_ext_create(struct be_adapter *adapter,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_mcc_ext_create *req;
+ struct be_dma_mem *q_mem = &mccq->dma_mem;
+ void *ctxt;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+ ctxt = &req->context;
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_MCC_CREATE_EXT);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
+
+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+ if (lancer_chip(adapter)) {
+ req->hdr.version = 1;
+ req->cq_id = cpu_to_le16(cq->id);
+
+ AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
+ be_encoded_q_len(mccq->len));
+ AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
+ ctxt, cq->id);
+ AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
+ ctxt, 1);
+
+ } else {
+ AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
+ be_encoded_q_len(mccq->len));
+ AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
+ }
+
+	/* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
+ req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
+ mccq->id = le16_to_cpu(resp->id);
+ mccq->created = true;
+ }
+ mutex_unlock(&adapter->mbox_lock);
+
+ return status;
+}
+
+int be_cmd_mccq_org_create(struct be_adapter *adapter,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_mcc_create *req;
+ struct be_dma_mem *q_mem = &mccq->dma_mem;
+ void *ctxt;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+ ctxt = &req->context;
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_MCC_CREATE);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MCC_CREATE, sizeof(*req));
+
+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+
+ AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
+ be_encoded_q_len(mccq->len));
+ AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
+
+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
+ mccq->id = le16_to_cpu(resp->id);
+ mccq->created = true;
+ }
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+int be_cmd_mccq_create(struct be_adapter *adapter,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
+{
+ int status;
+
+ status = be_cmd_mccq_ext_create(adapter, mccq, cq);
+ if (status && !lancer_chip(adapter)) {
+ dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
+ "or newer to avoid conflicting priorities between NIC "
+ "and FCoE traffic");
+ status = be_cmd_mccq_org_create(adapter, mccq, cq);
+ }
+ return status;
+}
+
+int be_cmd_txq_create(struct be_adapter *adapter,
+ struct be_queue_info *txq,
+ struct be_queue_info *cq)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_eth_tx_create *req;
+ struct be_dma_mem *q_mem = &txq->dma_mem;
+ void *ctxt;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+ ctxt = &req->context;
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_ETH_TX_CREATE);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
+ sizeof(*req));
+
+ if (lancer_chip(adapter)) {
+ req->hdr.version = 1;
+ AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
+ adapter->if_handle);
+ }
+
+ req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+ req->ulp_num = BE_ULP1_NUM;
+ req->type = BE_ETH_TX_RING_TYPE_STANDARD;
+
+ AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
+ be_encoded_q_len(txq->len));
+ AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
+
+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
+ txq->id = le16_to_cpu(resp->cid);
+ txq->created = true;
+ }
+
+ mutex_unlock(&adapter->mbox_lock);
+
+ return status;
+}
+
+/* Uses MCC */
+int be_cmd_rxq_create(struct be_adapter *adapter,
+ struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
+ u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_eth_rx_create *req;
+ struct be_dma_mem *q_mem = &rxq->dma_mem;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_ETH_RX_CREATE);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
+ sizeof(*req));
+
+ req->cq_id = cpu_to_le16(cq_id);
+	req->frag_size = fls(frag_size) - 1;	/* encoded as log2 of frag_size */
+ req->num_pages = 2;
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+ req->interface_id = cpu_to_le32(if_id);
+ req->max_frame_size = cpu_to_le16(max_frame_size);
+ req->rss_queue = cpu_to_le32(rss);
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
+ rxq->id = le16_to_cpu(resp->id);
+ rxq->created = true;
+ *rss_id = resp->rss_id;
+ }
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Generic destroy function for all types of queues
+ * Uses Mbox
+ */
+int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
+ int queue_type)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_q_destroy *req;
+ u8 subsys = 0, opcode = 0;
+ int status;
+
+ if (adapter->eeh_err)
+ return -EIO;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+
+ switch (queue_type) {
+ case QTYPE_EQ:
+ subsys = CMD_SUBSYSTEM_COMMON;
+ opcode = OPCODE_COMMON_EQ_DESTROY;
+ break;
+ case QTYPE_CQ:
+ subsys = CMD_SUBSYSTEM_COMMON;
+ opcode = OPCODE_COMMON_CQ_DESTROY;
+ break;
+ case QTYPE_TXQ:
+ subsys = CMD_SUBSYSTEM_ETH;
+ opcode = OPCODE_ETH_TX_DESTROY;
+ break;
+ case QTYPE_RXQ:
+ subsys = CMD_SUBSYSTEM_ETH;
+ opcode = OPCODE_ETH_RX_DESTROY;
+ break;
+ case QTYPE_MCCQ:
+ subsys = CMD_SUBSYSTEM_COMMON;
+ opcode = OPCODE_COMMON_MCC_DESTROY;
+ break;
+ default:
+ BUG();
+ }
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);
+
+ be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
+ req->id = cpu_to_le16(q->id);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status)
+ q->created = false;
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+/* Uses MCC */
+int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_q_destroy *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_RX_DESTROY);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_DESTROY,
+ sizeof(*req));
+ req->id = cpu_to_le16(q->id);
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status)
+ q->created = false;
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Create an rx filtering policy configuration on an i/f
+ * Uses mbox
+ */
+int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
+ u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
+ u32 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_if_create *req;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_NTWK_INTERFACE_CREATE);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
+
+ req->hdr.domain = domain;
+ req->capability_flags = cpu_to_le32(cap_flags);
+ req->enable_flags = cpu_to_le32(en_flags);
+ req->pmac_invalid = pmac_invalid;
+ if (!pmac_invalid)
+ memcpy(req->mac_addr, mac, ETH_ALEN);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
+ *if_handle = le32_to_cpu(resp->interface_id);
+ if (!pmac_invalid)
+ *pmac_id = le32_to_cpu(resp->pmac_id);
+ }
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+/* Uses mbox */
+int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_if_destroy *req;
+ int status;
+
+ if (adapter->eeh_err)
+ return -EIO;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_NTWK_INTERFACE_DESTROY);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
+
+ req->hdr.domain = domain;
+ req->interface_id = cpu_to_le32(interface_id);
+
+ status = be_mbox_notify_wait(adapter);
+
+ mutex_unlock(&adapter->mbox_lock);
+
+ return status;
+}
+
+/* Get stats is a non-embedded command: the request is not embedded inside
+ * the WRB but is a separate dma memory block.
+ * Uses asynchronous MCC
+ */
+int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_hdr *hdr;
+ struct be_sge *sge;
+ int status = 0;
+
+ if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
+ be_cmd_get_die_temperature(adapter);
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ hdr = nonemb_cmd->va;
+ sge = nonembedded_sgl(wrb);
+
+ be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
+ OPCODE_ETH_GET_STATISTICS);
+
+ be_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
+ OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size);
+
+ if (adapter->generation == BE_GEN3)
+ hdr->version = 1;
+
+ wrb->tag1 = CMD_SUBSYSTEM_ETH;
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(nonemb_cmd->size);
+
+ be_mcc_notify(adapter);
+ adapter->stats_cmd_sent = true;
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Lancer Stats */
+int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd)
+{
+ struct be_mcc_wrb *wrb;
+ struct lancer_cmd_req_pport_stats *req;
+ struct be_sge *sge;
+ int status = 0;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = nonemb_cmd->va;
+ sge = nonembedded_sgl(wrb);
+
+ be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
+ OPCODE_ETH_GET_PPORT_STATS);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
+ OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size);
+
+ req->cmd_params.params.pport_num = cpu_to_le16(adapter->port_num);
+ req->cmd_params.params.reset_stats = 0;
+
+ wrb->tag1 = CMD_SUBSYSTEM_ETH;
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(nonemb_cmd->size);
+
+ be_mcc_notify(adapter);
+ adapter->stats_cmd_sent = true;
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses synchronous mcc */
+int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
+ u16 *link_speed, u32 dom)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_link_status *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
+ if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
+ *link_speed = le16_to_cpu(resp->link_speed);
+ *mac_speed = resp->mac_speed;
+ }
+ }
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses synchronous mcc */
+int be_cmd_get_die_temperature(struct be_adapter *adapter)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_cntl_addnl_attribs *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_cntl_addnl_attribs *resp =
+ embedded_payload(wrb);
+ adapter->drv_stats.be_on_die_temperature =
+ resp->on_die_temperature;
+	} else {
+		/* If IOCTL fails once, do not bother issuing it again */
+		be_get_temp_freq = 0;
+	}
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses synchronous mcc */
+int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_fat *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_MANAGE_FAT);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MANAGE_FAT, sizeof(*req));
+ req->fat_operation = cpu_to_le32(QUERY_FAT);
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
+ if (log_size && resp->log_size)
+ *log_size = le32_to_cpu(resp->log_size) -
+ sizeof(u32);
+ }
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
+{
+ struct be_dma_mem get_fat_cmd;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_fat *req;
+ struct be_sge *sge;
+ u32 offset = 0, total_size, buf_size,
+ log_offset = sizeof(u32), payload_len;
+ int status;
+
+ if (buf_len == 0)
+ return;
+
+ total_size = buf_len;
+
+ get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
+ get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
+ get_fat_cmd.size,
+ &get_fat_cmd.dma);
+ if (!get_fat_cmd.va) {
+ status = -ENOMEM;
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure while retrieving FAT data\n");
+ return;
+ }
+
+ spin_lock_bh(&adapter->mcc_lock);
+
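+	/* Retrieve the log in chunks of at most 60KB, the size of the
+	 * non-embedded buffer allocated above
+	 */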
+ while (total_size) {
+ buf_size = min(total_size, (u32)60*1024);
+ total_size -= buf_size;
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = get_fat_cmd.va;
+ sge = nonembedded_sgl(wrb);
+
+ payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
+ be_wrb_hdr_prepare(wrb, payload_len, false, 1,
+ OPCODE_COMMON_MANAGE_FAT);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MANAGE_FAT, payload_len);
+
+ sge->pa_hi = cpu_to_le32(upper_32_bits(get_fat_cmd.dma));
+ sge->pa_lo = cpu_to_le32(get_fat_cmd.dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(get_fat_cmd.size);
+
+ req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
+ req->read_log_offset = cpu_to_le32(log_offset);
+ req->read_log_length = cpu_to_le32(buf_size);
+ req->data_buffer_size = cpu_to_le32(buf_size);
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
+			memcpy(buf + offset,
+				resp->data_buffer,
+				le32_to_cpu(resp->read_log_length));
+ } else {
+ dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
+ goto err;
+ }
+ offset += buf_size;
+ log_offset += buf_size;
+ }
+err:
+ pci_free_consistent(adapter->pdev, get_fat_cmd.size,
+ get_fat_cmd.va,
+ get_fat_cmd.dma);
+ spin_unlock_bh(&adapter->mcc_lock);
+}
+
+/* Uses Mbox */
+int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_fw_version *req;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_GET_FW_VERSION);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
+ strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
+ }
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+/* Set the EQ delay interval of an EQ to the specified value
+ * Uses async mcc
+ */
+int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_modify_eq_delay *req;
+ int status = 0;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_MODIFY_EQ_DELAY);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
+
+ req->num_eq = cpu_to_le32(1);
+ req->delay[0].eq_id = cpu_to_le32(eq_id);
+ req->delay[0].phase = 0;
+ req->delay[0].delay_multiplier = cpu_to_le32(eqd);
+
+ be_mcc_notify(adapter);
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses synchronous mcc */
+int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
+ u32 num, bool untagged, bool promiscuous)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_vlan_config *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_NTWK_VLAN_CONFIG);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));
+
+ req->interface_id = if_id;
+ req->promiscuous = promiscuous;
+ req->untagged = untagged;
+ req->num_vlan = num;
+ if (!promiscuous) {
+ memcpy(req->normal_vlan, vtag_array,
+ req->num_vlan * sizeof(vtag_array[0]));
+ }
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_dma_mem *mem = &adapter->rx_filter;
+ struct be_cmd_req_rx_filter *req = mem->va;
+ struct be_sge *sge;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ sge = nonembedded_sgl(wrb);
+ sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
+ sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(mem->size);
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
+ OPCODE_COMMON_NTWK_RX_FILTER);
+
+ memset(req, 0, sizeof(*req));
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req));
+
+ req->if_id = cpu_to_le32(adapter->if_handle);
+ if (flags & IFF_PROMISC) {
+ req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
+ BE_IF_FLAGS_VLAN_PROMISCUOUS);
+ if (value == ON)
+ req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
+ BE_IF_FLAGS_VLAN_PROMISCUOUS);
+ } else if (flags & IFF_ALLMULTI) {
+ req->if_flags_mask = req->if_flags =
+ cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
+ } else {
+ struct netdev_hw_addr *ha;
+ int i = 0;
+
+ req->mcast_num = cpu_to_le16(netdev_mc_count(adapter->netdev));
+ netdev_for_each_mc_addr(ha, adapter->netdev)
+ memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
+ }
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses synchronous mcc */
+int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_flow_control *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_SET_FLOW_CONTROL);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
+
+ req->tx_flow_control = cpu_to_le16((u16)tx_fc);
+ req->rx_flow_control = cpu_to_le16((u16)rx_fc);
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses sync mcc */
+int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_flow_control *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_GET_FLOW_CONTROL);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_flow_control *resp =
+ embedded_payload(wrb);
+ *tx_fc = le16_to_cpu(resp->tx_flow_control);
+ *rx_fc = le16_to_cpu(resp->rx_flow_control);
+ }
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses mbox */
+int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
+ u32 *mode, u32 *caps)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_query_fw_cfg *req;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
+ *port_num = le32_to_cpu(resp->phys_port);
+ *mode = le32_to_cpu(resp->function_mode);
+ *caps = le32_to_cpu(resp->function_caps);
+ }
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+/* Uses mbox */
+int be_cmd_reset_function(struct be_adapter *adapter)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_hdr *req;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_FUNCTION_RESET);
+
+ be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
+
+ status = be_mbox_notify_wait(adapter);
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_rss_config *req;
+ u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF,
+ 0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF};
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_ETH_RSS_CONFIG);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
+ OPCODE_ETH_RSS_CONFIG, sizeof(*req));
+
+ req->if_id = cpu_to_le32(adapter->if_handle);
+ req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
+ req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
+ memcpy(req->cpu_table, rsstable, table_size);
+ memcpy(req->hash, myhash, sizeof(myhash));
+ be_dws_cpu_to_le(req->hash, sizeof(req->hash));
+
+ status = be_mbox_notify_wait(adapter);
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+/* Uses sync mcc */
+int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
+ u8 bcn, u8 sts, u8 state)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_enable_disable_beacon *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_ENABLE_DISABLE_BEACON);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));
+
+ req->port_num = port_num;
+ req->beacon_state = state;
+ req->beacon_duration = bcn;
+ req->status_duration = sts;
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses sync mcc */
+int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_beacon_state *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_GET_BEACON_STATE);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));
+
+ req->port_num = port_num;
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_beacon_state *resp =
+ embedded_payload(wrb);
+ *state = resp->beacon_state;
+ }
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
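+/* Uses MCC; the caller sleeps on flash_compl until the write completes
+ * or a 12 second timeout expires
+ */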
+int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+ u32 data_size, u32 data_offset, const char *obj_name,
+ u32 *data_written, u8 *addn_status)
+{
+ struct be_mcc_wrb *wrb;
+ struct lancer_cmd_req_write_object *req;
+ struct lancer_cmd_resp_write_object *resp;
+ void *ctxt = NULL;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+ adapter->flash_status = 0;
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err_unlock;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(struct lancer_cmd_req_write_object),
+ true, 1, OPCODE_COMMON_WRITE_OBJECT);
+ wrb->tag1 = CMD_SUBSYSTEM_COMMON;
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_WRITE_OBJECT,
+ sizeof(struct lancer_cmd_req_write_object));
+
+ ctxt = &req->context;
+ AMAP_SET_BITS(struct amap_lancer_write_obj_context,
+ write_length, ctxt, data_size);
+
+ if (data_size == 0)
+ AMAP_SET_BITS(struct amap_lancer_write_obj_context,
+ eof, ctxt, 1);
+ else
+ AMAP_SET_BITS(struct amap_lancer_write_obj_context,
+ eof, ctxt, 0);
+
+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
+ req->write_offset = cpu_to_le32(data_offset);
+ strcpy(req->object_name, obj_name);
+ req->descriptor_count = cpu_to_le32(1);
+ req->buf_len = cpu_to_le32(data_size);
+ req->addr_low = cpu_to_le32((cmd->dma +
+ sizeof(struct lancer_cmd_req_write_object))
+ & 0xFFFFFFFF);
+ req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
+ sizeof(struct lancer_cmd_req_write_object)));
+
+ be_mcc_notify(adapter);
+ spin_unlock_bh(&adapter->mcc_lock);
+
+ if (!wait_for_completion_timeout(&adapter->flash_compl,
+ msecs_to_jiffies(12000)))
+ status = -1;
+ else
+ status = adapter->flash_status;
+
+ resp = embedded_payload(wrb);
+ if (!status) {
+ *data_written = le32_to_cpu(resp->actual_write_len);
+ } else {
+ *addn_status = resp->additional_status;
+ status = resp->status;
+ }
+
+ return status;
+
+err_unlock:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
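+/* Uses MCC; completion is signalled asynchronously via flash_compl */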
+int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
+ u32 flash_type, u32 flash_opcode, u32 buf_size)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_write_flashrom *req;
+ struct be_sge *sge;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+ adapter->flash_status = 0;
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err_unlock;
+ }
+ req = cmd->va;
+ sge = nonembedded_sgl(wrb);
+
+ be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
+ OPCODE_COMMON_WRITE_FLASHROM);
+ wrb->tag1 = CMD_SUBSYSTEM_COMMON;
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
+ sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
+ sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(cmd->size);
+
+ req->params.op_type = cpu_to_le32(flash_type);
+ req->params.op_code = cpu_to_le32(flash_opcode);
+ req->params.data_buf_size = cpu_to_le32(buf_size);
+
+ be_mcc_notify(adapter);
+ spin_unlock_bh(&adapter->mcc_lock);
+
+ if (!wait_for_completion_timeout(&adapter->flash_compl,
+ msecs_to_jiffies(12000)))
+ status = -1;
+ else
+ status = adapter->flash_status;
+
+ return status;
+
+err_unlock:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
+ int offset)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_write_flashrom *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
+ OPCODE_COMMON_READ_FLASHROM);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
+
+ req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
+ req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
+ req->params.offset = cpu_to_le32(offset);
+ req->params.data_buf_size = cpu_to_le32(0x4);
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status)
+ memcpy(flashed_crc, req->params.data_buf, 4);
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
+ struct be_dma_mem *nonemb_cmd)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_acpi_wol_magic_config *req;
+ struct be_sge *sge;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = nonemb_cmd->va;
+ sge = nonembedded_sgl(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
+ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
+ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
+ memcpy(req->magic_mac, mac, ETH_ALEN);
+
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(nonemb_cmd->size);
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+ u8 loopback_type, u8 enable)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_lmode *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
+ sizeof(*req));
+
+ req->src_port = port_num;
+ req->dest_port = port_num;
+ req->loopback_type = loopback_type;
+ req->loopback_state = enable;
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
+ u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_loopback_test *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_LOWLEVEL_LOOPBACK_TEST);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
+ OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
+ req->hdr.timeout = cpu_to_le32(4);
+
+ req->pattern = cpu_to_le64(pattern);
+ req->src_port = cpu_to_le32(port_num);
+ req->dest_port = cpu_to_le32(port_num);
+ req->pkt_size = cpu_to_le32(pkt_size);
+ req->num_pkts = cpu_to_le32(num_pkts);
+ req->loopback_type = cpu_to_le32(loopback_type);
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
+ status = le32_to_cpu(resp->status);
+ }
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
+ u32 byte_cnt, struct be_dma_mem *cmd)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_ddrdma_test *req;
+ struct be_sge *sge;
+ int status;
+ int i, j = 0;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = cmd->va;
+ sge = nonembedded_sgl(wrb);
+ be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
+ OPCODE_LOWLEVEL_HOST_DDR_DMA);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
+ OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);
+
+ sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
+ sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(cmd->size);
+
+ req->pattern = cpu_to_le64(pattern);
+ req->byte_count = cpu_to_le32(byte_cnt);
+ for (i = 0; i < byte_cnt; i++) {
+ req->snd_buff[i] = (u8)(pattern >> (j*8));
+ j++;
+ if (j > 7)
+ j = 0;
+ }
+
+ status = be_mcc_notify_wait(adapter);
+
+ if (!status) {
+ struct be_cmd_resp_ddrdma_test *resp;
+ resp = cmd->va;
+ if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
+ resp->snd_err) {
+ status = -1;
+ }
+ }
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_seeprom_read *req;
+ struct be_sge *sge;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = nonemb_cmd->va;
+ sge = nonembedded_sgl(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
+ OPCODE_COMMON_SEEPROM_READ);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
+
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(nonemb_cmd->size);
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_get_phy_info(struct be_adapter *adapter,
+ struct be_phy_info *phy_info)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_phy_info *req;
+ struct be_sge *sge;
+ struct be_dma_mem cmd;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ cmd.size = sizeof(struct be_cmd_req_get_phy_info);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
+ &cmd.dma);
+ if (!cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ status = -ENOMEM;
+ goto err;
+ }
+
+ req = cmd.va;
+ sge = nonembedded_sgl(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
+ OPCODE_COMMON_GET_PHY_DETAILS);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_PHY_DETAILS,
+ sizeof(*req));
+
+ sge->pa_hi = cpu_to_le32(upper_32_bits(cmd.dma));
+ sge->pa_lo = cpu_to_le32(cmd.dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(cmd.size);
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_phy_info *resp_phy_info =
+ cmd.va + sizeof(struct be_cmd_req_hdr);
+ phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type);
+ phy_info->interface_type =
+ le16_to_cpu(resp_phy_info->interface_type);
+ }
+ pci_free_consistent(adapter->pdev, cmd.size,
+ cmd.va, cmd.dma);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_qos *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_SET_QOS);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_QOS, sizeof(*req));
+
+ req->hdr.domain = domain;
+ req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
+ req->max_bps_nic = cpu_to_le32(bps);
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_cntl_attribs *req;
+ struct be_cmd_resp_cntl_attribs *resp;
+ struct be_sge *sge;
+ int status;
+ int payload_len = max(sizeof(*req), sizeof(*resp));
+ struct mgmt_controller_attrib *attribs;
+ struct be_dma_mem attribs_cmd;
+
+ memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
+ attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
+ attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
+ &attribs_cmd.dma);
+ if (!attribs_cmd.va) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure\n");
+ return -ENOMEM;
+ }
+
+	if (mutex_lock_interruptible(&adapter->mbox_lock)) {
+		status = -1;
+		goto free_cmd;
+	}
+
+ wrb = wrb_from_mbox(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = attribs_cmd.va;
+ sge = nonembedded_sgl(wrb);
+
+ be_wrb_hdr_prepare(wrb, payload_len, false, 1,
+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
+ sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
+ sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(attribs_cmd.size);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
+ adapter->hba_port_num = attribs->hba_attribs.phy_port;
+ }
+
+err:
+	mutex_unlock(&adapter->mbox_lock);
+free_cmd:
+	pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
+			attribs_cmd.dma);
+ return status;
+}
+
+/* Uses mbox */
+int be_cmd_req_native_mode(struct be_adapter *adapter)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_func_cap *req;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req));
+
+ req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
+ CAPABILITY_BE3_NATIVE_ERX_API);
+ req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
+ adapter->be3_native = le32_to_cpu(resp->cap_flags) &
+ CAPABILITY_BE3_NATIVE_ERX_API;
+ }
+err:
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
--- /dev/null
+/*
+ * Copyright (C) 2005 - 2011 Emulex
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+/*
+ * The driver sends configuration and management command requests to the
+ * firmware in the BE. These requests are communicated to the processor
+ * using Work Request Blocks (WRBs) submitted to the MCC-WRB ring or via one
+ * WRB inside a MAILBOX.
+ * The commands are serviced by the ARM processor in the BladeEngine's MPU.
+ */
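+
+/* A typical embedded command issued on the MCC ring looks like this
+ * (sketch; see the callers in be_cmds.c):
+ *
+ *	spin_lock_bh(&adapter->mcc_lock);
+ *	wrb = wrb_from_mccq(adapter);
+ *	req = embedded_payload(wrb);
+ *	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);
+ *	be_cmd_hdr_prepare(&req->hdr, subsystem, opcode, sizeof(*req));
+ *	status = be_mcc_notify_wait(adapter);
+ *	spin_unlock_bh(&adapter->mcc_lock);
+ */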
+
+struct be_sge {
+ u32 pa_lo;
+ u32 pa_hi;
+ u32 len;
+};
+
+#define MCC_WRB_EMBEDDED_MASK 1 /* bit 0 of dword 0 */
+#define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */
+#define MCC_WRB_SGE_CNT_MASK 0x1F /* bits 3 - 7 of dword 0 */
+struct be_mcc_wrb {
+ u32 embedded; /* dword 0 */
+ u32 payload_length; /* dword 1 */
+ u32 tag0; /* dword 2 */
+ u32 tag1; /* dword 3 */
+ u32 rsvd; /* dword 4 */
+ union {
+ u8 embedded_payload[236]; /* used by embedded cmds */
+ struct be_sge sgl[19]; /* used by non-embedded cmds */
+ } payload;
+};
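+
+/* be_wrb_hdr_prepare() encodes dword 0 of the WRB: for an embedded cmd it
+ * sets MCC_WRB_EMBEDDED_MASK; for a non-embedded cmd it places the SGE
+ * count in bits 3 - 7 via MCC_WRB_SGE_CNT_SHIFT/MCC_WRB_SGE_CNT_MASK.
+ */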
+
+#define CQE_FLAGS_VALID_MASK (1 << 31)
+#define CQE_FLAGS_ASYNC_MASK (1 << 30)
+#define CQE_FLAGS_COMPLETED_MASK (1 << 28)
+#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
+
+/* Completion Status */
+enum {
+ MCC_STATUS_SUCCESS = 0,
+ MCC_STATUS_FAILED = 1,
+ MCC_STATUS_ILLEGAL_REQUEST = 2,
+ MCC_STATUS_ILLEGAL_FIELD = 3,
+ MCC_STATUS_INSUFFICIENT_BUFFER = 4,
+ MCC_STATUS_UNAUTHORIZED_REQUEST = 5,
+ MCC_STATUS_NOT_SUPPORTED = 66
+};
+
+#define CQE_STATUS_COMPL_MASK 0xFFFF
+#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
+#define CQE_STATUS_EXTD_MASK 0xFFFF
+#define CQE_STATUS_EXTD_SHIFT 16 /* bits 16 - 31 */
+
+struct be_mcc_compl {
+ u32 status; /* dword 0 */
+ u32 tag0; /* dword 1 */
+ u32 tag1; /* dword 2 */
+ u32 flags; /* dword 3 */
+};
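+
+/* The base completion status is extracted from dword 0 as:
+ *	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
+ *				CQE_STATUS_COMPL_MASK;
+ */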
+
+/* When the async bit of mcc_compl is set, the last 4 bytes of
+ * mcc_compl are interpreted as follows:
+ */
+#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
+#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
+#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16
+#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xFF
+#define ASYNC_EVENT_CODE_LINK_STATE 0x1
+#define ASYNC_EVENT_CODE_GRP_5 0x5
+#define ASYNC_EVENT_QOS_SPEED 0x1
+#define ASYNC_EVENT_COS_PRIORITY 0x2
+#define ASYNC_EVENT_PVID_STATE 0x3
+struct be_async_event_trailer {
+ u32 code;
+};
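+
+/* The event code is recovered from the trailer as:
+ *	event_code = (trailer->code >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
+ *				ASYNC_TRAILER_EVENT_CODE_MASK;
+ */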
+
+enum {
+ LINK_DOWN = 0x0,
+ LINK_UP = 0x1
+};
+#define LINK_STATUS_MASK 0x1
+
+/* When the event code of an async trailer is link-state, the mcc_compl
+ * must be interpreted as follows
+ */
+struct be_async_event_link_state {
+ u8 physical_port;
+ u8 port_link_status;
+ u8 port_duplex;
+ u8 port_speed;
+ u8 port_fault;
+ u8 rsvd0[7];
+ struct be_async_event_trailer trailer;
+} __packed;
+
+/* When the event code of an async trailer is GRP-5 and event_type is
+ * QOS_SPEED, the mcc_compl must be interpreted as follows
+ */
+struct be_async_event_grp5_qos_link_speed {
+ u8 physical_port;
+ u8 rsvd[5];
+ u16 qos_link_speed;
+ u32 event_tag;
+ struct be_async_event_trailer trailer;
+} __packed;
+
+/* When the event code of an async trailer is GRP5 and event type is
+ * CoS-Priority, the mcc_compl must be interpreted as follows
+ */
+struct be_async_event_grp5_cos_priority {
+ u8 physical_port;
+ u8 available_priority_bmap;
+ u8 reco_default_priority;
+ u8 valid;
+ u8 rsvd0;
+ u8 event_tag;
+ struct be_async_event_trailer trailer;
+} __packed;
+
+/* When the event code of an async trailer is GRP5 and event type is
+ * PVID state, the mcc_compl must be interpreted as follows
+ */
+struct be_async_event_grp5_pvid_state {
+ u8 enabled;
+ u8 rsvd0;
+ u16 tag;
+ u32 event_tag;
+ u32 rsvd1;
+ struct be_async_event_trailer trailer;
+} __packed;
+
+struct be_mcc_mailbox {
+ struct be_mcc_wrb wrb;
+ struct be_mcc_compl compl;
+};
+
+#define CMD_SUBSYSTEM_COMMON 0x1
+#define CMD_SUBSYSTEM_ETH 0x3
+#define CMD_SUBSYSTEM_LOWLEVEL 0xb
+
+#define OPCODE_COMMON_NTWK_MAC_QUERY 1
+#define OPCODE_COMMON_NTWK_MAC_SET 2
+#define OPCODE_COMMON_NTWK_MULTICAST_SET 3
+#define OPCODE_COMMON_NTWK_VLAN_CONFIG 4
+#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5
+#define OPCODE_COMMON_READ_FLASHROM 6
+#define OPCODE_COMMON_WRITE_FLASHROM 7
+#define OPCODE_COMMON_CQ_CREATE 12
+#define OPCODE_COMMON_EQ_CREATE 13
+#define OPCODE_COMMON_MCC_CREATE 21
+#define OPCODE_COMMON_SET_QOS 28
+#define OPCODE_COMMON_MCC_CREATE_EXT 90
+#define OPCODE_COMMON_SEEPROM_READ 30
+#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
+#define OPCODE_COMMON_NTWK_RX_FILTER 34
+#define OPCODE_COMMON_GET_FW_VERSION 35
+#define OPCODE_COMMON_SET_FLOW_CONTROL 36
+#define OPCODE_COMMON_GET_FLOW_CONTROL 37
+#define OPCODE_COMMON_SET_FRAME_SIZE 39
+#define OPCODE_COMMON_MODIFY_EQ_DELAY 41
+#define OPCODE_COMMON_FIRMWARE_CONFIG 42
+#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50
+#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51
+#define OPCODE_COMMON_MCC_DESTROY 53
+#define OPCODE_COMMON_CQ_DESTROY 54
+#define OPCODE_COMMON_EQ_DESTROY 55
+#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
+#define OPCODE_COMMON_NTWK_PMAC_ADD 59
+#define OPCODE_COMMON_NTWK_PMAC_DEL 60
+#define OPCODE_COMMON_FUNCTION_RESET 61
+#define OPCODE_COMMON_MANAGE_FAT 68
+#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
+#define OPCODE_COMMON_GET_BEACON_STATE 70
+#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
+#define OPCODE_COMMON_GET_PHY_DETAILS 102
+#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
+#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
+#define OPCODE_COMMON_WRITE_OBJECT 172
+
+#define OPCODE_ETH_RSS_CONFIG 1
+#define OPCODE_ETH_ACPI_CONFIG 2
+#define OPCODE_ETH_PROMISCUOUS 3
+#define OPCODE_ETH_GET_STATISTICS 4
+#define OPCODE_ETH_TX_CREATE 7
+#define OPCODE_ETH_RX_CREATE 8
+#define OPCODE_ETH_TX_DESTROY 9
+#define OPCODE_ETH_RX_DESTROY 10
+#define OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG 12
+#define OPCODE_ETH_GET_PPORT_STATS 18
+
+#define OPCODE_LOWLEVEL_HOST_DDR_DMA 17
+#define OPCODE_LOWLEVEL_LOOPBACK_TEST 18
+#define OPCODE_LOWLEVEL_SET_LOOPBACK_MODE 19
+
+struct be_cmd_req_hdr {
+ u8 opcode; /* dword 0 */
+ u8 subsystem; /* dword 0 */
+ u8 port_number; /* dword 0 */
+ u8 domain; /* dword 0 */
+ u32 timeout; /* dword 1 */
+ u32 request_length; /* dword 2 */
+ u8 version; /* dword 3 */
+ u8 rsvd[3]; /* dword 3 */
+};
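+
+/* Every command request begins with the above header; be_cmd_hdr_prepare()
+ * fills in the subsystem, opcode and request length before the command is
+ * issued.
+ */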
+
+#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
+#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */
+struct be_cmd_resp_hdr {
+ u32 info; /* dword 0 */
+ u32 status; /* dword 1 */
+ u32 response_length; /* dword 2 */
+ u32 actual_resp_len; /* dword 3 */
+};
+
+struct phys_addr {
+ u32 lo;
+ u32 hi;
+};
+
+/**************************
+ * BE Command definitions *
+ **************************/
+
+/* Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field */
+struct amap_eq_context {
+ u8 cidx[13]; /* dword 0*/
+ u8 rsvd0[3]; /* dword 0*/
+ u8 epidx[13]; /* dword 0*/
+ u8 valid; /* dword 0*/
+ u8 rsvd1; /* dword 0*/
+ u8 size; /* dword 0*/
+ u8 pidx[13]; /* dword 1*/
+ u8 rsvd2[3]; /* dword 1*/
+ u8 pd[10]; /* dword 1*/
+ u8 count[3]; /* dword 1*/
+ u8 solevent; /* dword 1*/
+ u8 stalled; /* dword 1*/
+ u8 armed; /* dword 1*/
+ u8 rsvd3[4]; /* dword 2*/
+ u8 func[8]; /* dword 2*/
+ u8 rsvd4; /* dword 2*/
+ u8 delaymult[10]; /* dword 2*/
+ u8 rsvd5[2]; /* dword 2*/
+ u8 phase[2]; /* dword 2*/
+ u8 nodelay; /* dword 2*/
+ u8 rsvd6[4]; /* dword 2*/
+ u8 rsvd7[32]; /* dword 3*/
+} __packed;
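+
+/* Fields of such a pseudo-defined context are written with AMAP_SET_BITS(),
+ * which derives each field's dword offset, shift and mask from its byte
+ * position above, e.g.:
+ *	AMAP_SET_BITS(struct amap_eq_context, valid, ctxt, 1);
+ */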
+
+struct be_cmd_req_eq_create {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages; /* sword */
+ u16 rsvd0; /* sword */
+ u8 context[sizeof(struct amap_eq_context) / 8];
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_cmd_resp_eq_create {
+ struct be_cmd_resp_hdr resp_hdr;
+ u16 eq_id; /* sword */
+ u16 rsvd0; /* sword */
+} __packed;
+
+/******************** Mac query ***************************/
+enum {
+ MAC_ADDRESS_TYPE_STORAGE = 0x0,
+ MAC_ADDRESS_TYPE_NETWORK = 0x1,
+ MAC_ADDRESS_TYPE_PD = 0x2,
+ MAC_ADDRESS_TYPE_MANAGEMENT = 0x3
+};
+
+struct mac_addr {
+ u16 size_of_struct;
+ u8 addr[ETH_ALEN];
+} __packed;
+
+struct be_cmd_req_mac_query {
+ struct be_cmd_req_hdr hdr;
+ u8 type;
+ u8 permanent;
+ u16 if_id;
+} __packed;
+
+struct be_cmd_resp_mac_query {
+ struct be_cmd_resp_hdr hdr;
+ struct mac_addr mac;
+};
+
+/******************** PMac Add ***************************/
+struct be_cmd_req_pmac_add {
+ struct be_cmd_req_hdr hdr;
+ u32 if_id;
+ u8 mac_address[ETH_ALEN];
+ u8 rsvd0[2];
+} __packed;
+
+struct be_cmd_resp_pmac_add {
+ struct be_cmd_resp_hdr hdr;
+ u32 pmac_id;
+};
+
+/******************** PMac Del ***************************/
+struct be_cmd_req_pmac_del {
+ struct be_cmd_req_hdr hdr;
+ u32 if_id;
+ u32 pmac_id;
+};
+
+/******************** Create CQ ***************************/
+/* Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field */
+struct amap_cq_context_be {
+ u8 cidx[11]; /* dword 0*/
+ u8 rsvd0; /* dword 0*/
+ u8 coalescwm[2]; /* dword 0*/
+ u8 nodelay; /* dword 0*/
+ u8 epidx[11]; /* dword 0*/
+ u8 rsvd1; /* dword 0*/
+ u8 count[2]; /* dword 0*/
+ u8 valid; /* dword 0*/
+ u8 solevent; /* dword 0*/
+ u8 eventable; /* dword 0*/
+ u8 pidx[11]; /* dword 1*/
+ u8 rsvd2; /* dword 1*/
+ u8 pd[10]; /* dword 1*/
+ u8 eqid[8]; /* dword 1*/
+ u8 stalled; /* dword 1*/
+ u8 armed; /* dword 1*/
+ u8 rsvd3[4]; /* dword 2*/
+ u8 func[8]; /* dword 2*/
+ u8 rsvd4[20]; /* dword 2*/
+ u8 rsvd5[32]; /* dword 3*/
+} __packed;
+
+struct amap_cq_context_lancer {
+ u8 rsvd0[12]; /* dword 0*/
+ u8 coalescwm[2]; /* dword 0*/
+ u8 nodelay; /* dword 0*/
+ u8 rsvd1[12]; /* dword 0*/
+ u8 count[2]; /* dword 0*/
+ u8 valid; /* dword 0*/
+ u8 rsvd2; /* dword 0*/
+ u8 eventable; /* dword 0*/
+ u8 eqid[16]; /* dword 1*/
+ u8 rsvd3[15]; /* dword 1*/
+ u8 armed; /* dword 1*/
+ u8 rsvd4[32]; /* dword 2*/
+ u8 rsvd5[32]; /* dword 3*/
+} __packed;
+
+struct be_cmd_req_cq_create {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+ u8 page_size;
+ u8 rsvd0;
+ u8 context[sizeof(struct amap_cq_context_be) / 8];
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_cmd_resp_cq_create {
+ struct be_cmd_resp_hdr hdr;
+ u16 cq_id;
+ u16 rsvd0;
+} __packed;
+
+struct be_cmd_req_get_fat {
+ struct be_cmd_req_hdr hdr;
+ u32 fat_operation;
+ u32 read_log_offset;
+ u32 read_log_length;
+ u32 data_buffer_size;
+ u32 data_buffer[1];
+} __packed;
+
+struct be_cmd_resp_get_fat {
+ struct be_cmd_resp_hdr hdr;
+ u32 log_size;
+ u32 read_log_length;
+ u32 rsvd[2];
+ u32 data_buffer[1];
+} __packed;
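+
+/* Note: the reported log_size includes one extra dword;
+ * be_cmd_get_reg_len() subtracts sizeof(u32) to get the usable length.
+ */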
+
+/******************** Create MCCQ ***************************/
+/* Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field */
+struct amap_mcc_context_be {
+ u8 con_index[14];
+ u8 rsvd0[2];
+ u8 ring_size[4];
+ u8 fetch_wrb;
+ u8 fetch_r2t;
+ u8 cq_id[10];
+ u8 prod_index[14];
+ u8 fid[8];
+ u8 pdid[9];
+ u8 valid;
+ u8 rsvd1[32];
+ u8 rsvd2[32];
+} __packed;
+
+struct amap_mcc_context_lancer {
+ u8 async_cq_id[16];
+ u8 ring_size[4];
+ u8 rsvd0[12];
+ u8 rsvd1[31];
+ u8 valid;
+ u8 async_cq_valid[1];
+ u8 rsvd2[31];
+ u8 rsvd3[32];
+} __packed;
+
+struct be_cmd_req_mcc_create {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+ u16 cq_id;
+ u8 context[sizeof(struct amap_mcc_context_be) / 8];
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_cmd_req_mcc_ext_create {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+ u16 cq_id;
+ u32 async_event_bitmap[1];
+ u8 context[sizeof(struct amap_mcc_context_be) / 8];
+ struct phys_addr pages[8];
+} __packed;
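+
+/* async_event_bitmap selects which async event codes the MCCQ subscribes
+ * to; be_cmd_mccq_ext_create() sets bits 1 and 5 for link-state and
+ * Group 5 events.
+ */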
+
+struct be_cmd_resp_mcc_create {
+ struct be_cmd_resp_hdr hdr;
+ u16 id;
+ u16 rsvd0;
+} __packed;
+
+/******************** Create TxQ ***************************/
+#define BE_ETH_TX_RING_TYPE_STANDARD 2
+#define BE_ULP1_NUM 1
+
+/* Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field */
+struct amap_tx_context {
+ u8 if_id[16]; /* dword 0 */
+ u8 tx_ring_size[4]; /* dword 0 */
+ u8 rsvd1[26]; /* dword 0 */
+ u8 pci_func_id[8]; /* dword 1 */
+ u8 rsvd2[9]; /* dword 1 */
+ u8 ctx_valid; /* dword 1 */
+ u8 cq_id_send[16]; /* dword 2 */
+ u8 rsvd3[16]; /* dword 2 */
+ u8 rsvd4[32]; /* dword 3 */
+ u8 rsvd5[32]; /* dword 4 */
+ u8 rsvd6[32]; /* dword 5 */
+ u8 rsvd7[32]; /* dword 6 */
+ u8 rsvd8[32]; /* dword 7 */
+ u8 rsvd9[32]; /* dword 8 */
+ u8 rsvd10[32]; /* dword 9 */
+ u8 rsvd11[32]; /* dword 10 */
+ u8 rsvd12[32]; /* dword 11 */
+ u8 rsvd13[32]; /* dword 12 */
+ u8 rsvd14[32]; /* dword 13 */
+ u8 rsvd15[32]; /* dword 14 */
+ u8 rsvd16[32]; /* dword 15 */
+} __packed;
+
+struct be_cmd_req_eth_tx_create {
+ struct be_cmd_req_hdr hdr;
+ u8 num_pages;
+ u8 ulp_num;
+ u8 type;
+ u8 bound_port;
+ u8 context[sizeof(struct amap_tx_context) / 8];
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_cmd_resp_eth_tx_create {
+ struct be_cmd_resp_hdr hdr;
+ u16 cid;
+ u16 rsvd0;
+} __packed;
+
+/******************** Create RxQ ***************************/
+struct be_cmd_req_eth_rx_create {
+ struct be_cmd_req_hdr hdr;
+ u16 cq_id;
+ u8 frag_size;
+ u8 num_pages;
+ struct phys_addr pages[2];
+ u32 interface_id;
+ u16 max_frame_size;
+ u16 rsvd0;
+ u32 rss_queue;
+} __packed;
+
+struct be_cmd_resp_eth_rx_create {
+ struct be_cmd_resp_hdr hdr;
+ u16 id;
+ u8 rss_id;
+ u8 rsvd0;
+} __packed;
+
+/******************** Q Destroy ***************************/
+/* Type of Queue to be destroyed */
+enum {
+ QTYPE_EQ = 1,
+ QTYPE_CQ,
+ QTYPE_TXQ,
+ QTYPE_RXQ,
+ QTYPE_MCCQ
+};
+
+struct be_cmd_req_q_destroy {
+ struct be_cmd_req_hdr hdr;
+ u16 id;
+ u16 bypass_flush; /* valid only for rx q destroy */
+} __packed;
+
+/************ I/f Create (it's actually I/f Config Create)**********/
+
+/* Capability flags for the i/f */
+enum be_if_flags {
+ BE_IF_FLAGS_RSS = 0x4,
+ BE_IF_FLAGS_PROMISCUOUS = 0x8,
+ BE_IF_FLAGS_BROADCAST = 0x10,
+ BE_IF_FLAGS_UNTAGGED = 0x20,
+ BE_IF_FLAGS_ULP = 0x40,
+ BE_IF_FLAGS_VLAN_PROMISCUOUS = 0x80,
+ BE_IF_FLAGS_VLAN = 0x100,
+ BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200,
+ BE_IF_FLAGS_PASS_L2_ERRORS = 0x400,
+ BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800,
+ BE_IF_FLAGS_MULTICAST = 0x1000
+};
+
+/* An RX interface is an object with one or more MAC addresses and
+ * filtering capabilities. */
+struct be_cmd_req_if_create {
+ struct be_cmd_req_hdr hdr;
+	u32 version;		/* ignored currently */
+ u32 capability_flags;
+ u32 enable_flags;
+ u8 mac_addr[ETH_ALEN];
+ u8 rsvd0;
+ u8 pmac_invalid; /* if set, don't attach the mac addr to the i/f */
+ u32 vlan_tag; /* not used currently */
+} __packed;
+
+struct be_cmd_resp_if_create {
+ struct be_cmd_resp_hdr hdr;
+ u32 interface_id;
+ u32 pmac_id;
+};
+
+/****** I/f Destroy (it's actually I/f Config Destroy) **********/
+struct be_cmd_req_if_destroy {
+ struct be_cmd_req_hdr hdr;
+ u32 interface_id;
+};
+
+/*************** HW Stats Get **********************************/
+struct be_port_rxf_stats_v0 {
+ u32 rx_bytes_lsd; /* dword 0*/
+ u32 rx_bytes_msd; /* dword 1*/
+ u32 rx_total_frames; /* dword 2*/
+ u32 rx_unicast_frames; /* dword 3*/
+ u32 rx_multicast_frames; /* dword 4*/
+ u32 rx_broadcast_frames; /* dword 5*/
+ u32 rx_crc_errors; /* dword 6*/
+ u32 rx_alignment_symbol_errors; /* dword 7*/
+ u32 rx_pause_frames; /* dword 8*/
+ u32 rx_control_frames; /* dword 9*/
+ u32 rx_in_range_errors; /* dword 10*/
+ u32 rx_out_range_errors; /* dword 11*/
+ u32 rx_frame_too_long; /* dword 12*/
+ u32 rx_address_match_errors; /* dword 13*/
+ u32 rx_vlan_mismatch; /* dword 14*/
+ u32 rx_dropped_too_small; /* dword 15*/
+ u32 rx_dropped_too_short; /* dword 16*/
+ u32 rx_dropped_header_too_small; /* dword 17*/
+ u32 rx_dropped_tcp_length; /* dword 18*/
+ u32 rx_dropped_runt; /* dword 19*/
+ u32 rx_64_byte_packets; /* dword 20*/
+ u32 rx_65_127_byte_packets; /* dword 21*/
+ u32 rx_128_256_byte_packets; /* dword 22*/
+ u32 rx_256_511_byte_packets; /* dword 23*/
+ u32 rx_512_1023_byte_packets; /* dword 24*/
+ u32 rx_1024_1518_byte_packets; /* dword 25*/
+ u32 rx_1519_2047_byte_packets; /* dword 26*/
+ u32 rx_2048_4095_byte_packets; /* dword 27*/
+ u32 rx_4096_8191_byte_packets; /* dword 28*/
+ u32 rx_8192_9216_byte_packets; /* dword 29*/
+ u32 rx_ip_checksum_errs; /* dword 30*/
+ u32 rx_tcp_checksum_errs; /* dword 31*/
+ u32 rx_udp_checksum_errs; /* dword 32*/
+ u32 rx_non_rss_packets; /* dword 33*/
+ u32 rx_ipv4_packets; /* dword 34*/
+ u32 rx_ipv6_packets; /* dword 35*/
+ u32 rx_ipv4_bytes_lsd; /* dword 36*/
+ u32 rx_ipv4_bytes_msd; /* dword 37*/
+ u32 rx_ipv6_bytes_lsd; /* dword 38*/
+ u32 rx_ipv6_bytes_msd; /* dword 39*/
+ u32 rx_chute1_packets; /* dword 40*/
+ u32 rx_chute2_packets; /* dword 41*/
+ u32 rx_chute3_packets; /* dword 42*/
+ u32 rx_management_packets; /* dword 43*/
+ u32 rx_switched_unicast_packets; /* dword 44*/
+ u32 rx_switched_multicast_packets; /* dword 45*/
+ u32 rx_switched_broadcast_packets; /* dword 46*/
+ u32 tx_bytes_lsd; /* dword 47*/
+ u32 tx_bytes_msd; /* dword 48*/
+ u32 tx_unicastframes; /* dword 49*/
+ u32 tx_multicastframes; /* dword 50*/
+ u32 tx_broadcastframes; /* dword 51*/
+ u32 tx_pauseframes; /* dword 52*/
+ u32 tx_controlframes; /* dword 53*/
+ u32 tx_64_byte_packets; /* dword 54*/
+ u32 tx_65_127_byte_packets; /* dword 55*/
+ u32 tx_128_256_byte_packets; /* dword 56*/
+ u32 tx_256_511_byte_packets; /* dword 57*/
+ u32 tx_512_1023_byte_packets; /* dword 58*/
+ u32 tx_1024_1518_byte_packets; /* dword 59*/
+ u32 tx_1519_2047_byte_packets; /* dword 60*/
+ u32 tx_2048_4095_byte_packets; /* dword 61*/
+ u32 tx_4096_8191_byte_packets; /* dword 62*/
+ u32 tx_8192_9216_byte_packets; /* dword 63*/
+ u32 rx_fifo_overflow; /* dword 64*/
+ u32 rx_input_fifo_overflow; /* dword 65*/
+};
+
+struct be_rxf_stats_v0 {
+ struct be_port_rxf_stats_v0 port[2];
+ u32 rx_drops_no_pbuf; /* dword 132*/
+ u32 rx_drops_no_txpb; /* dword 133*/
+ u32 rx_drops_no_erx_descr; /* dword 134*/
+ u32 rx_drops_no_tpre_descr; /* dword 135*/
+ u32 management_rx_port_packets; /* dword 136*/
+ u32 management_rx_port_bytes; /* dword 137*/
+ u32 management_rx_port_pause_frames; /* dword 138*/
+ u32 management_rx_port_errors; /* dword 139*/
+ u32 management_tx_port_packets; /* dword 140*/
+ u32 management_tx_port_bytes; /* dword 141*/
+ u32 management_tx_port_pause; /* dword 142*/
+ u32 management_rx_port_rxfifo_overflow; /* dword 143*/
+ u32 rx_drops_too_many_frags; /* dword 144*/
+ u32 rx_drops_invalid_ring; /* dword 145*/
+ u32 forwarded_packets; /* dword 146*/
+ u32 rx_drops_mtu; /* dword 147*/
+ u32 rsvd0[7];
+ u32 port0_jabber_events;
+ u32 port1_jabber_events;
+ u32 rsvd1[6];
+};
+
+struct be_erx_stats_v0 {
+	u32 rx_drops_no_fragments[44]; /* dwords 0 to 43 */
+ u32 rsvd[4];
+};
+
+struct be_pmem_stats {
+ u32 eth_red_drops;
+ u32 rsvd[5];
+};
+
+struct be_hw_stats_v0 {
+ struct be_rxf_stats_v0 rxf;
+ u32 rsvd[48];
+ struct be_erx_stats_v0 erx;
+ struct be_pmem_stats pmem;
+};
+
+struct be_cmd_req_get_stats_v0 {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd[sizeof(struct be_hw_stats_v0)];
+};
+
+struct be_cmd_resp_get_stats_v0 {
+ struct be_cmd_resp_hdr hdr;
+ struct be_hw_stats_v0 hw_stats;
+};
+
+struct lancer_pport_stats {
+ u32 tx_packets_lo;
+ u32 tx_packets_hi;
+ u32 tx_unicast_packets_lo;
+ u32 tx_unicast_packets_hi;
+ u32 tx_multicast_packets_lo;
+ u32 tx_multicast_packets_hi;
+ u32 tx_broadcast_packets_lo;
+ u32 tx_broadcast_packets_hi;
+ u32 tx_bytes_lo;
+ u32 tx_bytes_hi;
+ u32 tx_unicast_bytes_lo;
+ u32 tx_unicast_bytes_hi;
+ u32 tx_multicast_bytes_lo;
+ u32 tx_multicast_bytes_hi;
+ u32 tx_broadcast_bytes_lo;
+ u32 tx_broadcast_bytes_hi;
+ u32 tx_discards_lo;
+ u32 tx_discards_hi;
+ u32 tx_errors_lo;
+ u32 tx_errors_hi;
+ u32 tx_pause_frames_lo;
+ u32 tx_pause_frames_hi;
+ u32 tx_pause_on_frames_lo;
+ u32 tx_pause_on_frames_hi;
+ u32 tx_pause_off_frames_lo;
+ u32 tx_pause_off_frames_hi;
+ u32 tx_internal_mac_errors_lo;
+ u32 tx_internal_mac_errors_hi;
+ u32 tx_control_frames_lo;
+ u32 tx_control_frames_hi;
+ u32 tx_packets_64_bytes_lo;
+ u32 tx_packets_64_bytes_hi;
+ u32 tx_packets_65_to_127_bytes_lo;
+ u32 tx_packets_65_to_127_bytes_hi;
+ u32 tx_packets_128_to_255_bytes_lo;
+ u32 tx_packets_128_to_255_bytes_hi;
+ u32 tx_packets_256_to_511_bytes_lo;
+ u32 tx_packets_256_to_511_bytes_hi;
+ u32 tx_packets_512_to_1023_bytes_lo;
+ u32 tx_packets_512_to_1023_bytes_hi;
+ u32 tx_packets_1024_to_1518_bytes_lo;
+ u32 tx_packets_1024_to_1518_bytes_hi;
+ u32 tx_packets_1519_to_2047_bytes_lo;
+ u32 tx_packets_1519_to_2047_bytes_hi;
+ u32 tx_packets_2048_to_4095_bytes_lo;
+ u32 tx_packets_2048_to_4095_bytes_hi;
+ u32 tx_packets_4096_to_8191_bytes_lo;
+ u32 tx_packets_4096_to_8191_bytes_hi;
+ u32 tx_packets_8192_to_9216_bytes_lo;
+ u32 tx_packets_8192_to_9216_bytes_hi;
+ u32 tx_lso_packets_lo;
+ u32 tx_lso_packets_hi;
+ u32 rx_packets_lo;
+ u32 rx_packets_hi;
+ u32 rx_unicast_packets_lo;
+ u32 rx_unicast_packets_hi;
+ u32 rx_multicast_packets_lo;
+ u32 rx_multicast_packets_hi;
+ u32 rx_broadcast_packets_lo;
+ u32 rx_broadcast_packets_hi;
+ u32 rx_bytes_lo;
+ u32 rx_bytes_hi;
+ u32 rx_unicast_bytes_lo;
+ u32 rx_unicast_bytes_hi;
+ u32 rx_multicast_bytes_lo;
+ u32 rx_multicast_bytes_hi;
+ u32 rx_broadcast_bytes_lo;
+ u32 rx_broadcast_bytes_hi;
+ u32 rx_unknown_protos;
+ u32 rsvd_69; /* Word 69 is reserved */
+ u32 rx_discards_lo;
+ u32 rx_discards_hi;
+ u32 rx_errors_lo;
+ u32 rx_errors_hi;
+ u32 rx_crc_errors_lo;
+ u32 rx_crc_errors_hi;
+ u32 rx_alignment_errors_lo;
+ u32 rx_alignment_errors_hi;
+ u32 rx_symbol_errors_lo;
+ u32 rx_symbol_errors_hi;
+ u32 rx_pause_frames_lo;
+ u32 rx_pause_frames_hi;
+ u32 rx_pause_on_frames_lo;
+ u32 rx_pause_on_frames_hi;
+ u32 rx_pause_off_frames_lo;
+ u32 rx_pause_off_frames_hi;
+ u32 rx_frames_too_long_lo;
+ u32 rx_frames_too_long_hi;
+ u32 rx_internal_mac_errors_lo;
+ u32 rx_internal_mac_errors_hi;
+ u32 rx_undersize_packets;
+ u32 rx_oversize_packets;
+ u32 rx_fragment_packets;
+ u32 rx_jabbers;
+ u32 rx_control_frames_lo;
+ u32 rx_control_frames_hi;
+ u32 rx_control_frames_unknown_opcode_lo;
+ u32 rx_control_frames_unknown_opcode_hi;
+ u32 rx_in_range_errors;
+ u32 rx_out_of_range_errors;
+ u32 rx_address_match_errors;
+ u32 rx_vlan_mismatch_errors;
+ u32 rx_dropped_too_small;
+ u32 rx_dropped_too_short;
+ u32 rx_dropped_header_too_small;
+ u32 rx_dropped_invalid_tcp_length;
+ u32 rx_dropped_runt;
+ u32 rx_ip_checksum_errors;
+ u32 rx_tcp_checksum_errors;
+ u32 rx_udp_checksum_errors;
+ u32 rx_non_rss_packets;
+ u32 rsvd_111;
+ u32 rx_ipv4_packets_lo;
+ u32 rx_ipv4_packets_hi;
+ u32 rx_ipv6_packets_lo;
+ u32 rx_ipv6_packets_hi;
+ u32 rx_ipv4_bytes_lo;
+ u32 rx_ipv4_bytes_hi;
+ u32 rx_ipv6_bytes_lo;
+ u32 rx_ipv6_bytes_hi;
+ u32 rx_nic_packets_lo;
+ u32 rx_nic_packets_hi;
+ u32 rx_tcp_packets_lo;
+ u32 rx_tcp_packets_hi;
+ u32 rx_iscsi_packets_lo;
+ u32 rx_iscsi_packets_hi;
+ u32 rx_management_packets_lo;
+ u32 rx_management_packets_hi;
+ u32 rx_switched_unicast_packets_lo;
+ u32 rx_switched_unicast_packets_hi;
+ u32 rx_switched_multicast_packets_lo;
+ u32 rx_switched_multicast_packets_hi;
+ u32 rx_switched_broadcast_packets_lo;
+ u32 rx_switched_broadcast_packets_hi;
+ u32 num_forwards_lo;
+ u32 num_forwards_hi;
+ u32 rx_fifo_overflow;
+ u32 rx_input_fifo_overflow;
+ u32 rx_drops_too_many_frags_lo;
+ u32 rx_drops_too_many_frags_hi;
+ u32 rx_drops_invalid_queue;
+ u32 rsvd_141;
+ u32 rx_drops_mtu_lo;
+ u32 rx_drops_mtu_hi;
+ u32 rx_packets_64_bytes_lo;
+ u32 rx_packets_64_bytes_hi;
+ u32 rx_packets_65_to_127_bytes_lo;
+ u32 rx_packets_65_to_127_bytes_hi;
+ u32 rx_packets_128_to_255_bytes_lo;
+ u32 rx_packets_128_to_255_bytes_hi;
+ u32 rx_packets_256_to_511_bytes_lo;
+ u32 rx_packets_256_to_511_bytes_hi;
+ u32 rx_packets_512_to_1023_bytes_lo;
+ u32 rx_packets_512_to_1023_bytes_hi;
+ u32 rx_packets_1024_to_1518_bytes_lo;
+ u32 rx_packets_1024_to_1518_bytes_hi;
+ u32 rx_packets_1519_to_2047_bytes_lo;
+ u32 rx_packets_1519_to_2047_bytes_hi;
+ u32 rx_packets_2048_to_4095_bytes_lo;
+ u32 rx_packets_2048_to_4095_bytes_hi;
+ u32 rx_packets_4096_to_8191_bytes_lo;
+ u32 rx_packets_4096_to_8191_bytes_hi;
+ u32 rx_packets_8192_to_9216_bytes_lo;
+ u32 rx_packets_8192_to_9216_bytes_hi;
+};
+
+struct pport_stats_params {
+ u16 pport_num;
+ u8 rsvd;
+ u8 reset_stats;
+};
+
+struct lancer_cmd_req_pport_stats {
+ struct be_cmd_req_hdr hdr;
+ union {
+ struct pport_stats_params params;
+ u8 rsvd[sizeof(struct lancer_pport_stats)];
+ } cmd_params;
+};
+
+struct lancer_cmd_resp_pport_stats {
+ struct be_cmd_resp_hdr hdr;
+ struct lancer_pport_stats pport_stats;
+};
+
+static inline struct lancer_pport_stats *
+	pport_stats_from_cmd(struct be_adapter *adapter)
+{
+ struct lancer_cmd_resp_pport_stats *cmd = adapter->stats_cmd.va;
+ return &cmd->pport_stats;
+}
+
+struct be_cmd_req_get_cntl_addnl_attribs {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct be_cmd_resp_get_cntl_addnl_attribs {
+ struct be_cmd_resp_hdr hdr;
+ u16 ipl_file_number;
+ u8 ipl_file_version;
+ u8 rsvd0;
+ u8 on_die_temperature; /* in degrees centigrade*/
+ u8 rsvd1[3];
+};
+
+struct be_cmd_req_vlan_config {
+ struct be_cmd_req_hdr hdr;
+ u8 interface_id;
+ u8 promiscuous;
+ u8 untagged;
+ u8 num_vlan;
+ u16 normal_vlan[64];
+} __packed;
+
+/******************* RX FILTER ******************************/
+#define BE_MAX_MC 64 /* set mcast promisc if > 64 */
+struct macaddr {
+ u8 byte[ETH_ALEN];
+};
+
+struct be_cmd_req_rx_filter {
+ struct be_cmd_req_hdr hdr;
+ u32 global_flags_mask;
+ u32 global_flags;
+ u32 if_flags_mask;
+ u32 if_flags;
+ u32 if_id;
+ u32 mcast_num;
+ struct macaddr mcast_mac[BE_MAX_MC];
+};
+
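+/* Illustrative sketch (not part of this header; IF_FLAGS_MCAST_PROMISCUOUS
+ * is a hypothetical flag name): a caller filling this request would fall
+ * back to multicast-promiscuous mode once the address list exceeds
+ * BE_MAX_MC, since mcast_mac[] can carry at most BE_MAX_MC entries.
+ */
+#if 0
+static void example_fill_mcast_list(struct be_cmd_req_rx_filter *req,
+ const struct macaddr *list, u32 count)
+{
+ if (count > BE_MAX_MC) {
+ /* too many addresses: ask HW to accept all multicast */
+ req->if_flags_mask = IF_FLAGS_MCAST_PROMISCUOUS;
+ req->if_flags = IF_FLAGS_MCAST_PROMISCUOUS;
+ req->mcast_num = 0;
+ } else {
+ memcpy(req->mcast_mac, list, count * sizeof(struct macaddr));
+ req->mcast_num = count;
+ }
+}
+#endif
+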
+/******************** Link Status Query *******************/
+struct be_cmd_req_link_status {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd;
+};
+
+enum {
+ PHY_LINK_DUPLEX_NONE = 0x0,
+ PHY_LINK_DUPLEX_HALF = 0x1,
+ PHY_LINK_DUPLEX_FULL = 0x2
+};
+
+enum {
+ PHY_LINK_SPEED_ZERO = 0x0, /* => No link */
+ PHY_LINK_SPEED_10MBPS = 0x1,
+ PHY_LINK_SPEED_100MBPS = 0x2,
+ PHY_LINK_SPEED_1GBPS = 0x3,
+ PHY_LINK_SPEED_10GBPS = 0x4
+};
+
+struct be_cmd_resp_link_status {
+ struct be_cmd_resp_hdr hdr;
+ u8 physical_port;
+ u8 mac_duplex;
+ u8 mac_speed;
+ u8 mac_fault;
+ u8 mgmt_mac_duplex;
+ u8 mgmt_mac_speed;
+ u16 link_speed;
+ u32 rsvd0;
+} __packed;
+
+/******************** Port Identification ***************************/
+/* Identifies the type of port attached to NIC */
+struct be_cmd_req_port_type {
+ struct be_cmd_req_hdr hdr;
+ u32 page_num;
+ u32 port;
+};
+
+enum {
+ TR_PAGE_A0 = 0xa0,
+ TR_PAGE_A2 = 0xa2
+};
+
+struct be_cmd_resp_port_type {
+ struct be_cmd_resp_hdr hdr;
+ u32 page_num;
+ u32 port;
+ struct data {
+ u8 identifier;
+ u8 identifier_ext;
+ u8 connector;
+ u8 transceiver[8];
+ u8 rsvd0[3];
+ u8 length_km;
+ u8 length_hm;
+ u8 length_om1;
+ u8 length_om2;
+ u8 length_cu;
+ u8 length_cu_m;
+ u8 vendor_name[16];
+ u8 rsvd;
+ u8 vendor_oui[3];
+ u8 vendor_pn[16];
+ u8 vendor_rev[4];
+ } data;
+};
+
+/******************** Get FW Version *******************/
+struct be_cmd_req_get_fw_version {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd0[FW_VER_LEN];
+ u8 rsvd1[FW_VER_LEN];
+} __packed;
+
+struct be_cmd_resp_get_fw_version {
+ struct be_cmd_resp_hdr hdr;
+ u8 firmware_version_string[FW_VER_LEN];
+ u8 fw_on_flash_version_string[FW_VER_LEN];
+} __packed;
+
+/******************** Set Flow Control *******************/
+struct be_cmd_req_set_flow_control {
+ struct be_cmd_req_hdr hdr;
+ u16 tx_flow_control;
+ u16 rx_flow_control;
+} __packed;
+
+/******************** Get Flow Control *******************/
+struct be_cmd_req_get_flow_control {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd;
+};
+
+struct be_cmd_resp_get_flow_control {
+ struct be_cmd_resp_hdr hdr;
+ u16 tx_flow_control;
+ u16 rx_flow_control;
+} __packed;
+
+/******************** Modify EQ Delay *******************/
+struct be_cmd_req_modify_eq_delay {
+ struct be_cmd_req_hdr hdr;
+ u32 num_eq;
+ struct {
+ u32 eq_id;
+ u32 phase;
+ u32 delay_multiplier;
+ } delay[8];
+} __packed;
+
+struct be_cmd_resp_modify_eq_delay {
+ struct be_cmd_resp_hdr hdr;
+ u32 rsvd0;
+} __packed;
+
+/******************** Get FW Config *******************/
+#define BE_FUNCTION_CAPS_RSS 0x2
+struct be_cmd_req_query_fw_cfg {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd[31];
+};
+
+struct be_cmd_resp_query_fw_cfg {
+ struct be_cmd_resp_hdr hdr;
+ u32 be_config_number;
+ u32 asic_revision;
+ u32 phys_port;
+ u32 function_mode;
+ u32 rsvd[26];
+ u32 function_caps;
+};
+
+/******************** RSS Config *******************/
+/* RSS types */
+#define RSS_ENABLE_NONE 0x0
+#define RSS_ENABLE_IPV4 0x1
+#define RSS_ENABLE_TCP_IPV4 0x2
+#define RSS_ENABLE_IPV6 0x4
+#define RSS_ENABLE_TCP_IPV6 0x8
+
+struct be_cmd_req_rss_config {
+ struct be_cmd_req_hdr hdr;
+ u32 if_id;
+ u16 enable_rss;
+ u16 cpu_table_size_log2;
+ u32 hash[10];
+ u8 cpu_table[128];
+ u8 flush;
+ u8 rsvd0[3];
+};
+
+/******************** Port Beacon ***************************/
+
+#define BEACON_STATE_ENABLED 0x1
+#define BEACON_STATE_DISABLED 0x0
+
+struct be_cmd_req_enable_disable_beacon {
+ struct be_cmd_req_hdr hdr;
+ u8 port_num;
+ u8 beacon_state;
+ u8 beacon_duration;
+ u8 status_duration;
+} __packed;
+
+struct be_cmd_resp_enable_disable_beacon {
+ struct be_cmd_resp_hdr resp_hdr;
+ u32 rsvd0;
+} __packed;
+
+struct be_cmd_req_get_beacon_state {
+ struct be_cmd_req_hdr hdr;
+ u8 port_num;
+ u8 rsvd0;
+ u16 rsvd1;
+} __packed;
+
+struct be_cmd_resp_get_beacon_state {
+ struct be_cmd_resp_hdr resp_hdr;
+ u8 beacon_state;
+ u8 rsvd0[3];
+} __packed;
+
+/****************** Firmware Flash ******************/
+struct flashrom_params {
+ u32 op_code;
+ u32 op_type;
+ u32 data_buf_size;
+ u32 offset;
+ u8 data_buf[4];
+};
+
+struct be_cmd_write_flashrom {
+ struct be_cmd_req_hdr hdr;
+ struct flashrom_params params;
+};
+
+/**************** Lancer Firmware Flash ************/
+struct amap_lancer_write_obj_context {
+ u8 write_length[24];
+ u8 reserved1[7];
+ u8 eof;
+} __packed;
+
+struct lancer_cmd_req_write_object {
+ struct be_cmd_req_hdr hdr;
+ u8 context[sizeof(struct amap_lancer_write_obj_context) / 8];
+ u32 write_offset;
+ u8 object_name[104];
+ u32 descriptor_count;
+ u32 buf_len;
+ u32 addr_low;
+ u32 addr_high;
+};
+
+struct lancer_cmd_resp_write_object {
+ u8 opcode;
+ u8 subsystem;
+ u8 rsvd1[2];
+ u8 status;
+ u8 additional_status;
+ u8 rsvd2[2];
+ u32 resp_len;
+ u32 actual_resp_len;
+ u32 actual_write_len;
+};
+
+/************************ WOL *******************************/
+struct be_cmd_req_acpi_wol_magic_config {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd0[145];
+ u8 magic_mac[6];
+ u8 rsvd2[2];
+} __packed;
+
+/********************** Loopback test *********************/
+struct be_cmd_req_loopback_test {
+ struct be_cmd_req_hdr hdr;
+ u32 loopback_type;
+ u32 num_pkts;
+ u64 pattern;
+ u32 src_port;
+ u32 dest_port;
+ u32 pkt_size;
+};
+
+struct be_cmd_resp_loopback_test {
+ struct be_cmd_resp_hdr resp_hdr;
+ u32 status;
+ u32 num_txfer;
+ u32 num_rx;
+ u32 miscomp_off;
+ u32 ticks_compl;
+};
+
+struct be_cmd_req_set_lmode {
+ struct be_cmd_req_hdr hdr;
+ u8 src_port;
+ u8 dest_port;
+ u8 loopback_type;
+ u8 loopback_state;
+};
+
+struct be_cmd_resp_set_lmode {
+ struct be_cmd_resp_hdr resp_hdr;
+ u8 rsvd0[4];
+};
+
+/********************** DDR DMA test *********************/
+struct be_cmd_req_ddrdma_test {
+ struct be_cmd_req_hdr hdr;
+ u64 pattern;
+ u32 byte_count;
+ u32 rsvd0;
+ u8 snd_buff[4096];
+ u8 rsvd1[4096];
+};
+
+struct be_cmd_resp_ddrdma_test {
+ struct be_cmd_resp_hdr hdr;
+ u64 pattern;
+ u32 byte_cnt;
+ u32 snd_err;
+ u8 rsvd0[4096];
+ u8 rcv_buff[4096];
+};
+
+/*********************** SEEPROM Read ***********************/
+
+#define BE_READ_SEEPROM_LEN 1024
+struct be_cmd_req_seeprom_read {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd0[BE_READ_SEEPROM_LEN];
+};
+
+struct be_cmd_resp_seeprom_read {
+ struct be_cmd_req_hdr hdr;
+ u8 seeprom_data[BE_READ_SEEPROM_LEN];
+};
+
+enum {
+ PHY_TYPE_CX4_10GB = 0,
+ PHY_TYPE_XFP_10GB,
+ PHY_TYPE_SFP_1GB,
+ PHY_TYPE_SFP_PLUS_10GB,
+ PHY_TYPE_KR_10GB,
+ PHY_TYPE_KX4_10GB,
+ PHY_TYPE_BASET_10GB,
+ PHY_TYPE_BASET_1GB,
+ PHY_TYPE_DISABLED = 255
+};
+
+struct be_cmd_req_get_phy_info {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd0[24];
+};
+
+struct be_phy_info {
+ u16 phy_type;
+ u16 interface_type;
+ u32 misc_params;
+ u32 future_use[4];
+};
+
+struct be_cmd_resp_get_phy_info {
+ struct be_cmd_req_hdr hdr;
+ struct be_phy_info phy_info;
+};
+
+/*********************** Set QOS ***********************/
+
+#define BE_QOS_BITS_NIC 1
+
+struct be_cmd_req_set_qos {
+ struct be_cmd_req_hdr hdr;
+ u32 valid_bits;
+ u32 max_bps_nic;
+ u32 rsvd[7];
+};
+
+struct be_cmd_resp_set_qos {
+ struct be_cmd_resp_hdr hdr;
+ u32 rsvd;
+};
+
+/*********************** Controller Attributes ***********************/
+struct be_cmd_req_cntl_attribs {
+ struct be_cmd_req_hdr hdr;
+};
+
+struct be_cmd_resp_cntl_attribs {
+ struct be_cmd_resp_hdr hdr;
+ struct mgmt_controller_attrib attribs;
+};
+
+/*********************** Set driver function ***********************/
+#define CAPABILITY_SW_TIMESTAMPS 2
+#define CAPABILITY_BE3_NATIVE_ERX_API 4
+
+struct be_cmd_req_set_func_cap {
+ struct be_cmd_req_hdr hdr;
+ u32 valid_cap_flags;
+ u32 cap_flags;
+ u8 rsvd[212];
+};
+
+struct be_cmd_resp_set_func_cap {
+ struct be_cmd_resp_hdr hdr;
+ u32 valid_cap_flags;
+ u32 cap_flags;
+ u8 rsvd[212];
+};
+
+/*************** HW Stats Get v1 **********************************/
+#define BE_TXP_SW_SZ 48
+struct be_port_rxf_stats_v1 {
+ u32 rsvd0[12];
+ u32 rx_crc_errors;
+ u32 rx_alignment_symbol_errors;
+ u32 rx_pause_frames;
+ u32 rx_priority_pause_frames;
+ u32 rx_control_frames;
+ u32 rx_in_range_errors;
+ u32 rx_out_range_errors;
+ u32 rx_frame_too_long;
+ u32 rx_address_match_errors;
+ u32 rx_dropped_too_small;
+ u32 rx_dropped_too_short;
+ u32 rx_dropped_header_too_small;
+ u32 rx_dropped_tcp_length;
+ u32 rx_dropped_runt;
+ u32 rsvd1[10];
+ u32 rx_ip_checksum_errs;
+ u32 rx_tcp_checksum_errs;
+ u32 rx_udp_checksum_errs;
+ u32 rsvd2[7];
+ u32 rx_switched_unicast_packets;
+ u32 rx_switched_multicast_packets;
+ u32 rx_switched_broadcast_packets;
+ u32 rsvd3[3];
+ u32 tx_pauseframes;
+ u32 tx_priority_pauseframes;
+ u32 tx_controlframes;
+ u32 rsvd4[10];
+ u32 rxpp_fifo_overflow_drop;
+ u32 rx_input_fifo_overflow_drop;
+ u32 pmem_fifo_overflow_drop;
+ u32 jabber_events;
+ u32 rsvd5[3];
+};
+
+
+struct be_rxf_stats_v1 {
+ struct be_port_rxf_stats_v1 port[4];
+ u32 rsvd0[2];
+ u32 rx_drops_no_pbuf;
+ u32 rx_drops_no_txpb;
+ u32 rx_drops_no_erx_descr;
+ u32 rx_drops_no_tpre_descr;
+ u32 rsvd1[6];
+ u32 rx_drops_too_many_frags;
+ u32 rx_drops_invalid_ring;
+ u32 forwarded_packets;
+ u32 rx_drops_mtu;
+ u32 rsvd2[14];
+};
+
+struct be_erx_stats_v1 {
+ u32 rx_drops_no_fragments[68]; /* dwords 0 to 67 */
+ u32 rsvd[4];
+};
+
+struct be_hw_stats_v1 {
+ struct be_rxf_stats_v1 rxf;
+ u32 rsvd0[BE_TXP_SW_SZ];
+ struct be_erx_stats_v1 erx;
+ struct be_pmem_stats pmem;
+ u32 rsvd1[3];
+};
+
+struct be_cmd_req_get_stats_v1 {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd[sizeof(struct be_hw_stats_v1)];
+};
+
+struct be_cmd_resp_get_stats_v1 {
+ struct be_cmd_resp_hdr hdr;
+ struct be_hw_stats_v1 hw_stats;
+};
+
+static inline void *hw_stats_from_cmd(struct be_adapter *adapter)
+{
+ if (adapter->generation == BE_GEN3) {
+ struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
+
+ return &cmd->hw_stats;
+ } else {
+ struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
+
+ return &cmd->hw_stats;
+ }
+}
+
+static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
+{
+ if (adapter->generation == BE_GEN3) {
+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
+
+ return &hw_stats->erx;
+ } else {
+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
+
+ return &hw_stats->erx;
+ }
+}
+
+extern int be_pci_fnum_get(struct be_adapter *adapter);
+extern int be_cmd_POST(struct be_adapter *adapter);
+extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
+ u8 type, bool permanent, u32 if_handle);
+extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
+ u32 if_id, u32 *pmac_id, u32 domain);
+extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
+ u32 pmac_id, u32 domain);
+extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
+ u32 en_flags, u8 *mac, bool pmac_invalid,
+ u32 *if_handle, u32 *pmac_id, u32 domain);
+extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
+ u32 domain);
+extern int be_cmd_eq_create(struct be_adapter *adapter,
+ struct be_queue_info *eq, int eq_delay);
+extern int be_cmd_cq_create(struct be_adapter *adapter,
+ struct be_queue_info *cq, struct be_queue_info *eq,
+ bool sol_evts, bool no_delay,
+ int num_cqe_dma_coalesce);
+extern int be_cmd_mccq_create(struct be_adapter *adapter,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq);
+extern int be_cmd_txq_create(struct be_adapter *adapter,
+ struct be_queue_info *txq,
+ struct be_queue_info *cq);
+extern int be_cmd_rxq_create(struct be_adapter *adapter,
+ struct be_queue_info *rxq, u16 cq_id,
+ u16 frag_size, u16 max_frame_size, u32 if_id,
+ u32 rss, u8 *rss_id);
+extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
+ int type);
+extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
+ struct be_queue_info *q);
+extern int be_cmd_link_status_query(struct be_adapter *adapter,
+ u8 *mac_speed, u16 *link_speed, u32 dom);
+extern int be_cmd_reset(struct be_adapter *adapter);
+extern int be_cmd_get_stats(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd);
+extern int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd);
+extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver);
+
+extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd);
+extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
+ u16 *vtag_array, u32 num, bool untagged,
+ bool promiscuous);
+extern int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
+extern int be_cmd_set_flow_control(struct be_adapter *adapter,
+ u32 tx_fc, u32 rx_fc);
+extern int be_cmd_get_flow_control(struct be_adapter *adapter,
+ u32 *tx_fc, u32 *rx_fc);
+extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
+ u32 *port_num, u32 *function_mode, u32 *function_caps);
+extern int be_cmd_reset_function(struct be_adapter *adapter);
+extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
+ u16 table_size);
+extern int be_process_mcc(struct be_adapter *adapter, int *status);
+extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
+ u8 port_num, u8 beacon, u8 status, u8 state);
+extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
+ u8 port_num, u32 *state);
+extern int be_cmd_write_flashrom(struct be_adapter *adapter,
+ struct be_dma_mem *cmd, u32 flash_oper,
+ u32 flash_opcode, u32 buf_size);
+extern int lancer_cmd_write_object(struct be_adapter *adapter,
+ struct be_dma_mem *cmd,
+ u32 data_size, u32 data_offset,
+ const char *obj_name,
+ u32 *data_written, u8 *addn_status);
+int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
+ int offset);
+extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
+ struct be_dma_mem *nonemb_cmd);
+extern int be_cmd_fw_init(struct be_adapter *adapter);
+extern int be_cmd_fw_clean(struct be_adapter *adapter);
+extern void be_async_mcc_enable(struct be_adapter *adapter);
+extern void be_async_mcc_disable(struct be_adapter *adapter);
+extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
+ u32 loopback_type, u32 pkt_size,
+ u32 num_pkts, u64 pattern);
+extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
+ u32 byte_cnt, struct be_dma_mem *cmd);
+extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd);
+extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+ u8 loopback_type, u8 enable);
+extern int be_cmd_get_phy_info(struct be_adapter *adapter,
+ struct be_phy_info *phy_info);
+extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
+extern void be_detect_dump_ue(struct be_adapter *adapter);
+extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
+extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
+extern int be_cmd_req_native_mode(struct be_adapter *adapter);
+extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
+extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
+
--- /dev/null
+/*
+ * Copyright (C) 2005 - 2011 Emulex
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#include "be.h"
+#include "be_cmds.h"
+#include <linux/ethtool.h>
+
+struct be_ethtool_stat {
+ char desc[ETH_GSTRING_LEN];
+ int type;
+ int size;
+ int offset;
+};
+
+enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
+#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
+ offsetof(_struct, field)
+#define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\
+ FIELDINFO(struct be_tx_stats, field)
+#define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\
+ FIELDINFO(struct be_rx_stats, field)
+#define DRVSTAT_INFO(field) #field, DRVSTAT,\
+ FIELDINFO(struct be_drv_stats, field)
+
+static const struct be_ethtool_stat et_stats[] = {
+ {DRVSTAT_INFO(tx_events)},
+ {DRVSTAT_INFO(rx_crc_errors)},
+ {DRVSTAT_INFO(rx_alignment_symbol_errors)},
+ {DRVSTAT_INFO(rx_pause_frames)},
+ {DRVSTAT_INFO(rx_control_frames)},
+ {DRVSTAT_INFO(rx_in_range_errors)},
+ {DRVSTAT_INFO(rx_out_range_errors)},
+ {DRVSTAT_INFO(rx_frame_too_long)},
+ {DRVSTAT_INFO(rx_address_match_errors)},
+ {DRVSTAT_INFO(rx_dropped_too_small)},
+ {DRVSTAT_INFO(rx_dropped_too_short)},
+ {DRVSTAT_INFO(rx_dropped_header_too_small)},
+ {DRVSTAT_INFO(rx_dropped_tcp_length)},
+ {DRVSTAT_INFO(rx_dropped_runt)},
+ {DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
+ {DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
+ {DRVSTAT_INFO(rx_ip_checksum_errs)},
+ {DRVSTAT_INFO(rx_tcp_checksum_errs)},
+ {DRVSTAT_INFO(rx_udp_checksum_errs)},
+ {DRVSTAT_INFO(tx_pauseframes)},
+ {DRVSTAT_INFO(tx_controlframes)},
+ {DRVSTAT_INFO(rx_priority_pause_frames)},
+ {DRVSTAT_INFO(pmem_fifo_overflow_drop)},
+ {DRVSTAT_INFO(jabber_events)},
+ {DRVSTAT_INFO(rx_drops_no_pbuf)},
+ {DRVSTAT_INFO(rx_drops_no_txpb)},
+ {DRVSTAT_INFO(rx_drops_no_erx_descr)},
+ {DRVSTAT_INFO(rx_drops_no_tpre_descr)},
+ {DRVSTAT_INFO(rx_drops_too_many_frags)},
+ {DRVSTAT_INFO(rx_drops_invalid_ring)},
+ {DRVSTAT_INFO(forwarded_packets)},
+ {DRVSTAT_INFO(rx_drops_mtu)},
+ {DRVSTAT_INFO(eth_red_drops)},
+ {DRVSTAT_INFO(be_on_die_temperature)}
+};
+#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
+
+/* Stats related to multi RX queues: get_stats routine assumes bytes, pkts
+ * are first and second members respectively.
+ */
+static const struct be_ethtool_stat et_rx_stats[] = {
+ {DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
+ {DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
+ {DRVSTAT_RX_INFO(rx_polls)},
+ {DRVSTAT_RX_INFO(rx_events)},
+ {DRVSTAT_RX_INFO(rx_compl)},
+ {DRVSTAT_RX_INFO(rx_mcast_pkts)},
+ {DRVSTAT_RX_INFO(rx_post_fail)},
+ {DRVSTAT_RX_INFO(rx_drops_no_skbs)},
+ {DRVSTAT_RX_INFO(rx_drops_no_frags)}
+};
+#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
+
+/* Stats related to multi TX queues: get_stats routine assumes compl is the
+ * first member
+ */
+static const struct be_ethtool_stat et_tx_stats[] = {
+ {DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */
+ {DRVSTAT_TX_INFO(tx_bytes)},
+ {DRVSTAT_TX_INFO(tx_pkts)},
+ {DRVSTAT_TX_INFO(tx_reqs)},
+ {DRVSTAT_TX_INFO(tx_wrbs)},
+ {DRVSTAT_TX_INFO(tx_stops)}
+};
+#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
+
+static const char et_self_tests[][ETH_GSTRING_LEN] = {
+ "MAC Loopback test",
+ "PHY Loopback test",
+ "External Loopback test",
+ "DDR DMA test",
+ "Link test"
+};
+
+#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
+#define BE_MAC_LOOPBACK 0x0
+#define BE_PHY_LOOPBACK 0x1
+#define BE_ONE_PORT_EXT_LOOPBACK 0x2
+#define BE_NO_LOOPBACK 0xff
+
+static void
+be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ strcpy(drvinfo->driver, DRV_NAME);
+ strcpy(drvinfo->version, DRV_VER);
+ strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
+ strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
+ drvinfo->testinfo_len = 0;
+ drvinfo->regdump_len = 0;
+ drvinfo->eedump_len = 0;
+}
+
+static int
+be_get_reg_len(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ u32 log_size = 0;
+
+ if (be_physfn(adapter))
+ be_cmd_get_reg_len(adapter, &log_size);
+
+ return log_size;
+}
+
+static void
+be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (be_physfn(adapter)) {
+ memset(buf, 0, regs->len);
+ be_cmd_get_regs(adapter, regs->len, buf);
+ }
+}
+
+static int
+be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_eq_obj *rx_eq = &adapter->rx_obj[0].rx_eq;
+ struct be_eq_obj *tx_eq = &adapter->tx_eq;
+
+ coalesce->rx_coalesce_usecs = rx_eq->cur_eqd;
+ coalesce->rx_coalesce_usecs_high = rx_eq->max_eqd;
+ coalesce->rx_coalesce_usecs_low = rx_eq->min_eqd;
+
+ coalesce->tx_coalesce_usecs = tx_eq->cur_eqd;
+ coalesce->tx_coalesce_usecs_high = tx_eq->max_eqd;
+ coalesce->tx_coalesce_usecs_low = tx_eq->min_eqd;
+
+ coalesce->use_adaptive_rx_coalesce = rx_eq->enable_aic;
+ coalesce->use_adaptive_tx_coalesce = tx_eq->enable_aic;
+
+ return 0;
+}
+
+/*
+ * This routine is used to set the interrupt coalescing delay
+ */
+static int
+be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_rx_obj *rxo;
+ struct be_eq_obj *rx_eq;
+ struct be_eq_obj *tx_eq = &adapter->tx_eq;
+ u32 rx_max, rx_min, rx_cur;
+ int status = 0, i;
+ u32 tx_cur;
+
+ if (coalesce->use_adaptive_tx_coalesce == 1)
+ return -EINVAL;
+
+ for_all_rx_queues(adapter, rxo, i) {
+ rx_eq = &rxo->rx_eq;
+
+ if (!rx_eq->enable_aic && coalesce->use_adaptive_rx_coalesce)
+ rx_eq->cur_eqd = 0;
+ rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
+
+ rx_max = coalesce->rx_coalesce_usecs_high;
+ rx_min = coalesce->rx_coalesce_usecs_low;
+ rx_cur = coalesce->rx_coalesce_usecs;
+
+ if (rx_eq->enable_aic) {
+ if (rx_max > BE_MAX_EQD)
+ rx_max = BE_MAX_EQD;
+ if (rx_min > rx_max)
+ rx_min = rx_max;
+ rx_eq->max_eqd = rx_max;
+ rx_eq->min_eqd = rx_min;
+ if (rx_eq->cur_eqd > rx_max)
+ rx_eq->cur_eqd = rx_max;
+ if (rx_eq->cur_eqd < rx_min)
+ rx_eq->cur_eqd = rx_min;
+ } else {
+ if (rx_cur > BE_MAX_EQD)
+ rx_cur = BE_MAX_EQD;
+ if (rx_eq->cur_eqd != rx_cur) {
+ status = be_cmd_modify_eqd(adapter, rx_eq->q.id,
+ rx_cur);
+ if (!status)
+ rx_eq->cur_eqd = rx_cur;
+ }
+ }
+ }
+
+ tx_cur = coalesce->tx_coalesce_usecs;
+
+ if (tx_cur > BE_MAX_EQD)
+ tx_cur = BE_MAX_EQD;
+ if (tx_eq->cur_eqd != tx_cur) {
+ status = be_cmd_modify_eqd(adapter, tx_eq->q.id, tx_cur);
+ if (!status)
+ tx_eq->cur_eqd = tx_cur;
+ }
+
+ return 0;
+}
+
+static void
+be_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, uint64_t *data)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_rx_obj *rxo;
+ struct be_tx_obj *txo;
+ void *p;
+ unsigned int i, j, base = 0, start;
+
+ for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
+ p = (u8 *)&adapter->drv_stats + et_stats[i].offset;
+ data[i] = *(u32 *)p;
+ }
+ base += ETHTOOL_STATS_NUM;
+
+ for_all_rx_queues(adapter, rxo, j) {
+ struct be_rx_stats *stats = rx_stats(rxo);
+
+ do {
+ start = u64_stats_fetch_begin_bh(&stats->sync);
+ data[base] = stats->rx_bytes;
+ data[base + 1] = stats->rx_pkts;
+ } while (u64_stats_fetch_retry_bh(&stats->sync, start));
+
+ for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
+ p = (u8 *)stats + et_rx_stats[i].offset;
+ data[base + i] = *(u32 *)p;
+ }
+ base += ETHTOOL_RXSTATS_NUM;
+ }
+
+ for_all_tx_queues(adapter, txo, j) {
+ struct be_tx_stats *stats = tx_stats(txo);
+
+ do {
+ start = u64_stats_fetch_begin_bh(&stats->sync_compl);
+ data[base] = stats->tx_compl;
+ } while (u64_stats_fetch_retry_bh(&stats->sync_compl, start));
+
+ do {
+ start = u64_stats_fetch_begin_bh(&stats->sync);
+ for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
+ p = (u8 *)stats + et_tx_stats[i].offset;
+ data[base + i] =
+ (et_tx_stats[i].size == sizeof(u64)) ?
+ *(u64 *)p : *(u32 *)p;
+ }
+ } while (u64_stats_fetch_retry_bh(&stats->sync, start));
+ base += ETHTOOL_TXSTATS_NUM;
+ }
+}
+
+static void
+be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
+ uint8_t *data)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
+ memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->num_rx_qs; i++) {
+ for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) {
+ sprintf(data, "rxq%d: %s", i,
+ et_rx_stats[j].desc);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+ for (i = 0; i < adapter->num_tx_qs; i++) {
+ for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) {
+ sprintf(data, "txq%d: %s", i,
+ et_tx_stats[j].desc);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+ case ETH_SS_TEST:
+ for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
+ memcpy(data, et_self_tests[i], ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static int be_get_sset_count(struct net_device *netdev, int stringset)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ return ETHTOOL_TESTS_NUM;
+ case ETH_SS_STATS:
+ return ETHTOOL_STATS_NUM +
+ adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
+ adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_phy_info phy_info;
+ u8 mac_speed = 0;
+ u16 link_speed = 0;
+ int status;
+
+ if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
+ status = be_cmd_link_status_query(adapter, &mac_speed,
+ &link_speed, 0);
+
+ /* link_speed is in units of 10 Mbps */
+ if (link_speed) {
+ ethtool_cmd_speed_set(ecmd, link_speed*10);
+ } else {
+ switch (mac_speed) {
+ case PHY_LINK_SPEED_10MBPS:
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
+ break;
+ case PHY_LINK_SPEED_100MBPS:
+ ethtool_cmd_speed_set(ecmd, SPEED_100);
+ break;
+ case PHY_LINK_SPEED_1GBPS:
+ ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ break;
+ case PHY_LINK_SPEED_10GBPS:
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ break;
+ case PHY_LINK_SPEED_ZERO:
+ ethtool_cmd_speed_set(ecmd, 0);
+ break;
+ }
+ }
+
+ status = be_cmd_get_phy_info(adapter, &phy_info);
+ if (!status) {
+ switch (phy_info.interface_type) {
+ case PHY_TYPE_XFP_10GB:
+ case PHY_TYPE_SFP_1GB:
+ case PHY_TYPE_SFP_PLUS_10GB:
+ ecmd->port = PORT_FIBRE;
+ break;
+ default:
+ ecmd->port = PORT_TP;
+ break;
+ }
+
+ switch (phy_info.interface_type) {
+ case PHY_TYPE_KR_10GB:
+ case PHY_TYPE_KX4_10GB:
+ ecmd->autoneg = AUTONEG_ENABLE;
+ ecmd->transceiver = XCVR_INTERNAL;
+ break;
+ default:
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ }
+ }
+
+ /* Save for future use */
+ adapter->link_speed = ethtool_cmd_speed(ecmd);
+ adapter->port_type = ecmd->port;
+ adapter->transceiver = ecmd->transceiver;
+ adapter->autoneg = ecmd->autoneg;
+ } else {
+ ethtool_cmd_speed_set(ecmd, adapter->link_speed);
+ ecmd->port = adapter->port_type;
+ ecmd->transceiver = adapter->transceiver;
+ ecmd->autoneg = adapter->autoneg;
+ }
+
+ ecmd->duplex = DUPLEX_FULL;
+ ecmd->phy_address = adapter->port_num;
+ switch (ecmd->port) {
+ case PORT_FIBRE:
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+ break;
+ case PORT_TP:
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
+ break;
+ case PORT_AUI:
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_AUI);
+ break;
+ }
+
+ if (ecmd->autoneg) {
+ ecmd->supported |= SUPPORTED_1000baseT_Full;
+ ecmd->supported |= SUPPORTED_Autoneg;
+ ecmd->advertising |= (ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full);
+ }
+
+ return 0;
+}
+
+static void
+be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ ring->rx_max_pending = adapter->rx_obj[0].q.len;
+ ring->tx_max_pending = adapter->tx_obj[0].q.len;
+
+ ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used);
+ ring->tx_pending = atomic_read(&adapter->tx_obj[0].q.used);
+}
+
+static void
+be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
+ ecmd->autoneg = 0;
+}
+
+static int
+be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status;
+
+ if (ecmd->autoneg != 0)
+ return -EINVAL;
+ adapter->tx_fc = ecmd->tx_pause;
+ adapter->rx_fc = ecmd->rx_pause;
+
+ status = be_cmd_set_flow_control(adapter,
+ adapter->tx_fc, adapter->rx_fc);
+ if (status)
+ dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
+
+ return status;
+}
+
+static int
+be_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
+ &adapter->beacon_state);
+ return 1; /* cycle on/off once per second */
+
+ case ETHTOOL_ID_ON:
+ be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
+ BEACON_STATE_ENABLED);
+ break;
+
+ case ETHTOOL_ID_OFF:
+ be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
+ BEACON_STATE_DISABLED);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
+ adapter->beacon_state);
+ }
+
+ return 0;
+}
+
+static bool
+be_is_wol_supported(struct be_adapter *adapter)
+{
+ return be_physfn(adapter);
+}
+
+static void
+be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (be_is_wol_supported(adapter))
+ wol->supported = WAKE_MAGIC;
+
+ if (adapter->wol)
+ wol->wolopts = WAKE_MAGIC;
+ else
+ wol->wolopts = 0;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int
+be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ if ((wol->wolopts & WAKE_MAGIC) && be_is_wol_supported(adapter))
+ adapter->wol = true;
+ else
+ adapter->wol = false;
+
+ return 0;
+}
+
+static int
+be_test_ddr_dma(struct be_adapter *adapter)
+{
+ int ret, i;
+ struct be_dma_mem ddrdma_cmd;
+ static const u64 pattern[2] = {
+ 0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL
+ };
+
+ ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
+ ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
+ &ddrdma_cmd.dma, GFP_KERNEL);
+ if (!ddrdma_cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < 2; i++) {
+ ret = be_cmd_ddr_dma_test(adapter, pattern[i],
+ 4096, &ddrdma_cmd);
+ if (ret != 0)
+ goto err;
+ }
+
+err:
+ dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
+ ddrdma_cmd.dma);
+ return ret;
+}
+
+static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
+ u64 *status)
+{
+ be_cmd_set_loopback(adapter, adapter->hba_port_num,
+ loopback_type, 1);
+ *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
+ loopback_type, 1500,
+ 2, 0xabc);
+ be_cmd_set_loopback(adapter, adapter->hba_port_num,
+ BE_NO_LOOPBACK, 1);
+ return *status;
+}
+
+static void
+be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ u8 mac_speed = 0;
+ u16 qos_link_speed = 0;
+
+ memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
+
+ if (test->flags & ETH_TEST_FL_OFFLINE) {
+ if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
+ &data[0]) != 0) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ }
+ if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
+ &data[1]) != 0) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ }
+ if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
+ &data[2]) != 0) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ }
+ }
+
+ if (be_test_ddr_dma(adapter) != 0) {
+ data[3] = 1;
+ test->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (be_cmd_link_status_query(adapter, &mac_speed,
+ &qos_link_speed, 0) != 0) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ data[4] = -1;
+ } else if (!mac_speed) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ data[4] = 1;
+ }
+}
+
+static int
+be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ char file_name[ETHTOOL_FLASH_MAX_FILENAME];
+
+ strncpy(file_name, efl->data, ETHTOOL_FLASH_MAX_FILENAME - 1);
+ file_name[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
+
+ return be_load_fw(adapter, file_name);
+}
+
+static int
+be_get_eeprom_len(struct net_device *netdev)
+{
+ return BE_READ_SEEPROM_LEN;
+}
+
+static int
+be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+ uint8_t *data)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_dma_mem eeprom_cmd;
+ struct be_cmd_resp_seeprom_read *resp;
+ int status;
+
+ if (!eeprom->len)
+ return -EINVAL;
+
+ eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
+
+ memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
+ eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
+ eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
+ &eeprom_cmd.dma, GFP_KERNEL);
+
+ if (!eeprom_cmd.va) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure. Could not read eeprom\n");
+ return -ENOMEM;
+ }
+
+ status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
+
+ if (!status) {
+ resp = eeprom_cmd.va;
+ memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
+ }
+ dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
+ eeprom_cmd.dma);
+
+ return status;
+}
+
+const struct ethtool_ops be_ethtool_ops = {
+ .get_settings = be_get_settings,
+ .get_drvinfo = be_get_drvinfo,
+ .get_wol = be_get_wol,
+ .set_wol = be_set_wol,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = be_get_eeprom_len,
+ .get_eeprom = be_read_eeprom,
+ .get_coalesce = be_get_coalesce,
+ .set_coalesce = be_set_coalesce,
+ .get_ringparam = be_get_ringparam,
+ .get_pauseparam = be_get_pauseparam,
+ .set_pauseparam = be_set_pauseparam,
+ .get_strings = be_get_stat_strings,
+ .set_phys_id = be_set_phys_id,
+ .get_sset_count = be_get_sset_count,
+ .get_ethtool_stats = be_get_ethtool_stats,
+ .get_regs_len = be_get_reg_len,
+ .get_regs = be_get_regs,
+ .flash_device = be_do_flash,
+ .self_test = be_self_test,
+};
--- /dev/null
+/*
+ * Copyright (C) 2005 - 2011 Emulex
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+/********* Mailbox door bell *************/
+/* Used for driver communication with the FW.
+ * The software must write this register twice to post any command. First,
+ * it writes the register with hi=1 and the upper bits of the physical address
+ * for the MAILBOX structure. Software must poll the ready bit until this
+ * is acknowledged. Then, software writes the register with hi=0 with the lower
+ * bits in the address. It must poll the ready bit until the command is
+ * complete. Upon completion, the MAILBOX will contain a valid completion
+ * queue entry.
+ */
+#define MPU_MAILBOX_DB_OFFSET 0x160
+#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */
+#define MPU_MAILBOX_DB_HI_MASK 0x2 /* bit 1 */
+
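+/* Illustrative posting sequence for the description above (a sketch, not
+ * part of the original code; mbox_db_ready_wait() and the address-bit
+ * placeholders are hypothetical, and mbox_db_ready_wait() stands for a
+ * helper that polls MPU_MAILBOX_DB_RDY_MASK until the FW acknowledges):
+ *
+ *	// first write: hi = 1 plus the upper bits of the mailbox address
+ *	iowrite32(MPU_MAILBOX_DB_HI_MASK | upper_addr_bits,
+ *		  db + MPU_MAILBOX_DB_OFFSET);
+ *	mbox_db_ready_wait(db);			// poll bit 0 (ready)
+ *
+ *	// second write: hi = 0 plus the lower bits of the address
+ *	iowrite32(lower_addr_bits, db + MPU_MAILBOX_DB_OFFSET);
+ *	mbox_db_ready_wait(db);	// MAILBOX now holds a valid completion entry
+ */
+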
+#define MPU_EP_CONTROL 0
+
+/********** MPU semaphore ******************/
+#define MPU_EP_SEMAPHORE_OFFSET 0xac
+#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET 0x400
+#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
+#define EP_SEMAPHORE_POST_ERR_MASK 0x1
+#define EP_SEMAPHORE_POST_ERR_SHIFT 31
+
+/* MPU semaphore POST stage values */
+#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting go-ahead from host */
+#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahead to FW */
+#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
+#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
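+
+/* Sketch of the POST poll implied by the stages above (illustrative, not
+ * part of the original code; csr is the semaphore BAR mapping, and the
+ * retry loop and timeout handling are elided):
+ *
+ *	u32 sem = ioread32(csr + MPU_EP_SEMAPHORE_OFFSET);
+ *
+ *	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
+ *		return -EIO;	// FW flagged a POST error
+ *	if ((sem & EP_SEMAPHORE_POST_STAGE_MASK) == POST_STAGE_ARMFW_RDY)
+ *		return 0;	// FW is done with POST; safe to proceed
+ *	// otherwise sleep briefly and poll again
+ */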
+
+
+/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */
+#define SLIPORT_STATUS_OFFSET 0x404
+#define SLIPORT_CONTROL_OFFSET 0x408
+
+#define SLIPORT_STATUS_ERR_MASK 0x80000000
+#define SLIPORT_STATUS_RN_MASK 0x01000000
+#define SLIPORT_STATUS_RDY_MASK 0x00800000
+
+
+#define SLI_PORT_CONTROL_IP_MASK 0x08000000
+
+/********* Memory BAR register ************/
+#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
+/* Host Interrupt Enable: when set, interrupts are enabled, although "PCI
+ * Interrupt Disable" may still globally block interrupts in addition to the
+ * individual interrupt masks. This gives the device driver a mechanism to
+ * block all interrupts atomically without having to arbitrate for the PCI
+ * Interrupt Disable bit with the OS.
+ */
+#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */
+
+/********* Power management (WOL) **********/
+#define PCICFG_PM_CONTROL_OFFSET 0x44
+#define PCICFG_PM_CONTROL_MASK 0x108 /* bits 3 & 8 */
+
+/********* Online Control Registers *******/
+#define PCICFG_ONLINE0 0xB0
+#define PCICFG_ONLINE1 0xB4
+
+/********* UE Status and Mask Registers ***/
+#define PCICFG_UE_STATUS_LOW 0xA0
+#define PCICFG_UE_STATUS_HIGH 0xA4
+#define PCICFG_UE_STATUS_LOW_MASK 0xA8
+#define PCICFG_UE_STATUS_HI_MASK 0xAC
+
+/******** SLI_INTF ***********************/
+#define SLI_INTF_REG_OFFSET 0x58
+#define SLI_INTF_VALID_MASK 0xE0000000
+#define SLI_INTF_VALID 0xC0000000
+#define SLI_INTF_HINT2_MASK 0x1F000000
+#define SLI_INTF_HINT2_SHIFT 24
+#define SLI_INTF_HINT1_MASK 0x00FF0000
+#define SLI_INTF_HINT1_SHIFT 16
+#define SLI_INTF_FAMILY_MASK 0x00000F00
+#define SLI_INTF_FAMILY_SHIFT 8
+#define SLI_INTF_IF_TYPE_MASK 0x0000F000
+#define SLI_INTF_IF_TYPE_SHIFT 12
+#define SLI_INTF_REV_MASK 0x000000F0
+#define SLI_INTF_REV_SHIFT 4
+#define SLI_INTF_FT_MASK 0x00000001
+
+
+/* SLI family */
+#define BE_SLI_FAMILY 0x0
+#define LANCER_A0_SLI_FAMILY 0xA
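+
+/* Example decode of SLI_INTF (an illustrative sketch, not the original
+ * code): the register lives in PCI config space; the contents are first
+ * validated, then the family field tells Lancer parts from BE chips.
+ *
+ *	u32 sli_intf;
+ *
+ *	pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
+ *	if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID)
+ *		return -EINVAL;		// register contents not valid
+ *	family = (sli_intf & SLI_INTF_FAMILY_MASK) >> SLI_INTF_FAMILY_SHIFT;
+ *	// family == LANCER_A0_SLI_FAMILY => Lancer; BE_SLI_FAMILY => BE2/BE3
+ */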
+
+
+/********* ISR0 Register offset **********/
+#define CEV_ISR0_OFFSET 0xC18
+#define CEV_ISR_SIZE 4
+
+/********* Event Q door bell *************/
+#define DB_EQ_OFFSET DB_CQ_OFFSET
+#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
+#define DB_EQ_RING_ID_EXT_MASK 0x3e00 /* bits 9-13 */
+#define DB_EQ_RING_ID_EXT_MASK_SHIFT (2) /* qid bits 9-13 placing at 11-15 */
+
+/* Clear the interrupt for this eq */
+#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
+/* Must be 1 */
+#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */
+/* Number of event entries processed */
+#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
+/* Rearm bit */
+#define DB_EQ_REARM_SHIFT (29) /* bit 29 */
+
+/********* Compl Q door bell *************/
+#define DB_CQ_OFFSET 0x120
+#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
+#define DB_CQ_RING_ID_EXT_MASK 0x7C00 /* bits 10-14 */
+#define DB_CQ_RING_ID_EXT_MASK_SHIFT (1) /* qid bits 10-14 placing at 11-15 */
+
+/* Number of event entries processed */
+#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
+/* Rearm bit */
+#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
+
+/********** TX ULP door bell *************/
+#define DB_TXULP1_OFFSET 0x60
+#define DB_TXULP_RING_ID_MASK 0x7FF /* bits 0 - 10 */
+/* Number of tx entries posted */
+#define DB_TXULP_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
+#define DB_TXULP_NUM_POSTED_MASK 0x3FFF /* bits 16 - 29 */
+
+/********** RQ(erx) door bell ************/
+#define DB_RQ_OFFSET 0x100
+#define DB_RQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
+/* Number of rx frags posted */
+#define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */
+
+/********** MCC door bell ************/
+#define DB_MCCQ_OFFSET 0x140
+#define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */
+/* Number of entries posted */
+#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
+
+/********** SRIOV VF PCICFG OFFSET ********/
+#define SRIOV_VF_PCICFG_OFFSET (4096)
+
+/********** FAT TABLE ********/
+#define RETRIEVE_FAT 0
+#define QUERY_FAT 1
+
+/* Flashrom related descriptors */
+#define IMAGE_TYPE_FIRMWARE 160
+#define IMAGE_TYPE_BOOTCODE 224
+#define IMAGE_TYPE_OPTIONROM 32
+
+#define NUM_FLASHDIR_ENTRIES 32
+
+#define IMG_TYPE_ISCSI_ACTIVE 0
+#define IMG_TYPE_REDBOOT 1
+#define IMG_TYPE_BIOS 2
+#define IMG_TYPE_PXE_BIOS 3
+#define IMG_TYPE_FCOE_BIOS 8
+#define IMG_TYPE_ISCSI_BACKUP 9
+#define IMG_TYPE_FCOE_FW_ACTIVE 10
+#define IMG_TYPE_FCOE_FW_BACKUP 11
+#define IMG_TYPE_NCSI_FW 13
+#define IMG_TYPE_PHY_FW 99
+#define TN_8022 13
+
+#define ILLEGAL_IOCTL_REQ 2
+#define FLASHROM_OPER_PHY_FLASH 9
+#define FLASHROM_OPER_PHY_SAVE 10
+#define FLASHROM_OPER_FLASH 1
+#define FLASHROM_OPER_SAVE 2
+#define FLASHROM_OPER_REPORT 4
+
+#define FLASH_IMAGE_MAX_SIZE_g2 (1310720) /* Max firmware image size */
+#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 (262144) /* Max OPTION ROM image sz */
+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 (262144) /* Max Redboot image sz */
+#define FLASH_IMAGE_MAX_SIZE_g3 (2097152) /* Max firmware image size */
+#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 (524288) /* Max OPTION ROM image sz */
+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 (1048576) /* Max Redboot image sz */
+#define FLASH_NCSI_IMAGE_MAX_SIZE_g3 (262144)
+#define FLASH_PHY_FW_IMAGE_MAX_SIZE_g3 262144
+
+#define FLASH_NCSI_MAGIC (0x16032009)
+#define FLASH_NCSI_DISABLED (0)
+#define FLASH_NCSI_ENABLED (1)
+
+#define FLASH_NCSI_BITFILE_HDR_OFFSET (0x600000)
+
+/* Offsets for components on Flash. */
+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 (1048576)
+#define FLASH_iSCSI_BACKUP_IMAGE_START_g2 (2359296)
+#define FLASH_FCoE_PRIMARY_IMAGE_START_g2 (3670016)
+#define FLASH_FCoE_BACKUP_IMAGE_START_g2 (4980736)
+#define FLASH_iSCSI_BIOS_START_g2 (7340032)
+#define FLASH_PXE_BIOS_START_g2 (7864320)
+#define FLASH_FCoE_BIOS_START_g2 (524288)
+#define FLASH_REDBOOT_START_g2 (0)
+
+#define FLASH_NCSI_START_g3 (15990784)
+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 (2097152)
+#define FLASH_iSCSI_BACKUP_IMAGE_START_g3 (4194304)
+#define FLASH_FCoE_PRIMARY_IMAGE_START_g3 (6291456)
+#define FLASH_FCoE_BACKUP_IMAGE_START_g3 (8388608)
+#define FLASH_iSCSI_BIOS_START_g3 (12582912)
+#define FLASH_PXE_BIOS_START_g3 (13107200)
+#define FLASH_FCoE_BIOS_START_g3 (13631488)
+#define FLASH_REDBOOT_START_g3 (262144)
+#define FLASH_PHY_FW_START_g3 1310720
+
+/************* Rx Packet Type Encoding **************/
+#define BE_UNICAST_PACKET 0
+#define BE_MULTICAST_PACKET 1
+#define BE_BROADCAST_PACKET 2
+#define BE_RSVD_PACKET 3
+
+/*
+ * BE descriptors: host memory data structures whose formats
+ * are hardwired in BE silicon.
+ */
+/* Event Queue Descriptor */
+#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */
+#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */
+#define EQ_ENTRY_RES_ID_SHIFT 16
+
+struct be_eq_entry {
+ u32 evt;
+};
+
+/* TX Queue Descriptor */
+#define ETH_WRB_FRAG_LEN_MASK 0xFFFF
+struct be_eth_wrb {
+ u32 frag_pa_hi; /* dword 0 */
+ u32 frag_pa_lo; /* dword 1 */
+ u32 rsvd0; /* dword 2 */
+ u32 frag_len; /* dword 3: bits 0 - 15 */
+} __packed;
+
+/* Pseudo amap definition for eth_hdr_wrb in which each bit of the
+ * actual structure is defined as a byte : used to calculate
+ * offset/shift/mask of each field */
+struct amap_eth_hdr_wrb {
+ u8 rsvd0[32]; /* dword 0 */
+ u8 rsvd1[32]; /* dword 1 */
+ u8 complete; /* dword 2 */
+ u8 event;
+ u8 crc;
+ u8 forward;
+ u8 lso6;
+ u8 mgmt;
+ u8 ipcs;
+ u8 udpcs;
+ u8 tcpcs;
+ u8 lso;
+ u8 vlan;
+ u8 gso[2];
+ u8 num_wrb[5];
+ u8 lso_mss[14];
+ u8 len[16]; /* dword 3 */
+ u8 vlan_tag[16];
+} __packed;
+
+struct be_eth_hdr_wrb {
+ u32 dw[4];
+};
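+
+/* How the byte-per-bit "amap" layout above is consumed (an illustrative
+ * sketch, not the driver's actual accessor macros): because every bit is
+ * declared as one byte, offsetof() on the amap struct yields a field's bit
+ * position and sizeof() its width in bits, from which shift and mask follow:
+ *
+ *	#define AMAP_BIT_OFFSET(_struct, field)		\
+ *		offsetof(struct _struct, field)		// bit position
+ *	#define AMAP_BIT_SIZE(_struct, field)		\
+ *		sizeof(((struct _struct *)0)->field)	// width in bits
+ *
+ *	// e.g. pulling num_wrb out of the dword that holds it:
+ *	shift = AMAP_BIT_OFFSET(amap_eth_hdr_wrb, num_wrb) % 32;
+ *	mask = (1 << AMAP_BIT_SIZE(amap_eth_hdr_wrb, num_wrb)) - 1;
+ *	num_wrb = (hdr->dw[2] >> shift) & mask;
+ */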
+
+/* TX Compl Queue Descriptor */
+
+/* Pseudo amap definition for eth_tx_compl in which each bit of the
+ * actual structure is defined as a byte: used to calculate
+ * offset/shift/mask of each field */
+struct amap_eth_tx_compl {
+ u8 wrb_index[16]; /* dword 0 */
+ u8 ct[2]; /* dword 0 */
+ u8 port[2]; /* dword 0 */
+ u8 rsvd0[8]; /* dword 0 */
+ u8 status[4]; /* dword 0 */
+ u8 user_bytes[16]; /* dword 1 */
+ u8 nwh_bytes[8]; /* dword 1 */
+ u8 lso; /* dword 1 */
+ u8 cast_enc[2]; /* dword 1 */
+ u8 rsvd1[5]; /* dword 1 */
+ u8 rsvd2[32]; /* dword 2 */
+ u8 pkts[16]; /* dword 3 */
+ u8 ringid[11]; /* dword 3 */
+ u8 hash_val[4]; /* dword 3 */
+ u8 valid; /* dword 3 */
+} __packed;
+
+struct be_eth_tx_compl {
+ u32 dw[4];
+};
+
+/* RX Queue Descriptor */
+struct be_eth_rx_d {
+ u32 fragpa_hi;
+ u32 fragpa_lo;
+};
+
+/* RX Compl Queue Descriptor */
+
+/* Pseudo amap definition for BE2 and BE3 legacy mode eth_rx_compl in which
+ * each bit of the actual structure is defined as a byte: used to calculate
+ * offset/shift/mask of each field */
+struct amap_eth_rx_compl_v0 {
+ u8 vlan_tag[16]; /* dword 0 */
+ u8 pktsize[14]; /* dword 0 */
+ u8 port; /* dword 0 */
+ u8 ip_opt; /* dword 0 */
+ u8 err; /* dword 1 */
+ u8 rsshp; /* dword 1 */
+ u8 ipf; /* dword 1 */
+ u8 tcpf; /* dword 1 */
+ u8 udpf; /* dword 1 */
+ u8 ipcksm; /* dword 1 */
+ u8 l4_cksm; /* dword 1 */
+ u8 ip_version; /* dword 1 */
+ u8 macdst[6]; /* dword 1 */
+ u8 vtp; /* dword 1 */
+ u8 rsvd0; /* dword 1 */
+ u8 fragndx[10]; /* dword 1 */
+ u8 ct[2]; /* dword 1 */
+ u8 sw; /* dword 1 */
+ u8 numfrags[3]; /* dword 1 */
+ u8 rss_flush; /* dword 2 */
+ u8 cast_enc[2]; /* dword 2 */
+ u8 vtm; /* dword 2 */
+ u8 rss_bank; /* dword 2 */
+ u8 rsvd1[23]; /* dword 2 */
+ u8 lro_pkt; /* dword 2 */
+ u8 rsvd2[2]; /* dword 2 */
+ u8 valid; /* dword 2 */
+ u8 rsshash[32]; /* dword 3 */
+} __packed;
+
+/* Pseudo amap definition for BE3 native mode eth_rx_compl in which
+ * each bit of the actual structure is defined as a byte: used to calculate
+ * offset/shift/mask of each field */
+struct amap_eth_rx_compl_v1 {
+ u8 vlan_tag[16]; /* dword 0 */
+ u8 pktsize[14]; /* dword 0 */
+ u8 vtp; /* dword 0 */
+ u8 ip_opt; /* dword 0 */
+ u8 err; /* dword 1 */
+ u8 rsshp; /* dword 1 */
+ u8 ipf; /* dword 1 */
+ u8 tcpf; /* dword 1 */
+ u8 udpf; /* dword 1 */
+ u8 ipcksm; /* dword 1 */
+ u8 l4_cksm; /* dword 1 */
+ u8 ip_version; /* dword 1 */
+ u8 macdst[7]; /* dword 1 */
+ u8 rsvd0; /* dword 1 */
+ u8 fragndx[10]; /* dword 1 */
+ u8 ct[2]; /* dword 1 */
+ u8 sw; /* dword 1 */
+ u8 numfrags[3]; /* dword 1 */
+ u8 rss_flush; /* dword 2 */
+ u8 cast_enc[2]; /* dword 2 */
+ u8 vtm; /* dword 2 */
+ u8 rss_bank; /* dword 2 */
+ u8 port[2]; /* dword 2 */
+ u8 vntagp; /* dword 2 */
+ u8 header_len[8]; /* dword 2 */
+ u8 header_split[2]; /* dword 2 */
+ u8 rsvd1[13]; /* dword 2 */
+ u8 valid; /* dword 2 */
+ u8 rsshash[32]; /* dword 3 */
+} __packed;
+
+struct be_eth_rx_compl {
+ u32 dw[4];
+};
+
+struct mgmt_hba_attribs {
+ u8 flashrom_version_string[32];
+ u8 manufacturer_name[32];
+ u32 supported_modes;
+ u32 rsvd0[3];
+ u8 ncsi_ver_string[12];
+ u32 default_extended_timeout;
+ u8 controller_model_number[32];
+ u8 controller_description[64];
+ u8 controller_serial_number[32];
+ u8 ip_version_string[32];
+ u8 firmware_version_string[32];
+ u8 bios_version_string[32];
+ u8 redboot_version_string[32];
+ u8 driver_version_string[32];
+ u8 fw_on_flash_version_string[32];
+ u32 functionalities_supported;
+ u16 max_cdblength;
+ u8 asic_revision;
+ u8 generational_guid[16];
+ u8 hba_port_count;
+ u16 default_link_down_timeout;
+ u8 iscsi_ver_min_max;
+ u8 multifunction_device;
+ u8 cache_valid;
+ u8 hba_status;
+ u8 max_domains_supported;
+ u8 phy_port;
+ u32 firmware_post_status;
+ u32 hba_mtu[8];
+ u32 rsvd1[4];
+};
+
+struct mgmt_controller_attrib {
+ struct mgmt_hba_attribs hba_attribs;
+ u16 pci_vendor_id;
+ u16 pci_device_id;
+ u16 pci_sub_vendor_id;
+ u16 pci_sub_system_id;
+ u8 pci_bus_number;
+ u8 pci_device_number;
+ u8 pci_function_number;
+ u8 interface_type;
+ u64 unique_identifier;
+ u32 rsvd0[5];
+};
+
+struct controller_id {
+ u32 vendor;
+ u32 device;
+ u32 subvendor;
+ u32 subdevice;
+};
+
+struct flash_comp {
+ unsigned long offset;
+ int optype;
+ int size;
+};
+
+struct image_hdr {
+ u32 imageid;
+ u32 imageoffset;
+ u32 imagelength;
+ u32 image_checksum;
+ u8 image_version[32];
+};
+struct flash_file_hdr_g2 {
+ u8 sign[32];
+ u32 cksum;
+ u32 antidote;
+ struct controller_id cont_id;
+ u32 file_len;
+ u32 chunk_num;
+ u32 total_chunks;
+ u32 num_imgs;
+ u8 build[24];
+};
+
+struct flash_file_hdr_g3 {
+ u8 sign[52];
+ u8 ufi_version[4];
+ u32 file_len;
+ u32 cksum;
+ u32 antidote;
+ u32 num_imgs;
+ u8 build[24];
+ u8 rsvd[32];
+};
+
+struct flash_section_hdr {
+ u32 format_rev;
+ u32 cksum;
+ u32 antidote;
+ u32 build_no;
+ u8 id_string[64];
+ u32 active_entry_mask;
+ u32 valid_entry_mask;
+ u32 org_content_mask;
+ u32 rsvd0;
+ u32 rsvd1;
+ u32 rsvd2;
+ u32 rsvd3;
+ u32 rsvd4;
+};
+
+struct flash_section_entry {
+ u32 type;
+ u32 offset;
+ u32 pad_size;
+ u32 image_size;
+ u32 cksum;
+ u32 entry_point;
+ u32 rsvd0;
+ u32 rsvd1;
+ u8 ver_data[32];
+};
+
+struct flash_section_info {
+ u8 cookie[32];
+ struct flash_section_hdr fsec_hdr;
+ struct flash_section_entry fsec_entry[32];
+};
--- /dev/null
+/*
+ * Copyright (C) 2005 - 2011 Emulex
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#include <linux/prefetch.h>
+#include "be.h"
+#include "be_cmds.h"
+#include <asm/div64.h>
+
+MODULE_VERSION(DRV_VER);
+MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
+MODULE_AUTHOR("ServerEngines Corporation");
+MODULE_LICENSE("GPL");
+
+static ushort rx_frag_size = 2048;
+static unsigned int num_vfs;
+module_param(rx_frag_size, ushort, S_IRUGO);
+module_param(num_vfs, uint, S_IRUGO);
+MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
+MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
+
+static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
+ { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
+ { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
+ { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
+ { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
+ { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
+ { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, be_dev_ids);
+/* UE Status Low CSR */
+static const char * const ue_status_low_desc[] = {
+ "CEV",
+ "CTX",
+ "DBUF",
+ "ERX",
+ "Host",
+ "MPU",
+ "NDMA",
+ "PTC ",
+ "RDMA ",
+ "RXF ",
+ "RXIPS ",
+ "RXULP0 ",
+ "RXULP1 ",
+ "RXULP2 ",
+ "TIM ",
+ "TPOST ",
+ "TPRE ",
+ "TXIPS ",
+ "TXULP0 ",
+ "TXULP1 ",
+ "UC ",
+ "WDMA ",
+ "TXULP2 ",
+ "HOST1 ",
+ "P0_OB_LINK ",
+ "P1_OB_LINK ",
+ "HOST_GPIO ",
+ "MBOX ",
+ "AXGMAC0",
+ "AXGMAC1",
+ "JTAG",
+ "MPU_INTPEND"
+};
+/* UE Status High CSR */
+static const char * const ue_status_hi_desc[] = {
+ "LPCMEMHOST",
+ "MGMT_MAC",
+ "PCS0ONLINE",
+ "MPU_IRAM",
+ "PCS1ONLINE",
+ "PCTL0",
+ "PCTL1",
+ "PMEM",
+ "RR",
+ "TXPB",
+ "RXPP",
+ "XAUI",
+ "TXP",
+ "ARM",
+ "IPC",
+ "HOST2",
+ "HOST3",
+ "HOST4",
+ "HOST5",
+ "HOST6",
+ "HOST7",
+ "HOST8",
+ "HOST9",
+ "NETC",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown"
+};
+
+static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
+{
+ struct be_dma_mem *mem = &q->dma_mem;
+ if (mem->va)
+ dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+ mem->dma);
+}
+
+static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
+ u16 len, u16 entry_size)
+{
+ struct be_dma_mem *mem = &q->dma_mem;
+
+ memset(q, 0, sizeof(*q));
+ q->len = len;
+ q->entry_size = entry_size;
+ mem->size = len * entry_size;
+ mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
+ GFP_KERNEL);
+ if (!mem->va)
+ return -ENOMEM;
+ memset(mem->va, 0, mem->size);
+ return 0;
+}
+
+static void be_intr_set(struct be_adapter *adapter, bool enable)
+{
+ u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
+ u32 reg = ioread32(addr);
+ u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+
+ if (adapter->eeh_err)
+ return;
+
+ if (!enabled && enable)
+ reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+ else if (enabled && !enable)
+ reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+ else
+ return;
+
+ iowrite32(reg, addr);
+}
+
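+/* Doorbell helpers: each write tells the HW how many entries were posted
+ * to (or popped from) the ring identified by qid.
+ */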
+static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
+{
+ u32 val = 0;
+ val |= qid & DB_RQ_RING_ID_MASK;
+ val |= posted << DB_RQ_NUM_POSTED_SHIFT;
+
+ wmb();
+ iowrite32(val, adapter->db + DB_RQ_OFFSET);
+}
+
+static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
+{
+ u32 val = 0;
+ val |= qid & DB_TXULP_RING_ID_MASK;
+ val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
+
+ wmb();
+ iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
+}
+
+static void be_eq_notify(struct be_adapter *adapter, u16 qid,
+ bool arm, bool clear_int, u16 num_popped)
+{
+ u32 val = 0;
+ val |= qid & DB_EQ_RING_ID_MASK;
+ val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
+ DB_EQ_RING_ID_EXT_MASK_SHIFT);
+
+ if (adapter->eeh_err)
+ return;
+
+ if (arm)
+ val |= 1 << DB_EQ_REARM_SHIFT;
+ if (clear_int)
+ val |= 1 << DB_EQ_CLR_SHIFT;
+ val |= 1 << DB_EQ_EVNT_SHIFT;
+ val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
+ iowrite32(val, adapter->db + DB_EQ_OFFSET);
+}
+
+void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
+{
+ u32 val = 0;
+ val |= qid & DB_CQ_RING_ID_MASK;
+ val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
+ DB_CQ_RING_ID_EXT_MASK_SHIFT);
+
+ if (adapter->eeh_err)
+ return;
+
+ if (arm)
+ val |= 1 << DB_CQ_REARM_SHIFT;
+ val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
+ iowrite32(val, adapter->db + DB_CQ_OFFSET);
+}
+
+static int be_mac_addr_set(struct net_device *netdev, void *p)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+ int status = 0;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ /* MAC addr configuration will be done in hardware for VFs
+ * by their corresponding PFs. Just copy to netdev addr here
+ */
+ if (!be_physfn(adapter))
+ goto netdev_addr;
+
+ status = be_cmd_pmac_del(adapter, adapter->if_handle,
+ adapter->pmac_id, 0);
+ if (status)
+ return status;
+
+ status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
+ adapter->if_handle, &adapter->pmac_id, 0);
+netdev_addr:
+ if (!status)
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+ return status;
+}
+
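+/* Transcribe the v0 (BE2) HW stats returned by the stats cmd into the
+ * generation-neutral drv_stats.
+ */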
+static void populate_be2_stats(struct be_adapter *adapter)
+{
+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
+ struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
+ struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
+ struct be_port_rxf_stats_v0 *port_stats =
+ &rxf_stats->port[adapter->port_num];
+ struct be_drv_stats *drvs = &adapter->drv_stats;
+
+ be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
+ drvs->rx_pause_frames = port_stats->rx_pause_frames;
+ drvs->rx_crc_errors = port_stats->rx_crc_errors;
+ drvs->rx_control_frames = port_stats->rx_control_frames;
+ drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
+ drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
+ drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
+ drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
+ drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
+ drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
+ drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
+ drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
+ drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
+ drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
+ drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
+ drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
+ drvs->rx_dropped_header_too_small =
+ port_stats->rx_dropped_header_too_small;
+ drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
+ drvs->rx_alignment_symbol_errors =
+ port_stats->rx_alignment_symbol_errors;
+
+ drvs->tx_pauseframes = port_stats->tx_pauseframes;
+ drvs->tx_controlframes = port_stats->tx_controlframes;
+
+ if (adapter->port_num)
+ drvs->jabber_events = rxf_stats->port1_jabber_events;
+ else
+ drvs->jabber_events = rxf_stats->port0_jabber_events;
+ drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
+ drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
+ drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
+ drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
+ drvs->forwarded_packets = rxf_stats->forwarded_packets;
+ drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
+ drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
+ drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
+ adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
+}
+
+static void populate_be3_stats(struct be_adapter *adapter)
+{
+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
+ struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
+ struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
+ struct be_port_rxf_stats_v1 *port_stats =
+ &rxf_stats->port[adapter->port_num];
+ struct be_drv_stats *drvs = &adapter->drv_stats;
+
+ be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
+ drvs->rx_pause_frames = port_stats->rx_pause_frames;
+ drvs->rx_crc_errors = port_stats->rx_crc_errors;
+ drvs->rx_control_frames = port_stats->rx_control_frames;
+ drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
+ drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
+ drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
+ drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
+ drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
+ drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
+ drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
+ drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
+ drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
+ drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
+ drvs->rx_dropped_header_too_small =
+ port_stats->rx_dropped_header_too_small;
+ drvs->rx_input_fifo_overflow_drop =
+ port_stats->rx_input_fifo_overflow_drop;
+ drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
+ drvs->rx_alignment_symbol_errors =
+ port_stats->rx_alignment_symbol_errors;
+ drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
+ drvs->tx_pauseframes = port_stats->tx_pauseframes;
+ drvs->tx_controlframes = port_stats->tx_controlframes;
+ drvs->jabber_events = port_stats->jabber_events;
+ drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
+ drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
+ drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
+ drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
+ drvs->forwarded_packets = rxf_stats->forwarded_packets;
+ drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
+ drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
+ drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
+ adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
+}
+
+static void populate_lancer_stats(struct be_adapter *adapter)
+{
+ struct be_drv_stats *drvs = &adapter->drv_stats;
+ struct lancer_pport_stats *pport_stats =
+ pport_stats_from_cmd(adapter);
+
+ be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
+ drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
+ drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
+ drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
+ drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
+ drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
+ drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
+ drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
+ drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
+ drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
+ drvs->rx_dropped_tcp_length =
+ pport_stats->rx_dropped_invalid_tcp_length;
+ drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
+ drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
+ drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
+ drvs->rx_dropped_header_too_small =
+ pport_stats->rx_dropped_header_too_small;
+ drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
+ drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
+ drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
+ drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
+ drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
+ drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
+ drvs->jabber_events = pport_stats->rx_jabbers;
+ drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
+ drvs->forwarded_packets = pport_stats->num_forwards_lo;
+ drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
+ drvs->rx_drops_too_many_frags =
+ pport_stats->rx_drops_too_many_frags_lo;
+}
+
+void be_parse_stats(struct be_adapter *adapter)
+{
+ struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
+ struct be_rx_obj *rxo;
+ int i;
+
+ if (adapter->generation == BE_GEN3) {
+ if (lancer_chip(adapter))
+ populate_lancer_stats(adapter);
+ else
+ populate_be3_stats(adapter);
+ } else {
+ populate_be2_stats(adapter);
+ }
+
+ /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
+ for_all_rx_queues(adapter, rxo, i)
+ rx_stats(rxo)->rx_drops_no_frags =
+ erx->rx_drops_no_fragments[rxo->q.id];
+}
+
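+/* ndo_get_stats64: aggregate the per-queue SW counters (sampled with
+ * u64_stats retry loops for consistency on 32-bit) and derive the error
+ * totals from drv_stats.
+ */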
+static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_drv_stats *drvs = &adapter->drv_stats;
+ struct be_rx_obj *rxo;
+ struct be_tx_obj *txo;
+ u64 pkts, bytes;
+ unsigned int start;
+ int i;
+
+ for_all_rx_queues(adapter, rxo, i) {
+ const struct be_rx_stats *rx_stats = rx_stats(rxo);
+ do {
+ start = u64_stats_fetch_begin_bh(&rx_stats->sync);
+ pkts = rx_stats(rxo)->rx_pkts;
+ bytes = rx_stats(rxo)->rx_bytes;
+ } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
+ stats->rx_packets += pkts;
+ stats->rx_bytes += bytes;
+ stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
+ stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
+ rx_stats(rxo)->rx_drops_no_frags;
+ }
+
+ for_all_tx_queues(adapter, txo, i) {
+ const struct be_tx_stats *tx_stats = tx_stats(txo);
+ do {
+ start = u64_stats_fetch_begin_bh(&tx_stats->sync);
+ pkts = tx_stats(txo)->tx_pkts;
+ bytes = tx_stats(txo)->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
+ stats->tx_packets += pkts;
+ stats->tx_bytes += bytes;
+ }
+
+ /* bad pkts received */
+ stats->rx_errors = drvs->rx_crc_errors +
+ drvs->rx_alignment_symbol_errors +
+ drvs->rx_in_range_errors +
+ drvs->rx_out_range_errors +
+ drvs->rx_frame_too_long +
+ drvs->rx_dropped_too_small +
+ drvs->rx_dropped_too_short +
+ drvs->rx_dropped_header_too_small +
+ drvs->rx_dropped_tcp_length +
+ drvs->rx_dropped_runt;
+
+ /* detailed rx errors */
+ stats->rx_length_errors = drvs->rx_in_range_errors +
+ drvs->rx_out_range_errors +
+ drvs->rx_frame_too_long;
+
+ stats->rx_crc_errors = drvs->rx_crc_errors;
+
+ /* frame alignment errors */
+ stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
+
+ /* receiver fifo overrun */
+	/* drops_no_pbuf is not per i/f; it's per BE card */
+ stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
+ drvs->rx_input_fifo_overflow_drop +
+ drvs->rx_drops_no_pbuf;
+ return stats;
+}
+
+void be_link_status_update(struct be_adapter *adapter, u32 link_status)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ /* when link status changes, link speed must be re-queried from card */
+ adapter->link_speed = -1;
+ if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
+ netif_carrier_on(netdev);
+ dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
+ } else {
+ netif_carrier_off(netdev);
+ dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
+ }
+}
+
+static void be_tx_stats_update(struct be_tx_obj *txo,
+ u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
+{
+ struct be_tx_stats *stats = tx_stats(txo);
+
+ u64_stats_update_begin(&stats->sync);
+ stats->tx_reqs++;
+ stats->tx_wrbs += wrb_cnt;
+ stats->tx_bytes += copied;
+ stats->tx_pkts += (gso_segs ? gso_segs : 1);
+ if (stopped)
+ stats->tx_stops++;
+ u64_stats_update_end(&stats->sync);
+}
+
+/* Determine number of WRB entries needed to xmit data in an skb */
+static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
+ bool *dummy)
+{
+ int cnt = (skb->len > skb->data_len);
+
+ cnt += skb_shinfo(skb)->nr_frags;
+
+ /* to account for hdr wrb */
+ cnt++;
+ if (lancer_chip(adapter) || !(cnt & 1)) {
+ *dummy = false;
+ } else {
+ /* add a dummy to make it an even num */
+ cnt++;
+ *dummy = true;
+ }
+ BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
+ return cnt;
+}
+
+static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
+{
+ wrb->frag_pa_hi = upper_32_bits(addr);
+ wrb->frag_pa_lo = addr & 0xFFFFFFFF;
+ wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
+}
+
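+/* Fill the header WRB with the per-packet offload info: LSO, checksum
+ * and vlan-tag settings, the wrb count and the total payload length.
+ */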
+static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
+ struct sk_buff *skb, u32 wrb_cnt, u32 len)
+{
+ u8 vlan_prio = 0;
+ u16 vlan_tag = 0;
+
+ memset(hdr, 0, sizeof(*hdr));
+
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
+
+ if (skb_is_gso(skb)) {
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
+ hdr, skb_shinfo(skb)->gso_size);
+ if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
+ if (lancer_chip(adapter) && adapter->sli_family ==
+ LANCER_A0_SLI_FAMILY) {
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
+ if (is_tcp_pkt(skb))
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb,
+ tcpcs, hdr, 1);
+ else if (is_udp_pkt(skb))
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb,
+ udpcs, hdr, 1);
+ }
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (is_tcp_pkt(skb))
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
+ else if (is_udp_pkt(skb))
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
+ }
+
+ if (vlan_tx_tag_present(skb)) {
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
+ vlan_tag = vlan_tx_tag_get(skb);
+ vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ /* If vlan priority provided by OS is NOT in available bmap */
+ if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
+ vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
+ adapter->recommended_prio;
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
+ }
+
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
+}
+
+static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
+ bool unmap_single)
+{
+ dma_addr_t dma;
+
+ be_dws_le_to_cpu(wrb, sizeof(*wrb));
+
+ dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
+ if (wrb->frag_len) {
+ if (unmap_single)
+ dma_unmap_single(dev, dma, wrb->frag_len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
+ }
+}
+
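+/* DMA-map the skb head and frags and fill one WRB per mapping.
+ * Returns the number of bytes mapped, or 0 after unwinding the
+ * mappings on a DMA error.
+ */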
+static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
+ struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
+{
+ dma_addr_t busaddr;
+ int i, copied = 0;
+ struct device *dev = &adapter->pdev->dev;
+ struct sk_buff *first_skb = skb;
+ struct be_eth_wrb *wrb;
+ struct be_eth_hdr_wrb *hdr;
+ bool map_single = false;
+ u16 map_head;
+
+ hdr = queue_head_node(txq);
+ queue_head_inc(txq);
+ map_head = txq->head;
+
+ if (skb->len > skb->data_len) {
+ int len = skb_headlen(skb);
+ busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, busaddr))
+ goto dma_err;
+ map_single = true;
+ wrb = queue_head_node(txq);
+ wrb_fill(wrb, busaddr, len);
+ be_dws_cpu_to_le(wrb, sizeof(*wrb));
+ queue_head_inc(txq);
+ copied += len;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ struct skb_frag_struct *frag =
+ &skb_shinfo(skb)->frags[i];
+ busaddr = dma_map_page(dev, frag->page, frag->page_offset,
+ frag->size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, busaddr))
+ goto dma_err;
+ wrb = queue_head_node(txq);
+ wrb_fill(wrb, busaddr, frag->size);
+ be_dws_cpu_to_le(wrb, sizeof(*wrb));
+ queue_head_inc(txq);
+ copied += frag->size;
+ }
+
+ if (dummy_wrb) {
+ wrb = queue_head_node(txq);
+ wrb_fill(wrb, 0, 0);
+ be_dws_cpu_to_le(wrb, sizeof(*wrb));
+ queue_head_inc(txq);
+ }
+
+ wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
+ be_dws_cpu_to_le(hdr, sizeof(*hdr));
+
+ return copied;
+dma_err:
+ txq->head = map_head;
+ while (copied) {
+ wrb = queue_head_node(txq);
+ unmap_tx_frag(dev, wrb, map_single);
+ map_single = false;
+ copied -= wrb->frag_len;
+ queue_head_inc(txq);
+ }
+ return 0;
+}
+
+static netdev_tx_t be_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
+ struct be_queue_info *txq = &txo->q;
+ u32 wrb_cnt = 0, copied = 0;
+ u32 start = txq->head;
+ bool dummy_wrb, stopped = false;
+
+ wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
+
+ copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
+ if (copied) {
+ /* record the sent skb in the sent_skb table */
+ BUG_ON(txo->sent_skb_list[start]);
+ txo->sent_skb_list[start] = skb;
+
+		/* Ensure txq has space for the next skb; else stop the queue
+		 * *BEFORE* ringing the tx doorbell, so that we serialize the
+		 * tx compls of the current transmit, which will wake up the
+		 * queue
+		 */
+ atomic_add(wrb_cnt, &txq->used);
+ if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
+ txq->len) {
+ netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
+ stopped = true;
+ }
+
+ be_txq_notify(adapter, txq->id, wrb_cnt);
+
+ be_tx_stats_update(txo, wrb_cnt, copied,
+ skb_shinfo(skb)->gso_segs, stopped);
+ } else {
+ txq->head = start;
+ dev_kfree_skb_any(skb);
+ }
+ return NETDEV_TX_OK;
+}
+
+static int be_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ if (new_mtu < BE_MIN_MTU ||
+ new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
+ (ETH_HLEN + ETH_FCS_LEN))) {
+ dev_info(&adapter->pdev->dev,
+ "MTU must be between %d and %d bytes\n",
+ BE_MIN_MTU,
+ (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
+ return -EINVAL;
+ }
+ dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
+ netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+ return 0;
+}
+
+/*
+ * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
+ * If the user configures more, place BE in vlan promiscuous mode.
+ */
+static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
+{
+ u16 vtag[BE_NUM_VLANS_SUPPORTED];
+ u16 ntags = 0, i;
+ int status = 0;
+ u32 if_handle;
+
+ if (vf) {
+ if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
+ vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
+ status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
+ }
+
+ /* No need to further configure vids if in promiscuous mode */
+ if (adapter->promiscuous)
+ return 0;
+
+ if (adapter->vlans_added <= adapter->max_vlans) {
+ /* Construct VLAN Table to give to HW */
+ for (i = 0; i < VLAN_N_VID; i++) {
+ if (adapter->vlan_tag[i]) {
+ vtag[ntags] = cpu_to_le16(i);
+ ntags++;
+ }
+ }
+ status = be_cmd_vlan_config(adapter, adapter->if_handle,
+ vtag, ntags, 1, 0);
+ } else {
+ status = be_cmd_vlan_config(adapter, adapter->if_handle,
+ NULL, 0, 1, 1);
+ }
+
+ return status;
+}
+
+static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ adapter->vlans_added++;
+ if (!be_physfn(adapter))
+ return;
+
+ adapter->vlan_tag[vid] = 1;
+ if (adapter->vlans_added <= (adapter->max_vlans + 1))
+ be_vid_config(adapter, false, 0);
+}
+
+static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ adapter->vlans_added--;
+
+ if (!be_physfn(adapter))
+ return;
+
+ adapter->vlan_tag[vid] = 0;
+ if (adapter->vlans_added <= adapter->max_vlans)
+ be_vid_config(adapter, false, 0);
+}
+
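+/* Sync the netdev rx-mode flags and multicast list with the HW rx filter;
+ * vlan filtering is re-applied when leaving promiscuous mode.
+ */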
+static void be_set_multicast_list(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (netdev->flags & IFF_PROMISC) {
+ be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
+ adapter->promiscuous = true;
+ goto done;
+ }
+
+ /* BE was previously in promiscuous mode; disable it */
+ if (adapter->promiscuous) {
+ adapter->promiscuous = false;
+ be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
+
+ if (adapter->vlans_added)
+ be_vid_config(adapter, false, 0);
+ }
+
+ /* Enable multicast promisc if num configured exceeds what we support */
+ if (netdev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(netdev) > BE_MAX_MC) {
+ be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
+ goto done;
+ }
+
+ be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
+done:
+ return;
+}
+
+static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status;
+
+ if (!adapter->sriov_enabled)
+ return -EPERM;
+
+ if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
+ return -EINVAL;
+
+ if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
+ status = be_cmd_pmac_del(adapter,
+ adapter->vf_cfg[vf].vf_if_handle,
+ adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
+
+ status = be_cmd_pmac_add(adapter, mac,
+ adapter->vf_cfg[vf].vf_if_handle,
+ &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
+
+ if (status)
+ dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
+ mac, vf);
+ else
+ memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+
+ return status;
+}
+
+static int be_get_vf_config(struct net_device *netdev, int vf,
+ struct ifla_vf_info *vi)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (!adapter->sriov_enabled)
+ return -EPERM;
+
+ if (vf >= num_vfs)
+ return -EINVAL;
+
+ vi->vf = vf;
+ vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
+ vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
+ vi->qos = 0;
+ memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
+
+ return 0;
+}
+
+static int be_set_vf_vlan(struct net_device *netdev,
+ int vf, u16 vlan, u8 qos)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status = 0;
+
+ if (!adapter->sriov_enabled)
+ return -EPERM;
+
+ if ((vf >= num_vfs) || (vlan > 4095))
+ return -EINVAL;
+
+ if (vlan) {
+ adapter->vf_cfg[vf].vf_vlan_tag = vlan;
+ adapter->vlans_added++;
+ } else {
+ adapter->vf_cfg[vf].vf_vlan_tag = 0;
+ adapter->vlans_added--;
+ }
+
+ status = be_vid_config(adapter, true, vf);
+
+ if (status)
+ dev_info(&adapter->pdev->dev,
+ "VLAN %d config on VF %d failed\n", vlan, vf);
+ return status;
+}
+
+static int be_set_vf_tx_rate(struct net_device *netdev,
+ int vf, int rate)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status = 0;
+
+ if (!adapter->sriov_enabled)
+ return -EPERM;
+
+ if ((vf >= num_vfs) || (rate < 0))
+ return -EINVAL;
+
+ if (rate > 10000)
+ rate = 10000;
+
+ adapter->vf_cfg[vf].vf_tx_rate = rate;
+ status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
+
+ if (status)
+ dev_info(&adapter->pdev->dev,
+ "tx rate %d on VF %d failed\n", rate, vf);
+ return status;
+}
+
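+/* Adaptive interrupt coalescing: once a second, derive an EQ delay from
+ * the measured rx packet rate and reprogram it if it changed.
+ */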
+static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
+{
+ struct be_eq_obj *rx_eq = &rxo->rx_eq;
+ struct be_rx_stats *stats = rx_stats(rxo);
+ ulong now = jiffies;
+ ulong delta = now - stats->rx_jiffies;
+ u64 pkts;
+ unsigned int start, eqd;
+
+ if (!rx_eq->enable_aic)
+ return;
+
+ /* Wrapped around */
+ if (time_before(now, stats->rx_jiffies)) {
+ stats->rx_jiffies = now;
+ return;
+ }
+
+ /* Update once a second */
+ if (delta < HZ)
+ return;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&stats->sync);
+ pkts = stats->rx_pkts;
+ } while (u64_stats_fetch_retry_bh(&stats->sync, start));
+
+ stats->rx_pps = (pkts - stats->rx_pkts_prev) / (delta / HZ);
+ stats->rx_pkts_prev = pkts;
+ stats->rx_jiffies = now;
+ eqd = stats->rx_pps / 110000;
+ eqd = eqd << 3;
+ if (eqd > rx_eq->max_eqd)
+ eqd = rx_eq->max_eqd;
+ if (eqd < rx_eq->min_eqd)
+ eqd = rx_eq->min_eqd;
+ if (eqd < 10)
+ eqd = 0;
+ if (eqd != rx_eq->cur_eqd) {
+ be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
+ rx_eq->cur_eqd = eqd;
+ }
+}
+
+static void be_rx_stats_update(struct be_rx_obj *rxo,
+ struct be_rx_compl_info *rxcp)
+{
+ struct be_rx_stats *stats = rx_stats(rxo);
+
+ u64_stats_update_begin(&stats->sync);
+ stats->rx_compl++;
+ stats->rx_bytes += rxcp->pkt_size;
+ stats->rx_pkts++;
+ if (rxcp->pkt_type == BE_MULTICAST_PACKET)
+ stats->rx_mcast_pkts++;
+ if (rxcp->err)
+ stats->rx_compl_err++;
+ u64_stats_update_end(&stats->sync);
+}
+
+static inline bool csum_passed(struct be_rx_compl_info *rxcp)
+{
+	/* L4 checksum is not reliable for non-TCP/UDP packets.
+	 * Also ignore ipcksm for ipv6 pkts */
+ return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
+ (rxcp->ip_csum || rxcp->ipv6);
+}
+
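+/* Return the page_info of a posted rx frag; the backing page is unmapped
+ * when its last frag is consumed.
+ */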
+static struct be_rx_page_info *
+get_rx_page_info(struct be_adapter *adapter,
+ struct be_rx_obj *rxo,
+ u16 frag_idx)
+{
+ struct be_rx_page_info *rx_page_info;
+ struct be_queue_info *rxq = &rxo->q;
+
+ rx_page_info = &rxo->page_info_tbl[frag_idx];
+ BUG_ON(!rx_page_info->page);
+
+ if (rx_page_info->last_page_user) {
+ dma_unmap_page(&adapter->pdev->dev,
+ dma_unmap_addr(rx_page_info, bus),
+ adapter->big_page_size, DMA_FROM_DEVICE);
+ rx_page_info->last_page_user = false;
+ }
+
+ atomic_dec(&rxq->used);
+ return rx_page_info;
+}
+
+/* Throw away the data in the Rx completion */
+static void be_rx_compl_discard(struct be_adapter *adapter,
+ struct be_rx_obj *rxo,
+ struct be_rx_compl_info *rxcp)
+{
+ struct be_queue_info *rxq = &rxo->q;
+ struct be_rx_page_info *page_info;
+ u16 i, num_rcvd = rxcp->num_rcvd;
+
+ for (i = 0; i < num_rcvd; i++) {
+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
+ put_page(page_info->page);
+ memset(page_info, 0, sizeof(*page_info));
+ index_inc(&rxcp->rxq_idx, rxq->len);
+ }
+}
+
+/*
+ * skb_fill_rx_data forms a complete skb for an ether frame
+ * indicated by rxcp.
+ */
+static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
+ struct sk_buff *skb, struct be_rx_compl_info *rxcp)
+{
+ struct be_queue_info *rxq = &rxo->q;
+ struct be_rx_page_info *page_info;
+ u16 i, j;
+ u16 hdr_len, curr_frag_len, remaining;
+ u8 *start;
+
+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
+ start = page_address(page_info->page) + page_info->page_offset;
+ prefetch(start);
+
+ /* Copy data in the first descriptor of this completion */
+ curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
+
+ /* Copy the header portion into skb_data */
+ hdr_len = min(BE_HDR_LEN, curr_frag_len);
+ memcpy(skb->data, start, hdr_len);
+ skb->len = curr_frag_len;
+ if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
+ /* Complete packet has now been moved to data */
+ put_page(page_info->page);
+ skb->data_len = 0;
+ skb->tail += curr_frag_len;
+ } else {
+ skb_shinfo(skb)->nr_frags = 1;
+ skb_shinfo(skb)->frags[0].page = page_info->page;
+ skb_shinfo(skb)->frags[0].page_offset =
+ page_info->page_offset + hdr_len;
+ skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
+ skb->data_len = curr_frag_len - hdr_len;
+ skb->tail += hdr_len;
+ }
+ page_info->page = NULL;
+
+ if (rxcp->pkt_size <= rx_frag_size) {
+ BUG_ON(rxcp->num_rcvd != 1);
+ return;
+ }
+
+ /* More frags present for this completion */
+ index_inc(&rxcp->rxq_idx, rxq->len);
+ remaining = rxcp->pkt_size - curr_frag_len;
+ for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
+ curr_frag_len = min(remaining, rx_frag_size);
+
+ /* Coalesce all frags from the same physical page in one slot */
+ if (page_info->page_offset == 0) {
+ /* Fresh page */
+ j++;
+ skb_shinfo(skb)->frags[j].page = page_info->page;
+ skb_shinfo(skb)->frags[j].page_offset =
+ page_info->page_offset;
+ skb_shinfo(skb)->frags[j].size = 0;
+ skb_shinfo(skb)->nr_frags++;
+ } else {
+ put_page(page_info->page);
+ }
+
+ skb_shinfo(skb)->frags[j].size += curr_frag_len;
+ skb->len += curr_frag_len;
+ skb->data_len += curr_frag_len;
+
+ remaining -= curr_frag_len;
+ index_inc(&rxcp->rxq_idx, rxq->len);
+ page_info->page = NULL;
+ }
+ BUG_ON(j > MAX_SKB_FRAGS);
+}
+
+/* Process the RX completion indicated by rxcp when GRO is disabled */
+static void be_rx_compl_process(struct be_adapter *adapter,
+ struct be_rx_obj *rxo,
+ struct be_rx_compl_info *rxcp)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
+ if (unlikely(!skb)) {
+ rx_stats(rxo)->rx_drops_no_skbs++;
+ be_rx_compl_discard(adapter, rxo, rxcp);
+ return;
+ }
+
+ skb_fill_rx_data(adapter, rxo, skb, rxcp);
+
+ if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+
+ skb->truesize = skb->len + sizeof(struct sk_buff);
+ skb->protocol = eth_type_trans(skb, netdev);
+ if (adapter->netdev->features & NETIF_F_RXHASH)
+ skb->rxhash = rxcp->rss_hash;
+
+ if (unlikely(rxcp->vlanf))
+ __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
+
+ netif_receive_skb(skb);
+}
+
+/* Process the RX completion indicated by rxcp when GRO is enabled */
+static void be_rx_compl_process_gro(struct be_adapter *adapter,
+ struct be_rx_obj *rxo,
+ struct be_rx_compl_info *rxcp)
+{
+ struct be_rx_page_info *page_info;
+ struct sk_buff *skb = NULL;
+ struct be_queue_info *rxq = &rxo->q;
+ struct be_eq_obj *eq_obj = &rxo->rx_eq;
+ u16 remaining, curr_frag_len;
+ u16 i, j;
+
+ skb = napi_get_frags(&eq_obj->napi);
+ if (!skb) {
+ be_rx_compl_discard(adapter, rxo, rxcp);
+ return;
+ }
+
+ remaining = rxcp->pkt_size;
+ for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
+
+ curr_frag_len = min(remaining, rx_frag_size);
+
+ /* Coalesce all frags from the same physical page in one slot */
+ if (i == 0 || page_info->page_offset == 0) {
+ /* First frag or Fresh page */
+ j++;
+ skb_shinfo(skb)->frags[j].page = page_info->page;
+ skb_shinfo(skb)->frags[j].page_offset =
+ page_info->page_offset;
+ skb_shinfo(skb)->frags[j].size = 0;
+ } else {
+ put_page(page_info->page);
+ }
+ skb_shinfo(skb)->frags[j].size += curr_frag_len;
+
+ remaining -= curr_frag_len;
+ index_inc(&rxcp->rxq_idx, rxq->len);
+ memset(page_info, 0, sizeof(*page_info));
+ }
+ BUG_ON(j > MAX_SKB_FRAGS);
+
+ skb_shinfo(skb)->nr_frags = j + 1;
+ skb->len = rxcp->pkt_size;
+ skb->data_len = rxcp->pkt_size;
+ skb->truesize += rxcp->pkt_size;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (adapter->netdev->features & NETIF_F_RXHASH)
+ skb->rxhash = rxcp->rss_hash;
+
+ if (unlikely(rxcp->vlanf))
+ __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
+
+ napi_gro_frags(&eq_obj->napi);
+}
+
+static void be_parse_rx_compl_v1(struct be_adapter *adapter,
+ struct be_eth_rx_compl *compl,
+ struct be_rx_compl_info *rxcp)
+{
+ rxcp->pkt_size =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
+ rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
+ rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
+ rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
+ rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
+ rxcp->ip_csum =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
+ rxcp->l4_csum =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
+ rxcp->ipv6 =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
+ rxcp->rxq_idx =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
+ rxcp->num_rcvd =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
+ rxcp->pkt_type =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
+ rxcp->rss_hash =
+		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
+ if (rxcp->vlanf) {
+ rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
+ compl);
+ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
+ compl);
+ }
+ rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
+}
+
+static void be_parse_rx_compl_v0(struct be_adapter *adapter,
+ struct be_eth_rx_compl *compl,
+ struct be_rx_compl_info *rxcp)
+{
+ rxcp->pkt_size =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
+ rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
+ rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
+ rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
+ rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
+ rxcp->ip_csum =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
+ rxcp->l4_csum =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
+ rxcp->ipv6 =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
+ rxcp->rxq_idx =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
+ rxcp->num_rcvd =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
+ rxcp->pkt_type =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
+ rxcp->rss_hash =
+		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
+ if (rxcp->vlanf) {
+ rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
+ compl);
+ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
+ compl);
+ }
+ rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
+}
+
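+/* Return the next valid rx completion (parsed into rxo->rxcp) or NULL
+ * if none is pending; the valid bit is cleared so the entry can be
+ * reused when the CQ wraps.
+ */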
+static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
+{
+ struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
+ struct be_rx_compl_info *rxcp = &rxo->rxcp;
+ struct be_adapter *adapter = rxo->adapter;
+
+ /* For checking the valid bit it is Ok to use either definition as the
+ * valid bit is at the same position in both v0 and v1 Rx compl */
+ if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
+ return NULL;
+
+ rmb();
+ be_dws_le_to_cpu(compl, sizeof(*compl));
+
+ if (adapter->be3_native)
+ be_parse_rx_compl_v1(adapter, compl, rxcp);
+ else
+ be_parse_rx_compl_v0(adapter, compl, rxcp);
+
+ if (rxcp->vlanf) {
+ /* vlanf could be wrongly set in some cards.
+ * ignore if vtm is not set */
+ if ((adapter->function_mode & 0x400) && !rxcp->vtm)
+ rxcp->vlanf = 0;
+
+ if (!lancer_chip(adapter))
+ rxcp->vlan_tag = swab16(rxcp->vlan_tag);
+
+ if (((adapter->pvid & VLAN_VID_MASK) ==
+ (rxcp->vlan_tag & VLAN_VID_MASK)) &&
+ !adapter->vlan_tag[rxcp->vlan_tag])
+ rxcp->vlanf = 0;
+ }
+
+	/* As the compl has been parsed, reset it; we won't touch it again */
+ compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
+
+ queue_tail_inc(&rxo->cq);
+ return rxcp;
+}
+
+static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
+{
+ u32 order = get_order(size);
+
+ if (order > 0)
+ gfp |= __GFP_COMP;
+ return alloc_pages(gfp, order);
+}
+
+/*
+ * Allocate a page, split it into fragments of size rx_frag_size and post
+ * them as receive buffers to BE
+ */
+static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
+{
+ struct be_adapter *adapter = rxo->adapter;
+ struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
+ struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
+ struct be_queue_info *rxq = &rxo->q;
+ struct page *pagep = NULL;
+ struct be_eth_rx_d *rxd;
+ u64 page_dmaaddr = 0, frag_dmaaddr;
+ u32 posted, page_offset = 0;
+
+ page_info = &rxo->page_info_tbl[rxq->head];
+ for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
+ if (!pagep) {
+ pagep = be_alloc_pages(adapter->big_page_size, gfp);
+ if (unlikely(!pagep)) {
+ rx_stats(rxo)->rx_post_fail++;
+ break;
+ }
+ page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
+ 0, adapter->big_page_size,
+ DMA_FROM_DEVICE);
+ page_info->page_offset = 0;
+ } else {
+ get_page(pagep);
+ page_info->page_offset = page_offset + rx_frag_size;
+ }
+ page_offset = page_info->page_offset;
+ page_info->page = pagep;
+ dma_unmap_addr_set(page_info, bus, page_dmaaddr);
+ frag_dmaaddr = page_dmaaddr + page_info->page_offset;
+
+ rxd = queue_head_node(rxq);
+ rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
+ rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
+
+ /* Any space left in the current big page for another frag? */
+ if ((page_offset + rx_frag_size + rx_frag_size) >
+ adapter->big_page_size) {
+ pagep = NULL;
+ page_info->last_page_user = true;
+ }
+
+ prev_page_info = page_info;
+ queue_head_inc(rxq);
+ page_info = &page_info_tbl[rxq->head];
+ }
+ if (pagep)
+ prev_page_info->last_page_user = true;
+
+ if (posted) {
+ atomic_add(posted, &rxq->used);
+ be_rxq_notify(adapter, rxq->id, posted);
+ } else if (atomic_read(&rxq->used) == 0) {
+ /* Let be_worker replenish when memory is available */
+ rxo->rx_post_starved = true;
+ }
+}
+
+static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
+{
+ struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
+
+ if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
+ return NULL;
+
+ rmb();
+ be_dws_le_to_cpu(txcp, sizeof(*txcp));
+
+ txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
+
+ queue_tail_inc(tx_cq);
+ return txcp;
+}
+
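+/* Unmap the frags and free the skb of one completed tx request; returns
+ * the number of wrbs consumed, including the header wrb.
+ */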
+static u16 be_tx_compl_process(struct be_adapter *adapter,
+ struct be_tx_obj *txo, u16 last_index)
+{
+ struct be_queue_info *txq = &txo->q;
+ struct be_eth_wrb *wrb;
+ struct sk_buff **sent_skbs = txo->sent_skb_list;
+ struct sk_buff *sent_skb;
+ u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
+ bool unmap_skb_hdr = true;
+
+ sent_skb = sent_skbs[txq->tail];
+ BUG_ON(!sent_skb);
+ sent_skbs[txq->tail] = NULL;
+
+ /* skip header wrb */
+ queue_tail_inc(txq);
+
+ do {
+ cur_index = txq->tail;
+ wrb = queue_tail_node(txq);
+ unmap_tx_frag(&adapter->pdev->dev, wrb,
+ (unmap_skb_hdr && skb_headlen(sent_skb)));
+ unmap_skb_hdr = false;
+
+ num_wrbs++;
+ queue_tail_inc(txq);
+ } while (cur_index != last_index);
+
+ kfree_skb(sent_skb);
+ return num_wrbs;
+}
+
+static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
+{
+ struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
+
+ if (!eqe->evt)
+ return NULL;
+
+ rmb();
+ eqe->evt = le32_to_cpu(eqe->evt);
+ queue_tail_inc(&eq_obj->q);
+ return eqe;
+}
+
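+/* Drain the EQ, notify/re-arm it and schedule NAPI if any events were
+ * found; spurious interrupts (no events) just re-arm the EQ.
+ */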
+static int event_handle(struct be_adapter *adapter,
+ struct be_eq_obj *eq_obj,
+ bool rearm)
+{
+ struct be_eq_entry *eqe;
+ u16 num = 0;
+
+ while ((eqe = event_get(eq_obj)) != NULL) {
+ eqe->evt = 0;
+ num++;
+ }
+
+ /* Deal with any spurious interrupts that come
+ * without events
+ */
+ if (!num)
+ rearm = true;
+
+ be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
+ if (num)
+ napi_schedule(&eq_obj->napi);
+
+ return num;
+}
+
+/* Just read and notify events without processing them.
+ * Used at the time of destroying event queues */
+static void be_eq_clean(struct be_adapter *adapter,
+ struct be_eq_obj *eq_obj)
+{
+ struct be_eq_entry *eqe;
+ u16 num = 0;
+
+ while ((eqe = event_get(eq_obj)) != NULL) {
+ eqe->evt = 0;
+ num++;
+ }
+
+ if (num)
+ be_eq_notify(adapter, eq_obj->q.id, false, true, num);
+}
+
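+/* Discard pending rx completions and free all posted rx buffers */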
+static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
+{
+ struct be_rx_page_info *page_info;
+ struct be_queue_info *rxq = &rxo->q;
+ struct be_queue_info *rx_cq = &rxo->cq;
+ struct be_rx_compl_info *rxcp;
+ u16 tail;
+
+	/* First, clean up pending rx completions */
+ while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
+ be_rx_compl_discard(adapter, rxo, rxcp);
+ be_cq_notify(adapter, rx_cq->id, false, 1);
+ }
+
+	/* Then free posted rx buffers that were not used */
+ tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
+ for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
+ page_info = get_rx_page_info(adapter, rxo, tail);
+ put_page(page_info->page);
+ memset(page_info, 0, sizeof(*page_info));
+ }
+ BUG_ON(atomic_read(&rxq->used));
+ rxq->tail = rxq->head = 0;
+}
+
+static void be_tx_compl_clean(struct be_adapter *adapter,
+ struct be_tx_obj *txo)
+{
+ struct be_queue_info *tx_cq = &txo->cq;
+ struct be_queue_info *txq = &txo->q;
+ struct be_eth_tx_compl *txcp;
+ u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
+ struct sk_buff **sent_skbs = txo->sent_skb_list;
+ struct sk_buff *sent_skb;
+ bool dummy_wrb;
+
+ /* Wait for a max of 200ms for all the tx-completions to arrive. */
+ do {
+ while ((txcp = be_tx_compl_get(tx_cq))) {
+ end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
+ wrb_index, txcp);
+ num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
+ cmpl++;
+ }
+ if (cmpl) {
+ be_cq_notify(adapter, tx_cq->id, false, cmpl);
+ atomic_sub(num_wrbs, &txq->used);
+ cmpl = 0;
+ num_wrbs = 0;
+ }
+
+ if (atomic_read(&txq->used) == 0 || ++timeo > 200)
+ break;
+
+ mdelay(1);
+ } while (true);
+
+ if (atomic_read(&txq->used))
+ dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
+ atomic_read(&txq->used));
+
+ /* free posted tx for which compls will never arrive */
+ while (atomic_read(&txq->used)) {
+ sent_skb = sent_skbs[txq->tail];
+ end_idx = txq->tail;
+ index_adv(&end_idx,
+ wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
+ txq->len);
+ num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
+ atomic_sub(num_wrbs, &txq->used);
+ }
+}
+
+static void be_mcc_queues_destroy(struct be_adapter *adapter)
+{
+ struct be_queue_info *q;
+
+ q = &adapter->mcc_obj.q;
+ if (q->created)
+ be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
+ be_queue_free(adapter, q);
+
+ q = &adapter->mcc_obj.cq;
+ if (q->created)
+ be_cmd_q_destroy(adapter, q, QTYPE_CQ);
+ be_queue_free(adapter, q);
+}
+
+/* Must be called only after TX qs are created as MCC shares TX EQ */
+static int be_mcc_queues_create(struct be_adapter *adapter)
+{
+ struct be_queue_info *q, *cq;
+
+ /* Alloc MCC compl queue */
+ cq = &adapter->mcc_obj.cq;
+ if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
+ sizeof(struct be_mcc_compl)))
+ goto err;
+
+ /* Ask BE to create MCC compl queue; share TX's eq */
+ if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
+ goto mcc_cq_free;
+
+ /* Alloc MCC queue */
+ q = &adapter->mcc_obj.q;
+ if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
+ goto mcc_cq_destroy;
+
+ /* Ask BE to create MCC queue */
+ if (be_cmd_mccq_create(adapter, q, cq))
+ goto mcc_q_free;
+
+ return 0;
+
+mcc_q_free:
+ be_queue_free(adapter, q);
+mcc_cq_destroy:
+ be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
+mcc_cq_free:
+ be_queue_free(adapter, cq);
+err:
+ return -1;
+}
+
+static void be_tx_queues_destroy(struct be_adapter *adapter)
+{
+ struct be_queue_info *q;
+ struct be_tx_obj *txo;
+ u8 i;
+
+ for_all_tx_queues(adapter, txo, i) {
+ q = &txo->q;
+ if (q->created)
+ be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
+ be_queue_free(adapter, q);
+
+ q = &txo->cq;
+ if (q->created)
+ be_cmd_q_destroy(adapter, q, QTYPE_CQ);
+ be_queue_free(adapter, q);
+ }
+
+ /* Clear any residual events */
+ be_eq_clean(adapter, &adapter->tx_eq);
+
+ q = &adapter->tx_eq.q;
+ if (q->created)
+ be_cmd_q_destroy(adapter, q, QTYPE_EQ);
+ be_queue_free(adapter, q);
+}
+
+/* One TX event queue is shared by all TX compl qs */
+static int be_tx_queues_create(struct be_adapter *adapter)
+{
+ struct be_queue_info *eq, *q, *cq;
+ struct be_tx_obj *txo;
+ u8 i;
+
+ adapter->tx_eq.max_eqd = 0;
+ adapter->tx_eq.min_eqd = 0;
+ adapter->tx_eq.cur_eqd = 96;
+ adapter->tx_eq.enable_aic = false;
+
+ eq = &adapter->tx_eq.q;
+ if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
+ sizeof(struct be_eq_entry)))
+ return -1;
+
+ if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
+ goto err;
+ adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
+
+ for_all_tx_queues(adapter, txo, i) {
+ cq = &txo->cq;
+ if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
+ sizeof(struct be_eth_tx_compl)))
+ goto err;
+
+ if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
+ goto err;
+
+ q = &txo->q;
+ if (be_queue_alloc(adapter, q, TX_Q_LEN,
+ sizeof(struct be_eth_wrb)))
+ goto err;
+
+ if (be_cmd_txq_create(adapter, q, cq))
+ goto err;
+ }
+ return 0;
+
+err:
+ be_tx_queues_destroy(adapter);
+ return -1;
+}
+
+static void be_rx_queues_destroy(struct be_adapter *adapter)
+{
+ struct be_queue_info *q;
+ struct be_rx_obj *rxo;
+ int i;
+
+ for_all_rx_queues(adapter, rxo, i) {
+ be_queue_free(adapter, &rxo->q);
+
+ q = &rxo->cq;
+ if (q->created)
+ be_cmd_q_destroy(adapter, q, QTYPE_CQ);
+ be_queue_free(adapter, q);
+
+ q = &rxo->rx_eq.q;
+ if (q->created)
+ be_cmd_q_destroy(adapter, q, QTYPE_EQ);
+ be_queue_free(adapter, q);
+ }
+}
+
+static u32 be_num_rxqs_want(struct be_adapter *adapter)
+{
+ if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
+ !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
+ return 1 + MAX_RSS_QS; /* one default non-RSS queue */
+ } else {
+ dev_warn(&adapter->pdev->dev,
+ "No support for multiple RX queues\n");
+ return 1;
+ }
+}
+
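+/* Allocate an EQ, a CQ and an rx ring per rx queue; the rings themselves
+ * are created in HW later, from be_open().
+ */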
+static int be_rx_queues_create(struct be_adapter *adapter)
+{
+ struct be_queue_info *eq, *q, *cq;
+ struct be_rx_obj *rxo;
+ int rc, i;
+
+ adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
+ msix_enabled(adapter) ?
+ adapter->num_msix_vec - 1 : 1);
+ if (adapter->num_rx_qs != MAX_RX_QS)
+ dev_warn(&adapter->pdev->dev,
+			"Can create only %d RX queues\n", adapter->num_rx_qs);
+
+ adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
+ for_all_rx_queues(adapter, rxo, i) {
+ rxo->adapter = adapter;
+ rxo->rx_eq.max_eqd = BE_MAX_EQD;
+ rxo->rx_eq.enable_aic = true;
+
+ /* EQ */
+ eq = &rxo->rx_eq.q;
+ rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
+ sizeof(struct be_eq_entry));
+ if (rc)
+ goto err;
+
+ rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
+ if (rc)
+ goto err;
+
+ rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
+
+ /* CQ */
+ cq = &rxo->cq;
+ rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
+ sizeof(struct be_eth_rx_compl));
+ if (rc)
+ goto err;
+
+ rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
+ if (rc)
+ goto err;
+
+ /* Rx Q - will be created in be_open() */
+ q = &rxo->q;
+ rc = be_queue_alloc(adapter, q, RX_Q_LEN,
+ sizeof(struct be_eth_rx_d));
+ if (rc)
+ goto err;
+ }
+
+ return 0;
+err:
+ be_rx_queues_destroy(adapter);
+ return -1;
+}
+
+static bool event_peek(struct be_eq_obj *eq_obj)
+{
+ struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
+ if (!eqe->evt)
+ return false;
+ else
+ return true;
+}
+
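+/* INTx handler: find which event queues fired by peeking at them
+ * (Lancer) or by reading the CEV ISR register (BE2/BE3).
+ */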
+static irqreturn_t be_intx(int irq, void *dev)
+{
+ struct be_adapter *adapter = dev;
+ struct be_rx_obj *rxo;
+	int isr, i, tx = 0, rx = 0;
+
+ if (lancer_chip(adapter)) {
+ if (event_peek(&adapter->tx_eq))
+ tx = event_handle(adapter, &adapter->tx_eq, false);
+ for_all_rx_queues(adapter, rxo, i) {
+ if (event_peek(&rxo->rx_eq))
+ rx |= event_handle(adapter, &rxo->rx_eq, true);
+ }
+
+ if (!(tx || rx))
+ return IRQ_NONE;
+
+ } else {
+ isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
+ (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
+ if (!isr)
+ return IRQ_NONE;
+
+ if ((1 << adapter->tx_eq.eq_idx & isr))
+ event_handle(adapter, &adapter->tx_eq, false);
+
+ for_all_rx_queues(adapter, rxo, i) {
+ if ((1 << rxo->rx_eq.eq_idx & isr))
+ event_handle(adapter, &rxo->rx_eq, true);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t be_msix_rx(int irq, void *dev)
+{
+ struct be_rx_obj *rxo = dev;
+ struct be_adapter *adapter = rxo->adapter;
+
+ event_handle(adapter, &rxo->rx_eq, true);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
+{
+ struct be_adapter *adapter = dev;
+
+ event_handle(adapter, &adapter->tx_eq, false);
+
+ return IRQ_HANDLED;
+}
+
+static inline bool do_gro(struct be_rx_compl_info *rxcp)
+{
+ return (rxcp->tcpf && !rxcp->err) ? true : false;
+}
+
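+/* NAPI poll for an rx queue: process up to budget completions, refill
+ * the rx ring when it runs low and re-arm the CQ when all are consumed.
+ */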
+static int be_poll_rx(struct napi_struct *napi, int budget)
+{
+ struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
+ struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
+ struct be_adapter *adapter = rxo->adapter;
+ struct be_queue_info *rx_cq = &rxo->cq;
+ struct be_rx_compl_info *rxcp;
+ u32 work_done;
+
+ rx_stats(rxo)->rx_polls++;
+ for (work_done = 0; work_done < budget; work_done++) {
+ rxcp = be_rx_compl_get(rxo);
+ if (!rxcp)
+ break;
+
+		/* Is it a flush compl that has no data? */
+ if (unlikely(rxcp->num_rcvd == 0))
+ goto loop_continue;
+
+ /* Discard compl with partial DMA Lancer B0 */
+ if (unlikely(!rxcp->pkt_size)) {
+ be_rx_compl_discard(adapter, rxo, rxcp);
+ goto loop_continue;
+ }
+
+		/* On BE, drop pkts that arrive due to imperfect filtering in
+		 * promiscuous mode on some SKUs
+		 */
+ if (unlikely(rxcp->port != adapter->port_num &&
+ !lancer_chip(adapter))) {
+ be_rx_compl_discard(adapter, rxo, rxcp);
+ goto loop_continue;
+ }
+
+ if (do_gro(rxcp))
+ be_rx_compl_process_gro(adapter, rxo, rxcp);
+ else
+ be_rx_compl_process(adapter, rxo, rxcp);
+loop_continue:
+ be_rx_stats_update(rxo, rxcp);
+ }
+
+ /* Refill the queue */
+ if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
+ be_post_rx_frags(rxo, GFP_ATOMIC);
+
+ /* All consumed */
+ if (work_done < budget) {
+ napi_complete(napi);
+ be_cq_notify(adapter, rx_cq->id, true, work_done);
+ } else {
+ /* More to be consumed; continue with interrupts disabled */
+ be_cq_notify(adapter, rx_cq->id, false, work_done);
+ }
+ return work_done;
+}
+
+/* As TX and MCC share the same EQ, check for both TX and MCC completions.
+ * For TX/MCC we don't honour budget; consume everything
+ */
+static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
+{
+ struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
+ struct be_adapter *adapter =
+ container_of(tx_eq, struct be_adapter, tx_eq);
+ struct be_tx_obj *txo;
+ struct be_eth_tx_compl *txcp;
+ int tx_compl, mcc_compl, status = 0;
+ u8 i;
+ u16 num_wrbs;
+
+ for_all_tx_queues(adapter, txo, i) {
+ tx_compl = 0;
+ num_wrbs = 0;
+ while ((txcp = be_tx_compl_get(&txo->cq))) {
+ num_wrbs += be_tx_compl_process(adapter, txo,
+ AMAP_GET_BITS(struct amap_eth_tx_compl,
+ wrb_index, txcp));
+ tx_compl++;
+ }
+ if (tx_compl) {
+ be_cq_notify(adapter, txo->cq.id, true, tx_compl);
+
+ atomic_sub(num_wrbs, &txo->q.used);
+
+ /* As Tx wrbs have been freed up, wake up netdev queue
+ * if it was stopped due to lack of tx wrbs. */
+ if (__netif_subqueue_stopped(adapter->netdev, i) &&
+ atomic_read(&txo->q.used) < txo->q.len / 2) {
+ netif_wake_subqueue(adapter->netdev, i);
+ }
+
+ u64_stats_update_begin(&tx_stats(txo)->sync_compl);
+ tx_stats(txo)->tx_compl += tx_compl;
+ u64_stats_update_end(&tx_stats(txo)->sync_compl);
+ }
+ }
+
+ mcc_compl = be_process_mcc(adapter, &status);
+
+ if (mcc_compl) {
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+ be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
+ }
+
+ napi_complete(napi);
+
+ be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
+ adapter->drv_stats.tx_events++;
+ return 1;
+}
+
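+/* Read the unrecoverable-error status registers and log every unmasked
+ * error bit by name; a UE also sets the eeh_err flag.
+ */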
+void be_detect_dump_ue(struct be_adapter *adapter)
+{
+ u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
+ u32 i;
+
+ pci_read_config_dword(adapter->pdev,
+ PCICFG_UE_STATUS_LOW, &ue_status_lo);
+ pci_read_config_dword(adapter->pdev,
+ PCICFG_UE_STATUS_HIGH, &ue_status_hi);
+ pci_read_config_dword(adapter->pdev,
+ PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
+ pci_read_config_dword(adapter->pdev,
+ PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
+
+ ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
+ ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
+
+ if (ue_status_lo || ue_status_hi) {
+ adapter->ue_detected = true;
+ adapter->eeh_err = true;
+ dev_err(&adapter->pdev->dev, "UE Detected!!\n");
+ }
+
+ if (ue_status_lo) {
+ for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
+ if (ue_status_lo & 1)
+ dev_err(&adapter->pdev->dev,
+ "UE: %s bit set\n", ue_status_low_desc[i]);
+ }
+ }
+ if (ue_status_hi) {
+ for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
+ if (ue_status_hi & 1)
+ dev_err(&adapter->pdev->dev,
+ "UE: %s bit set\n", ue_status_hi_desc[i]);
+ }
+ }
+}
+
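+/* Periodic (1s) housekeeping: UE detection, stats refresh, rx EQD update
+ * and replenishing of starved rx queues; only MCC completions are reaped
+ * while the interface is down.
+ */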
+static void be_worker(struct work_struct *work)
+{
+ struct be_adapter *adapter =
+ container_of(work, struct be_adapter, work.work);
+ struct be_rx_obj *rxo;
+ int i;
+
+ if (!adapter->ue_detected && !lancer_chip(adapter))
+ be_detect_dump_ue(adapter);
+
+ /* when interrupts are not yet enabled, just reap any pending
+ * mcc completions */
+ if (!netif_running(adapter->netdev)) {
+ int mcc_compl, status = 0;
+
+ mcc_compl = be_process_mcc(adapter, &status);
+
+ if (mcc_compl) {
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+ be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
+ }
+
+ goto reschedule;
+ }
+
+ if (!adapter->stats_cmd_sent) {
+ if (lancer_chip(adapter))
+ lancer_cmd_get_pport_stats(adapter,
+ &adapter->stats_cmd);
+ else
+ be_cmd_get_stats(adapter, &adapter->stats_cmd);
+ }
+
+ for_all_rx_queues(adapter, rxo, i) {
+ be_rx_eqd_update(adapter, rxo);
+
+ if (rxo->rx_post_starved) {
+ rxo->rx_post_starved = false;
+ be_post_rx_frags(rxo, GFP_KERNEL);
+ }
+ }
+
+reschedule:
+ adapter->work_counter++;
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
+}
+
+static void be_msix_disable(struct be_adapter *adapter)
+{
+ if (msix_enabled(adapter)) {
+ pci_disable_msix(adapter->pdev);
+ adapter->num_msix_vec = 0;
+ }
+}
+
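+/* Try to enable one MSI-X vector per desired rx queue plus one for
+ * tx/mcc; fall back to however many vectors the HW grants, as long as
+ * that meets BE_MIN_MSIX_VECTORS.
+ */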
+static void be_msix_enable(struct be_adapter *adapter)
+{
+#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
+ int i, status, num_vec;
+
+ num_vec = be_num_rxqs_want(adapter) + 1;
+
+ for (i = 0; i < num_vec; i++)
+ adapter->msix_entries[i].entry = i;
+
+ status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
+ if (status == 0) {
+ goto done;
+ } else if (status >= BE_MIN_MSIX_VECTORS) {
+ num_vec = status;
+ if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
+ num_vec) == 0)
+ goto done;
+ }
+ return;
+done:
+ adapter->num_msix_vec = num_vec;
+ return;
+}
+
+static void be_sriov_enable(struct be_adapter *adapter)
+{
+ be_check_sriov_fn_type(adapter);
+#ifdef CONFIG_PCI_IOV
+ if (be_physfn(adapter) && num_vfs) {
+ int status, pos;
+ u16 nvfs;
+
+ pos = pci_find_ext_capability(adapter->pdev,
+ PCI_EXT_CAP_ID_SRIOV);
+ pci_read_config_word(adapter->pdev,
+ pos + PCI_SRIOV_TOTAL_VF, &nvfs);
+
+ if (num_vfs > nvfs) {
+ dev_info(&adapter->pdev->dev,
+ "Device supports %d VFs and not %d\n",
+ nvfs, num_vfs);
+ num_vfs = nvfs;
+ }
+
+ status = pci_enable_sriov(adapter->pdev, num_vfs);
+ adapter->sriov_enabled = status ? false : true;
+ }
+#endif
+}
+
+static void be_sriov_disable(struct be_adapter *adapter)
+{
+#ifdef CONFIG_PCI_IOV
+ if (adapter->sriov_enabled) {
+ pci_disable_sriov(adapter->pdev);
+ adapter->sriov_enabled = false;
+ }
+#endif
+}
+
+static inline int be_msix_vec_get(struct be_adapter *adapter,
+ struct be_eq_obj *eq_obj)
+{
+ return adapter->msix_entries[eq_obj->eq_idx].vector;
+}
+
+static int be_request_irq(struct be_adapter *adapter,
+ struct be_eq_obj *eq_obj,
+ void *handler, char *desc, void *context)
+{
+ struct net_device *netdev = adapter->netdev;
+ int vec;
+
+ sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
+ vec = be_msix_vec_get(adapter, eq_obj);
+ return request_irq(vec, handler, 0, eq_obj->desc, context);
+}
+
+static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
+ void *context)
+{
+ int vec = be_msix_vec_get(adapter, eq_obj);
+ free_irq(vec, context);
+}
+
+static int be_msix_register(struct be_adapter *adapter)
+{
+ struct be_rx_obj *rxo;
+ int status, i;
+ char qname[10];
+
+ status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
+ adapter);
+ if (status)
+ goto err;
+
+ for_all_rx_queues(adapter, rxo, i) {
+ sprintf(qname, "rxq%d", i);
+ status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
+ qname, rxo);
+ if (status)
+ goto err_msix;
+ }
+
+ return 0;
+
+err_msix:
+ be_free_irq(adapter, &adapter->tx_eq, adapter);
+
+ for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
+ be_free_irq(adapter, &rxo->rx_eq, rxo);
+
+err:
+ dev_warn(&adapter->pdev->dev,
+ "MSIX Request IRQ failed - err %d\n", status);
+ be_msix_disable(adapter);
+ return status;
+}
+
+static int be_irq_register(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int status;
+
+ if (msix_enabled(adapter)) {
+ status = be_msix_register(adapter);
+ if (status == 0)
+ goto done;
+ /* INTx is not supported for VF */
+ if (!be_physfn(adapter))
+ return status;
+ }
+
+ /* INTx */
+ netdev->irq = adapter->pdev->irq;
+ status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
+ adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "INTx request IRQ failed - err %d\n", status);
+ return status;
+ }
+done:
+ adapter->isr_registered = true;
+ return 0;
+}
+
+static void be_irq_unregister(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct be_rx_obj *rxo;
+ int i;
+
+ if (!adapter->isr_registered)
+ return;
+
+ /* INTx */
+ if (!msix_enabled(adapter)) {
+ free_irq(netdev->irq, adapter);
+ goto done;
+ }
+
+ /* MSIx */
+ be_free_irq(adapter, &adapter->tx_eq, adapter);
+
+ for_all_rx_queues(adapter, rxo, i)
+ be_free_irq(adapter, &rxo->rx_eq, rxo);
+
+done:
+ adapter->isr_registered = false;
+}
+
+static void be_rx_queues_clear(struct be_adapter *adapter)
+{
+ struct be_queue_info *q;
+ struct be_rx_obj *rxo;
+ int i;
+
+ for_all_rx_queues(adapter, rxo, i) {
+ q = &rxo->q;
+ if (q->created) {
+ be_cmd_rxq_destroy(adapter, q);
+ /* After the rxq is destroyed, wait a grace period of
+ * 1ms for all DMA to end and the flush completion to
+ * arrive.
+ */
+ mdelay(1);
+ be_rx_q_clean(adapter, rxo);
+ }
+
+ /* Clear any residual events */
+ q = &rxo->rx_eq.q;
+ if (q->created)
+ be_eq_clean(adapter, &rxo->rx_eq);
+ }
+}
+
+static int be_close(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_rx_obj *rxo;
+ struct be_tx_obj *txo;
+ struct be_eq_obj *tx_eq = &adapter->tx_eq;
+ int vec, i;
+
+ be_async_mcc_disable(adapter);
+
+ if (!lancer_chip(adapter))
+ be_intr_set(adapter, false);
+
+ for_all_rx_queues(adapter, rxo, i)
+ napi_disable(&rxo->rx_eq.napi);
+
+ napi_disable(&tx_eq->napi);
+
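+ /* On Lancer, disarm all completion queues so they stop raising
+ * events.
+ */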
+ if (lancer_chip(adapter)) {
+ be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
+ for_all_rx_queues(adapter, rxo, i)
+ be_cq_notify(adapter, rxo->cq.id, false, 0);
+ for_all_tx_queues(adapter, txo, i)
+ be_cq_notify(adapter, txo->cq.id, false, 0);
+ }
+
+ if (msix_enabled(adapter)) {
+ vec = be_msix_vec_get(adapter, tx_eq);
+ synchronize_irq(vec);
+
+ for_all_rx_queues(adapter, rxo, i) {
+ vec = be_msix_vec_get(adapter, &rxo->rx_eq);
+ synchronize_irq(vec);
+ }
+ } else {
+ synchronize_irq(netdev->irq);
+ }
+ be_irq_unregister(adapter);
+
+ /* Wait for all pending tx completions to arrive so that
+ * all tx skbs are freed.
+ */
+ for_all_tx_queues(adapter, txo, i)
+ be_tx_compl_clean(adapter, txo);
+
+ be_rx_queues_clear(adapter);
+ return 0;
+}
+
+static int be_rx_queues_setup(struct be_adapter *adapter)
+{
+ struct be_rx_obj *rxo;
+ int rc, i;
+ u8 rsstable[MAX_RSS_QS];
+
+ for_all_rx_queues(adapter, rxo, i) {
+ rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
+ rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
+ adapter->if_handle,
+ (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
+ if (rc)
+ return rc;
+ }
+
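+ /* Build the RSS indirection table from the rss_ids returned at
+ * rxq-create time and program it into the adapter.
+ */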
+ if (be_multi_rxq(adapter)) {
+ for_all_rss_queues(adapter, rxo, i)
+ rsstable[i] = rxo->rss_id;
+
+ rc = be_cmd_rss_config(adapter, rsstable,
+ adapter->num_rx_qs - 1);
+ if (rc)
+ return rc;
+ }
+
+ /* First time posting */
+ for_all_rx_queues(adapter, rxo, i) {
+ be_post_rx_frags(rxo, GFP_KERNEL);
+ napi_enable(&rxo->rx_eq.napi);
+ }
+ return 0;
+}
+
+static int be_open(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_eq_obj *tx_eq = &adapter->tx_eq;
+ struct be_rx_obj *rxo;
+ int status, i;
+
+ status = be_rx_queues_setup(adapter);
+ if (status)
+ goto err;
+
+ napi_enable(&tx_eq->napi);
+
+ be_irq_register(adapter);
+
+ if (!lancer_chip(adapter))
+ be_intr_set(adapter, true);
+
+ /* The evt queues are created in unarmed state; arm them */
+ for_all_rx_queues(adapter, rxo, i) {
+ be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
+ be_cq_notify(adapter, rxo->cq.id, true, 0);
+ }
+ be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
+
+ /* Now that interrupts are on, we can process async MCC completions */
+ be_async_mcc_enable(adapter);
+
+ if (be_physfn(adapter)) {
+ status = be_vid_config(adapter, false, 0);
+ if (status)
+ goto err;
+
+ status = be_cmd_set_flow_control(adapter,
+ adapter->tx_fc, adapter->rx_fc);
+ if (status)
+ goto err;
+ }
+
+ return 0;
+err:
+ be_close(adapter->netdev);
+ return -EIO;
+}
+
+static int be_setup_wol(struct be_adapter *adapter, bool enable)
+{
+ struct be_dma_mem cmd;
+ int status = 0;
+ u8 mac[ETH_ALEN];
+
+ memset(mac, 0, ETH_ALEN);
+
+ cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_KERNEL);
+ if (cmd.va == NULL)
+ return -1;
+ memset(cmd.va, 0, cmd.size);
+
+ if (enable) {
+ status = pci_write_config_dword(adapter->pdev,
+ PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Could not enable Wake-on-lan\n");
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+ cmd.dma);
+ return status;
+ }
+ status = be_cmd_enable_magic_wol(adapter,
+ adapter->netdev->dev_addr, &cmd);
+ pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
+ pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
+ } else {
+ status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
+ pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
+ pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
+ }
+
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+ return status;
+}
+
+/*
+ * Generate a seed MAC address from the PF MAC address using jhash.
+ * MAC addresses for the VFs are assigned incrementally, starting from
+ * the seed. These addresses are programmed into the ASIC by the PF and
+ * each VF driver queries for its MAC address during probe.
+ */
+static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
+{
+ u32 vf = 0;
+ int status = 0;
+ u8 mac[ETH_ALEN];
+
+ be_vf_eth_addr_generate(adapter, mac);
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ status = be_cmd_pmac_add(adapter, mac,
+ adapter->vf_cfg[vf].vf_if_handle,
+ &adapter->vf_cfg[vf].vf_pmac_id,
+ vf + 1);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Mac address add failed for VF %d\n", vf);
+ else
+ memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+
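+ /* The next VF gets the next consecutive address */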
+ mac[5] += 1;
+ }
+ return status;
+}
+
+static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
+{
+ u32 vf;
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
+ be_cmd_pmac_del(adapter,
+ adapter->vf_cfg[vf].vf_if_handle,
+ adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
+ }
+}
+
+static int be_setup(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 cap_flags, en_flags, vf = 0;
+ int status;
+ u8 mac[ETH_ALEN];
+
+ be_cmd_req_native_mode(adapter);
+
+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
+ BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST;
+
+ if (be_physfn(adapter)) {
+ cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
+ BE_IF_FLAGS_PROMISCUOUS |
+ BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
+
+ if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
+ cap_flags |= BE_IF_FLAGS_RSS;
+ en_flags |= BE_IF_FLAGS_RSS;
+ }
+ }
+
+ status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ netdev->dev_addr, false/* pmac_invalid */,
+ &adapter->if_handle, &adapter->pmac_id, 0);
+ if (status != 0)
+ goto do_none;
+
+ if (be_physfn(adapter)) {
+ if (adapter->sriov_enabled) {
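+ /* Create one interface per VF; their MAC addresses
+ * are programmed later by be_vf_eth_addr_config().
+ */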
+ while (vf < num_vfs) {
+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
+ BE_IF_FLAGS_BROADCAST;
+ status = be_cmd_if_create(adapter, cap_flags,
+ en_flags, mac, true,
+ &adapter->vf_cfg[vf].vf_if_handle,
+ NULL, vf+1);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Interface Create failed for VF %d\n",
+ vf);
+ goto if_destroy;
+ }
+ adapter->vf_cfg[vf].vf_pmac_id =
+ BE_INVALID_PMAC_ID;
+ vf++;
+ }
+ }
+ } else {
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
+ if (!status) {
+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ }
+ }
+
+ status = be_tx_queues_create(adapter);
+ if (status != 0)
+ goto if_destroy;
+
+ status = be_rx_queues_create(adapter);
+ if (status != 0)
+ goto tx_qs_destroy;
+
+ /* Allow all priorities by default. A GRP5 event may modify this. */
+ adapter->vlan_prio_bmap = 0xff;
+
+ status = be_mcc_queues_create(adapter);
+ if (status != 0)
+ goto rx_qs_destroy;
+
+ adapter->link_speed = -1;
+
+ return 0;
+
+rx_qs_destroy:
+ be_rx_queues_destroy(adapter);
+tx_qs_destroy:
+ be_tx_queues_destroy(adapter);
+if_destroy:
+ if (be_physfn(adapter) && adapter->sriov_enabled)
+ for (vf = 0; vf < num_vfs; vf++)
+ if (adapter->vf_cfg[vf].vf_if_handle)
+ be_cmd_if_destroy(adapter,
+ adapter->vf_cfg[vf].vf_if_handle,
+ vf + 1);
+ be_cmd_if_destroy(adapter, adapter->if_handle, 0);
+do_none:
+ return status;
+}
+
+static int be_clear(struct be_adapter *adapter)
+{
+ int vf;
+
+ if (be_physfn(adapter) && adapter->sriov_enabled)
+ be_vf_eth_addr_rem(adapter);
+
+ be_mcc_queues_destroy(adapter);
+ be_rx_queues_destroy(adapter);
+ be_tx_queues_destroy(adapter);
+ adapter->eq_next_idx = 0;
+
+ if (be_physfn(adapter) && adapter->sriov_enabled)
+ for (vf = 0; vf < num_vfs; vf++)
+ if (adapter->vf_cfg[vf].vf_if_handle)
+ be_cmd_if_destroy(adapter,
+ adapter->vf_cfg[vf].vf_if_handle,
+ vf + 1);
+
+ be_cmd_if_destroy(adapter, adapter->if_handle, 0);
+
+ adapter->be3_native = 0;
+
+ /* tell fw we're done with firing cmds */
+ be_cmd_fw_clean(adapter);
+ return 0;
+}
+
+#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
+static bool be_flash_redboot(struct be_adapter *adapter,
+ const u8 *p, u32 img_start, int image_size,
+ int hdr_size)
+{
+ u32 crc_offset;
+ u8 flashed_crc[4];
+ int status;
+
+ crc_offset = hdr_size + img_start + image_size - 4;
+
+ p += crc_offset;
+
+ status = be_cmd_get_flash_crc(adapter, flashed_crc,
+ (image_size - 4));
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "could not get crc from flash, not flashing redboot\n");
+ return false;
+ }
+
+ /* Update redboot only if the CRC does not match */
+ return memcmp(flashed_crc, p, 4) != 0;
+}
+
+static bool phy_flashing_required(struct be_adapter *adapter)
+{
+ int status = 0;
+ struct be_phy_info phy_info;
+
+ status = be_cmd_get_phy_info(adapter, &phy_info);
+ if (status)
+ return false;
+ if ((phy_info.phy_type == TN_8022) &&
+ (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
+ return true;
+ }
+ return false;
+}
+
+static int be_flash_data(struct be_adapter *adapter,
+ const struct firmware *fw,
+ struct be_dma_mem *flash_cmd, int num_of_images)
+{
+ int status = 0, i, filehdr_size = 0;
+ u32 total_bytes = 0, flash_op;
+ int num_bytes;
+ const u8 *p = fw->data;
+ struct be_cmd_write_flashrom *req = flash_cmd->va;
+ const struct flash_comp *pflashcomp;
+ int num_comp;
+
+ static const struct flash_comp gen3_flash_types[10] = {
+ { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
+ FLASH_IMAGE_MAX_SIZE_g3},
+ { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
+ FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
+ { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
+ { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
+ { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
+ { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
+ FLASH_IMAGE_MAX_SIZE_g3},
+ { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
+ FLASH_IMAGE_MAX_SIZE_g3},
+ { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
+ FLASH_IMAGE_MAX_SIZE_g3},
+ { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
+ FLASH_NCSI_IMAGE_MAX_SIZE_g3},
+ { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
+ FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
+ };
+ static const struct flash_comp gen2_flash_types[8] = {
+ { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
+ FLASH_IMAGE_MAX_SIZE_g2},
+ { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
+ FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
+ { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
+ { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
+ { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
+ { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
+ FLASH_IMAGE_MAX_SIZE_g2},
+ { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
+ FLASH_IMAGE_MAX_SIZE_g2},
+ { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
+ FLASH_IMAGE_MAX_SIZE_g2}
+ };
+
+ if (adapter->generation == BE_GEN3) {
+ pflashcomp = gen3_flash_types;
+ filehdr_size = sizeof(struct flash_file_hdr_g3);
+ num_comp = ARRAY_SIZE(gen3_flash_types);
+ } else {
+ pflashcomp = gen2_flash_types;
+ filehdr_size = sizeof(struct flash_file_hdr_g2);
+ num_comp = ARRAY_SIZE(gen2_flash_types);
+ }
+ for (i = 0; i < num_comp; i++) {
+ if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
+ memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
+ continue;
+ if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
+ if (!phy_flashing_required(adapter))
+ continue;
+ }
+ if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
+ (!be_flash_redboot(adapter, fw->data,
+ pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
+ (num_of_images * sizeof(struct image_hdr)))))
+ continue;
+ p = fw->data;
+ p += filehdr_size + pflashcomp[i].offset
+ + (num_of_images * sizeof(struct image_hdr));
+ if (p + pflashcomp[i].size > fw->data + fw->size)
+ return -1;
+ total_bytes = pflashcomp[i].size;
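+ /* Write the component in 32KB chunks; intermediate chunks use a
+ * SAVE op and the final chunk triggers the actual FLASH op.
+ */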
+ while (total_bytes) {
+ if (total_bytes > 32*1024)
+ num_bytes = 32*1024;
+ else
+ num_bytes = total_bytes;
+ total_bytes -= num_bytes;
+ if (!total_bytes) {
+ if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
+ flash_op = FLASHROM_OPER_PHY_FLASH;
+ else
+ flash_op = FLASHROM_OPER_FLASH;
+ } else {
+ if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
+ flash_op = FLASHROM_OPER_PHY_SAVE;
+ else
+ flash_op = FLASHROM_OPER_SAVE;
+ }
+ memcpy(req->params.data_buf, p, num_bytes);
+ p += num_bytes;
+ status = be_cmd_write_flashrom(adapter, flash_cmd,
+ pflashcomp[i].optype, flash_op, num_bytes);
+ if (status) {
+ if ((status == ILLEGAL_IOCTL_REQ) &&
+ (pflashcomp[i].optype ==
+ IMG_TYPE_PHY_FW))
+ break;
+ dev_err(&adapter->pdev->dev,
+ "cmd to write to flash rom failed.\n");
+ return -1;
+ }
+ }
+ }
+ return 0;
+}
+
+static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
+{
+ if (fhdr == NULL)
+ return 0;
+ if (fhdr->build[0] == '3')
+ return BE_GEN3;
+ else if (fhdr->build[0] == '2')
+ return BE_GEN2;
+ else
+ return 0;
+}
+
+static int lancer_fw_download(struct be_adapter *adapter,
+ const struct firmware *fw)
+{
+#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
+#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
+ struct be_dma_mem flash_cmd;
+ const u8 *data_ptr = NULL;
+ u8 *dest_image_ptr = NULL;
+ size_t image_size = 0;
+ u32 chunk_size = 0;
+ u32 data_written = 0;
+ u32 offset = 0;
+ int status = 0;
+ u8 add_status = 0;
+
+ if (!IS_ALIGNED(fw->size, sizeof(u32))) {
+ dev_err(&adapter->pdev->dev,
+ "FW Image not properly aligned. "
+ "Length must be 4 byte aligned.\n");
+ status = -EINVAL;
+ goto lancer_fw_exit;
+ }
+
+ flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
+ + LANCER_FW_DOWNLOAD_CHUNK;
+ flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
+ &flash_cmd.dma, GFP_KERNEL);
+ if (!flash_cmd.va) {
+ status = -ENOMEM;
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure while flashing\n");
+ goto lancer_fw_exit;
+ }
+
+ dest_image_ptr = flash_cmd.va +
+ sizeof(struct lancer_cmd_req_write_object);
+ image_size = fw->size;
+ data_ptr = fw->data;
+
+ while (image_size) {
+ chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
+
+ /* Copy the image chunk content. */
+ memcpy(dest_image_ptr, data_ptr, chunk_size);
+
+ status = lancer_cmd_write_object(adapter, &flash_cmd,
+ chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
+ &data_written, &add_status);
+
+ if (status)
+ break;
+
+ offset += data_written;
+ data_ptr += data_written;
+ image_size -= data_written;
+ }
+
+ if (!status) {
+ /* Commit the FW written */
+ status = lancer_cmd_write_object(adapter, &flash_cmd,
+ 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
+ &data_written, &add_status);
+ }
+
+ dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
+ flash_cmd.dma);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Firmware load error. "
+ "Status code: 0x%x Additional Status: 0x%x\n",
+ status, add_status);
+ goto lancer_fw_exit;
+ }
+
+ dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
+lancer_fw_exit:
+ return status;
+}
+
+static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
+{
+ struct flash_file_hdr_g2 *fhdr;
+ struct flash_file_hdr_g3 *fhdr3;
+ struct image_hdr *img_hdr_ptr = NULL;
+ struct be_dma_mem flash_cmd;
+ const u8 *p;
+ int status = 0, i = 0, num_imgs = 0;
+
+ p = fw->data;
+ fhdr = (struct flash_file_hdr_g2 *) p;
+
+ flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
+ flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
+ &flash_cmd.dma, GFP_KERNEL);
+ if (!flash_cmd.va) {
+ status = -ENOMEM;
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure while flashing\n");
+ goto be_fw_exit;
+ }
+
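+ /* Flash only when the UFI file generation matches the adapter
+ * generation.
+ */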
+ if ((adapter->generation == BE_GEN3) &&
+ (get_ufigen_type(fhdr) == BE_GEN3)) {
+ fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
+ num_imgs = le32_to_cpu(fhdr3->num_imgs);
+ for (i = 0; i < num_imgs; i++) {
+ img_hdr_ptr = (struct image_hdr *) (fw->data +
+ (sizeof(struct flash_file_hdr_g3) +
+ i * sizeof(struct image_hdr)));
+ if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
+ status = be_flash_data(adapter, fw, &flash_cmd,
+ num_imgs);
+ }
+ } else if ((adapter->generation == BE_GEN2) &&
+ (get_ufigen_type(fhdr) == BE_GEN2)) {
+ status = be_flash_data(adapter, fw, &flash_cmd, 0);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "UFI and Interface are not compatible for flashing\n");
+ status = -1;
+ }
+
+ dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
+ flash_cmd.dma);
+ if (status) {
+ dev_err(&adapter->pdev->dev, "Firmware load error\n");
+ goto be_fw_exit;
+ }
+
+ dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
+
+be_fw_exit:
+ return status;
+}
+
+int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
+{
+ const struct firmware *fw;
+ int status;
+
+ if (!netif_running(adapter->netdev)) {
+ dev_err(&adapter->pdev->dev,
+ "Firmware load not allowed (interface is down)\n");
+ return -1;
+ }
+
+ status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
+ if (status)
+ goto fw_exit;
+
+ dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
+
+ if (lancer_chip(adapter))
+ status = lancer_fw_download(adapter, fw);
+ else
+ status = be_fw_download(adapter, fw);
+
+fw_exit:
+ release_firmware(fw);
+ return status;
+}
+
+static const struct net_device_ops be_netdev_ops = {
+ .ndo_open = be_open,
+ .ndo_stop = be_close,
+ .ndo_start_xmit = be_xmit,
+ .ndo_set_rx_mode = be_set_multicast_list,
+ .ndo_set_mac_address = be_mac_addr_set,
+ .ndo_change_mtu = be_change_mtu,
+ .ndo_get_stats64 = be_get_stats64,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_vlan_rx_add_vid = be_vlan_add_vid,
+ .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
+ .ndo_set_vf_mac = be_set_vf_mac,
+ .ndo_set_vf_vlan = be_set_vf_vlan,
+ .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
+ .ndo_get_vf_config = be_get_vf_config
+};
+
+static void be_netdev_init(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_rx_obj *rxo;
+ int i;
+
+ netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_TX;
+ if (be_multi_rxq(adapter))
+ netdev->hw_features |= NETIF_F_RXHASH;
+
+ netdev->features |= netdev->hw_features |
+ NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+
+ netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+ netdev->flags |= IFF_MULTICAST;
+
+ /* Default settings for Rx and Tx flow control */
+ adapter->rx_fc = true;
+ adapter->tx_fc = true;
+
+ netif_set_gso_max_size(netdev, 65535);
+
+ BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
+
+ SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
+
+ for_all_rx_queues(adapter, rxo, i)
+ netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
+ BE_NAPI_WEIGHT);
+
+ netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
+ BE_NAPI_WEIGHT);
+}
+
+static void be_unmap_pci_bars(struct be_adapter *adapter)
+{
+ if (adapter->csr)
+ iounmap(adapter->csr);
+ if (adapter->db)
+ iounmap(adapter->db);
+ if (adapter->pcicfg && be_physfn(adapter))
+ iounmap(adapter->pcicfg);
+}
+
+static int be_map_pci_bars(struct be_adapter *adapter)
+{
+ u8 __iomem *addr;
+ int pcicfg_reg, db_reg;
+
+ if (lancer_chip(adapter)) {
+ addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
+ pci_resource_len(adapter->pdev, 0));
+ if (addr == NULL)
+ return -ENOMEM;
+ adapter->db = addr;
+ return 0;
+ }
+
+ if (be_physfn(adapter)) {
+ addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
+ pci_resource_len(adapter->pdev, 2));
+ if (addr == NULL)
+ return -ENOMEM;
+ adapter->csr = addr;
+ }
+
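+ /* The pcicfg and doorbell BARs differ between BE2 and BE3, and
+ * for BE3 between PF and VF.
+ */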
+ if (adapter->generation == BE_GEN2) {
+ pcicfg_reg = 1;
+ db_reg = 4;
+ } else {
+ pcicfg_reg = 0;
+ if (be_physfn(adapter))
+ db_reg = 4;
+ else
+ db_reg = 0;
+ }
+ addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
+ pci_resource_len(adapter->pdev, db_reg));
+ if (addr == NULL)
+ goto pci_map_err;
+ adapter->db = addr;
+
+ if (be_physfn(adapter)) {
+ addr = ioremap_nocache(
+ pci_resource_start(adapter->pdev, pcicfg_reg),
+ pci_resource_len(adapter->pdev, pcicfg_reg));
+ if (addr == NULL)
+ goto pci_map_err;
+ adapter->pcicfg = addr;
+ } else
+ adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
+
+ return 0;
+pci_map_err:
+ be_unmap_pci_bars(adapter);
+ return -ENOMEM;
+}
+
+static void be_ctrl_cleanup(struct be_adapter *adapter)
+{
+ struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
+
+ be_unmap_pci_bars(adapter);
+
+ if (mem->va)
+ dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+ mem->dma);
+
+ mem = &adapter->rx_filter;
+ if (mem->va)
+ dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+ mem->dma);
+}
+
+static int be_ctrl_init(struct be_adapter *adapter)
+{
+ struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
+ struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
+ struct be_dma_mem *rx_filter = &adapter->rx_filter;
+ int status;
+
+ status = be_map_pci_bars(adapter);
+ if (status)
+ goto done;
+
+ mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
+ mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
+ mbox_mem_alloc->size,
+ &mbox_mem_alloc->dma,
+ GFP_KERNEL);
+ if (!mbox_mem_alloc->va) {
+ status = -ENOMEM;
+ goto unmap_pci_bars;
+ }
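+ /* The mailbox must be 16-byte aligned; carve an aligned region
+ * out of the over-sized allocation.
+ */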
+ mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
+ mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
+ mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
+ memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
+
+ rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
+ rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
+ &rx_filter->dma, GFP_KERNEL);
+ if (rx_filter->va == NULL) {
+ status = -ENOMEM;
+ goto free_mbox;
+ }
+ memset(rx_filter->va, 0, rx_filter->size);
+
+ mutex_init(&adapter->mbox_lock);
+ spin_lock_init(&adapter->mcc_lock);
+ spin_lock_init(&adapter->mcc_cq_lock);
+
+ init_completion(&adapter->flash_compl);
+ pci_save_state(adapter->pdev);
+ return 0;
+
+free_mbox:
+ dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
+ mbox_mem_alloc->va, mbox_mem_alloc->dma);
+
+unmap_pci_bars:
+ be_unmap_pci_bars(adapter);
+
+done:
+ return status;
+}
+
+static void be_stats_cleanup(struct be_adapter *adapter)
+{
+ struct be_dma_mem *cmd = &adapter->stats_cmd;
+
+ if (cmd->va)
+ dma_free_coherent(&adapter->pdev->dev, cmd->size,
+ cmd->va, cmd->dma);
+}
+
+static int be_stats_init(struct be_adapter *adapter)
+{
+ struct be_dma_mem *cmd = &adapter->stats_cmd;
+
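+ /* The stats command size depends on the chip: v0 for BE2, the
+ * pport stats variant for Lancer, v1 otherwise.
+ */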
+ if (adapter->generation == BE_GEN2) {
+ cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
+ } else {
+ if (lancer_chip(adapter))
+ cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
+ else
+ cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
+ }
+ cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
+ GFP_KERNEL);
+ if (cmd->va == NULL)
+ return -1;
+ memset(cmd->va, 0, cmd->size);
+ return 0;
+}
+
+static void __devexit be_remove(struct pci_dev *pdev)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+
+ if (!adapter)
+ return;
+
+ cancel_delayed_work_sync(&adapter->work);
+
+ unregister_netdev(adapter->netdev);
+
+ be_clear(adapter);
+
+ be_stats_cleanup(adapter);
+
+ be_ctrl_cleanup(adapter);
+
+ kfree(adapter->vf_cfg);
+ be_sriov_disable(adapter);
+
+ be_msix_disable(adapter);
+
+ pci_set_drvdata(pdev, NULL);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ free_netdev(adapter->netdev);
+}
+
+static int be_get_config(struct be_adapter *adapter)
+{
+ int status;
+ u8 mac[ETH_ALEN];
+
+ status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
+ if (status)
+ return status;
+
+ status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
+ &adapter->function_mode, &adapter->function_caps);
+ if (status)
+ return status;
+
+ memset(mac, 0, ETH_ALEN);
+
+ /* A default permanent address is given to each VF for Lancer */
+ if (be_physfn(adapter) || lancer_chip(adapter)) {
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
+
+ if (status)
+ return status;
+
+ if (!is_valid_ether_addr(mac))
+ return -EADDRNOTAVAIL;
+
+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ }
+
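+ /* function_mode bit 0x400 indicates multi-channel (FLEX10) mode,
+ * where only a quarter of the VLAN table is available per function.
+ */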
+ if (adapter->function_mode & 0x400)
+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
+ else
+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
+
+ status = be_cmd_get_cntl_attributes(adapter);
+ if (status)
+ return status;
+
+ if ((num_vfs && adapter->sriov_enabled) ||
+ (adapter->function_mode & 0x400) ||
+ lancer_chip(adapter) || !be_physfn(adapter)) {
+ adapter->num_tx_qs = 1;
+ netif_set_real_num_tx_queues(adapter->netdev,
+ adapter->num_tx_qs);
+ } else {
+ adapter->num_tx_qs = MAX_TX_QS;
+ }
+
+ return 0;
+}
+
+static int be_dev_family_check(struct be_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ u32 sli_intf = 0, if_type;
+
+ switch (pdev->device) {
+ case BE_DEVICE_ID1:
+ case OC_DEVICE_ID1:
+ adapter->generation = BE_GEN2;
+ break;
+ case BE_DEVICE_ID2:
+ case OC_DEVICE_ID2:
+ adapter->generation = BE_GEN3;
+ break;
+ case OC_DEVICE_ID3:
+ case OC_DEVICE_ID4:
+ pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
+ if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
+ SLI_INTF_IF_TYPE_SHIFT;
+
+ if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
+ if_type != 0x02) {
+ dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
+ return -EINVAL;
+ }
+ adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
+ SLI_INTF_FAMILY_SHIFT);
+ adapter->generation = BE_GEN3;
+ break;
+ default:
+ adapter->generation = 0;
+ }
+ return 0;
+}
+
+static int lancer_wait_ready(struct be_adapter *adapter)
+{
+#define SLIPORT_READY_TIMEOUT 500
+ u32 sliport_status;
+ int status = 0, i;
+
+ for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+ if (sliport_status & SLIPORT_STATUS_RDY_MASK)
+ break;
+
+ msleep(20);
+ }
+
+ if (i == SLIPORT_READY_TIMEOUT)
+ status = -1;
+
+ return status;
+}
+
+static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
+{
+ int status;
+ u32 sliport_status, err, reset_needed;
+
+ status = lancer_wait_ready(adapter);
+ if (!status) {
+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+ err = sliport_status & SLIPORT_STATUS_ERR_MASK;
+ reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
+ if (err && reset_needed) {
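+ /* Initiate a port reset via the SLIPORT control register */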
+ iowrite32(SLI_PORT_CONTROL_IP_MASK,
+ adapter->db + SLIPORT_CONTROL_OFFSET);
+
+ /* check whether the adapter has corrected the error */
+ status = lancer_wait_ready(adapter);
+ sliport_status = ioread32(adapter->db +
+ SLIPORT_STATUS_OFFSET);
+ sliport_status &= (SLIPORT_STATUS_ERR_MASK |
+ SLIPORT_STATUS_RN_MASK);
+ if (status || sliport_status)
+ status = -1;
+ } else if (err || reset_needed) {
+ status = -1;
+ }
+ }
+ return status;
+}
+
+static int __devinit be_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pdev_id)
+{
+ int status = 0;
+ struct be_adapter *adapter;
+ struct net_device *netdev;
+
+ status = pci_enable_device(pdev);
+ if (status)
+ goto do_none;
+
+ status = pci_request_regions(pdev, DRV_NAME);
+ if (status)
+ goto disable_dev;
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
+ if (netdev == NULL) {
+ status = -ENOMEM;
+ goto rel_reg;
+ }
+ adapter = netdev_priv(netdev);
+ adapter->pdev = pdev;
+ pci_set_drvdata(pdev, adapter);
+
+ status = be_dev_family_check(adapter);
+ if (status)
+ goto free_netdev;
+
+ adapter->netdev = netdev;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!status) {
+ netdev->features |= NETIF_F_HIGHDMA;
+ } else {
+ status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (status) {
+ dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
+ goto free_netdev;
+ }
+ }
+
+ be_sriov_enable(adapter);
+ if (adapter->sriov_enabled) {
+ adapter->vf_cfg = kcalloc(num_vfs,
+ sizeof(struct be_vf_cfg), GFP_KERNEL);
+
+ if (!adapter->vf_cfg)
+ goto free_netdev;
+ }
+
+ status = be_ctrl_init(adapter);
+ if (status)
+ goto free_vf_cfg;
+
+ if (lancer_chip(adapter)) {
+ status = lancer_test_and_set_rdy_state(adapter);
+ if (status) {
+ dev_err(&pdev->dev, "Adapter in non recoverable error\n");
+ goto ctrl_clean;
+ }
+ }
+
+ /* sync up with fw's ready state */
+ if (be_physfn(adapter)) {
+ status = be_cmd_POST(adapter);
+ if (status)
+ goto ctrl_clean;
+ }
+
+ /* tell fw we're ready to fire cmds */
+ status = be_cmd_fw_init(adapter);
+ if (status)
+ goto ctrl_clean;
+
+ status = be_cmd_reset_function(adapter);
+ if (status)
+ goto ctrl_clean;
+
+ status = be_stats_init(adapter);
+ if (status)
+ goto ctrl_clean;
+
+ status = be_get_config(adapter);
+ if (status)
+ goto stats_clean;
+
+ /* The INTR bit may be set in the card when probed by a kdump kernel
+ * after a crash.
+ */
+ if (!lancer_chip(adapter))
+ be_intr_set(adapter, false);
+
+ be_msix_enable(adapter);
+
+ INIT_DELAYED_WORK(&adapter->work, be_worker);
+
+ status = be_setup(adapter);
+ if (status)
+ goto msix_disable;
+
+ be_netdev_init(netdev);
+ status = register_netdev(netdev);
+ if (status != 0)
+ goto unsetup;
+
+ if (be_physfn(adapter) && adapter->sriov_enabled) {
+ u8 mac_speed;
+ u16 vf, lnk_speed;
+
+ if (!lancer_chip(adapter)) {
+ status = be_vf_eth_addr_config(adapter);
+ if (status)
+ goto unreg_netdev;
+ }
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ status = be_cmd_link_status_query(adapter, &mac_speed,
+ &lnk_speed, vf + 1);
+ if (!status)
+ adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
+ else
+ goto unreg_netdev;
+ }
+ }
+
+ dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
+
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
+ return 0;
+
+unreg_netdev:
+ unregister_netdev(netdev);
+unsetup:
+ be_clear(adapter);
+msix_disable:
+ be_msix_disable(adapter);
+stats_clean:
+ be_stats_cleanup(adapter);
+ctrl_clean:
+ be_ctrl_cleanup(adapter);
+free_vf_cfg:
+ kfree(adapter->vf_cfg);
+free_netdev:
+ be_sriov_disable(adapter);
+ free_netdev(netdev);
+ pci_set_drvdata(pdev, NULL);
+rel_reg:
+ pci_release_regions(pdev);
+disable_dev:
+ pci_disable_device(pdev);
+do_none:
+ dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
+ return status;
+}
+
+static int be_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ cancel_delayed_work_sync(&adapter->work);
+ if (adapter->wol)
+ be_setup_wol(adapter, true);
+
+ netif_device_detach(netdev);
+ if (netif_running(netdev)) {
+ rtnl_lock();
+ be_close(netdev);
+ rtnl_unlock();
+ }
+ be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
+ be_clear(adapter);
+
+ be_msix_disable(adapter);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return 0;
+}
+
+static int be_resume(struct pci_dev *pdev)
+{
+ int status = 0;
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+
+ status = pci_enable_device(pdev);
+ if (status)
+ return status;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ be_msix_enable(adapter);
+ /* tell fw we're ready to fire cmds */
+ status = be_cmd_fw_init(adapter);
+ if (status)
+ return status;
+
+ be_setup(adapter);
+ if (netif_running(netdev)) {
+ rtnl_lock();
+ be_open(netdev);
+ rtnl_unlock();
+ }
+ netif_device_attach(netdev);
+
+ if (adapter->wol)
+ be_setup_wol(adapter, false);
+
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
+ return 0;
+}
+
+/*
+ * An FLR will stop BE from DMAing any data.
+ */
+static void be_shutdown(struct pci_dev *pdev)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+
+ if (!adapter)
+ return;
+
+ cancel_delayed_work_sync(&adapter->work);
+
+ netif_device_detach(adapter->netdev);
+
+ if (adapter->wol)
+ be_setup_wol(adapter, true);
+
+ be_cmd_reset_function(adapter);
+
+ pci_disable_device(pdev);
+}
+
+static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ dev_err(&adapter->pdev->dev, "EEH error detected\n");
+
+ adapter->eeh_err = true;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ rtnl_lock();
+ be_close(netdev);
+ rtnl_unlock();
+ }
+ be_clear(adapter);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ int status;
+
+ dev_info(&adapter->pdev->dev, "EEH reset\n");
+ adapter->eeh_err = false;
+
+ status = pci_enable_device(pdev);
+ if (status)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_set_master(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ /* Check if card is ok and fw is ready */
+ status = be_cmd_POST(adapter);
+ if (status)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void be_eeh_resume(struct pci_dev *pdev)
+{
+ int status = 0;
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ dev_info(&adapter->pdev->dev, "EEH resume\n");
+
+ pci_save_state(pdev);
+
+ /* tell fw we're ready to fire cmds */
+ status = be_cmd_fw_init(adapter);
+ if (status)
+ goto err;
+
+ status = be_setup(adapter);
+ if (status)
+ goto err;
+
+ if (netif_running(netdev)) {
+ status = be_open(netdev);
+ if (status)
+ goto err;
+ }
+ netif_device_attach(netdev);
+ return;
+err:
+ dev_err(&adapter->pdev->dev, "EEH resume failed\n");
+}
+
+static struct pci_error_handlers be_eeh_handlers = {
+ .error_detected = be_eeh_err_detected,
+ .slot_reset = be_eeh_reset,
+ .resume = be_eeh_resume,
+};
+
+static struct pci_driver be_driver = {
+ .name = DRV_NAME,
+ .id_table = be_dev_ids,
+ .probe = be_probe,
+ .remove = be_remove,
+ .suspend = be_suspend,
+ .resume = be_resume,
+ .shutdown = be_shutdown,
+ .err_handler = &be_eeh_handlers
+};
+
+static int __init be_init_module(void)
+{
+ if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
+ rx_frag_size != 2048) {
+ printk(KERN_WARNING DRV_NAME
+ " : Module param rx_frag_size must be 2048/4096/8192."
+ " Using 2048\n");
+ rx_frag_size = 2048;
+ }
+
+ return pci_register_driver(&be_driver);
+}
+module_init(be_init_module);
+
+static void __exit be_exit_module(void)
+{
+ pci_unregister_driver(&be_driver);
+}
+module_exit(be_exit_module);