u8 mac_addr[ETH_ALEN];
};
-#ifdef CONFIG_RFS_ACCEL
/* enic_rfs_fltr_node - rfs filter node in hash table
 * @keys: IPv4 5 tuple
* @flow_id: flow_id of clsf filter provided by kernel
struct timer_list rfs_may_expire;
};
-#endif /* CONFIG_RFS_ACCEL */
-
/* Per-instance private data structure */
struct enic {
struct net_device *netdev;
/* completion queue cache line section */
____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
unsigned int cq_count;
-#ifdef CONFIG_RFS_ACCEL
struct enic_rfs_flw_tbl rfs_h;
-#endif
};
static inline struct device *enic_get_dev(struct enic *enic)
return ret;
}
-#ifdef CONFIG_RFS_ACCEL
-void enic_flow_may_expire(unsigned long data)
-{
- struct enic *enic = (struct enic *)data;
- bool res;
- int j;
-
- spin_lock(&enic->rfs_h.lock);
- for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) {
- struct hlist_head *hhead;
- struct hlist_node *tmp;
- struct enic_rfs_fltr_node *n;
-
- hhead = &enic->rfs_h.ht_head[enic->rfs_h.toclean++];
- hlist_for_each_entry_safe(n, tmp, hhead, node) {
- res = rps_may_expire_flow(enic->netdev, n->rq_id,
- n->flow_id, n->fltr_id);
- if (res) {
- res = enic_delfltr(enic, n->fltr_id);
- if (unlikely(res))
- continue;
- hlist_del(&n->node);
- kfree(n);
- enic->rfs_h.free++;
- }
- }
- }
- spin_unlock(&enic->rfs_h.lock);
- mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
-}
-
/* enic_rfs_flw_tbl_init - initialize enic->rfs_h members
* @enic: enic data
*/
enic->rfs_h.max = enic->config.num_arfs;
enic->rfs_h.free = enic->rfs_h.max;
enic->rfs_h.toclean = 0;
- init_timer(&enic->rfs_h.rfs_may_expire);
- enic->rfs_h.rfs_may_expire.function = enic_flow_may_expire;
- enic->rfs_h.rfs_may_expire.data = (unsigned long)enic;
- mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
+ enic_rfs_timer_start(enic);
}
void enic_rfs_flw_tbl_free(struct enic *enic)
{
int i;
- del_timer_sync(&enic->rfs_h.rfs_may_expire);
+ enic_rfs_timer_stop(enic);
spin_lock(&enic->rfs_h.lock);
enic->rfs_h.free = 0;
for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
spin_unlock(&enic->rfs_h.lock);
}
+#ifdef CONFIG_RFS_ACCEL
+void enic_flow_may_expire(unsigned long data)
+{
+ struct enic *enic = (struct enic *)data;
+ bool res;
+ int j;
+
+ spin_lock(&enic->rfs_h.lock);
+ for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) {
+ struct hlist_head *hhead;
+ struct hlist_node *tmp;
+ struct enic_rfs_fltr_node *n;
+
+ hhead = &enic->rfs_h.ht_head[enic->rfs_h.toclean++];
+ hlist_for_each_entry_safe(n, tmp, hhead, node) {
+ res = rps_may_expire_flow(enic->netdev, n->rq_id,
+ n->flow_id, n->fltr_id);
+ if (res) {
+ res = enic_delfltr(enic, n->fltr_id);
+ if (unlikely(res))
+ continue;
+ hlist_del(&n->node);
+ kfree(n);
+ enic->rfs_h.free++;
+ }
+ }
+ }
+ spin_unlock(&enic->rfs_h.lock);
+ mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
+}
+
static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h,
struct flow_keys *k)
{
int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq);
int enic_delfltr(struct enic *enic, u16 filter_id);
-
-#ifdef CONFIG_RFS_ACCEL
void enic_rfs_flw_tbl_init(struct enic *enic);
void enic_rfs_flw_tbl_free(struct enic *enic);
+
+#ifdef CONFIG_RFS_ACCEL
int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
+void enic_flow_may_expire(unsigned long data);
+
+static inline void enic_rfs_timer_start(struct enic *enic)
+{
+ init_timer(&enic->rfs_h.rfs_may_expire);
+ enic->rfs_h.rfs_may_expire.function = enic_flow_may_expire;
+ enic->rfs_h.rfs_may_expire.data = (unsigned long)enic;
+ mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
+}
+
+static inline void enic_rfs_timer_stop(struct enic *enic)
+{
+ del_timer_sync(&enic->rfs_h.rfs_may_expire);
+}
#else
-static inline void enic_rfs_flw_tbl_init(struct enic *enic) {}
-static inline void enic_rfs_flw_tbl_free(struct enic *enic) {}
+static inline void enic_rfs_timer_start(struct enic *enic) {}
+static inline void enic_rfs_timer_stop(struct enic *enic) {}
#endif /* CONFIG_RFS_ACCEL */
#endif /* _ENIC_CLSF_H_ */
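
For context, a minimal, self-contained user-space sketch of the pattern this patch applies: the flow-table init/free paths become unconditional, and only the timer start/stop helpers depend on CONFIG_RFS_ACCEL, compiling down to empty inline stubs otherwise. All names, fields, and values below are made up for illustration and are not part of the driver.

/*
 * Illustrative sketch only (not part of this patch). Builds with or
 * without -DCONFIG_RFS_ACCEL to show the stub pattern.
 */
#include <stdio.h>

struct demo_flw_tbl {
	int free;
	int timer_armed;
};

#ifdef CONFIG_RFS_ACCEL
static inline void demo_timer_start(struct demo_flw_tbl *t) { t->timer_armed = 1; }
static inline void demo_timer_stop(struct demo_flw_tbl *t)  { t->timer_armed = 0; }
#else
static inline void demo_timer_start(struct demo_flw_tbl *t) { (void)t; }
static inline void demo_timer_stop(struct demo_flw_tbl *t)  { (void)t; }
#endif

/* Always compiled in, mirroring how enic_rfs_flw_tbl_init()/free()
 * are now called unconditionally after this change.
 */
static void demo_tbl_init(struct demo_flw_tbl *t)
{
	t->free = 16;
	t->timer_armed = 0;
	demo_timer_start(t);	/* no-op stub when the option is compiled out */
}

static void demo_tbl_free(struct demo_flw_tbl *t)
{
	demo_timer_stop(t);	/* stop the (possibly stubbed) timer before teardown */
	t->free = 0;
}

int main(void)
{
	struct demo_flw_tbl t;

	demo_tbl_init(&t);
	printf("timer_armed=%d free=%d\n", t.timer_armed, t.free);
	demo_tbl_free(&t);
	return 0;
}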