From 121fa4b77775549c3c5eb41eb335d7dcbb801f90 Mon Sep 17 00:00:00 2001
From: Zoltan Kiss
Date: Thu, 6 Mar 2014 21:48:24 +0000
Subject: [PATCH] xen-netback: Minor refactoring of netback code

This patch contains a few bits of refactoring before introducing the grant
mapping changes:
- introduce xenvif_tx_pending_slots_available(), as it is used several
  times and will be used more often
- rename the thread to vifX.Y-guest-rx, to signify that it does RX work
  from the guest's point of view

Signed-off-by: Zoltan Kiss
Signed-off-by: David S. Miller
---
 drivers/net/xen-netback/common.h    | 23 ++++++++++++++++++++++-
 drivers/net/xen-netback/interface.c |  4 ++--
 drivers/net/xen-netback/netback.c   | 22 +++-------------------
 3 files changed, 27 insertions(+), 22 deletions(-)

diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index ae413a2cbee7..9d3584545e5d 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -108,6 +108,15 @@ struct xenvif_rx_meta {
  */
 #define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
 
+#define NETBACK_INVALID_HANDLE -1
+
+/* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
+ * the maximum slots a valid packet can use. Now this value is defined
+ * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
+ * all backend.
+ */
+#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
+
 struct xenvif {
 	/* Unique identifier for this interface. */
 	domid_t domid;
@@ -216,7 +225,7 @@ void xenvif_carrier_off(struct xenvif *vif);
 
 int xenvif_tx_action(struct xenvif *vif, int budget);
 
-int xenvif_kthread(void *data);
+int xenvif_kthread_guest_rx(void *data);
 void xenvif_kick_thread(struct xenvif *vif);
 
 /* Determine whether the needed number of slots (req) are available,
@@ -226,6 +235,18 @@ bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
 
 void xenvif_stop_queue(struct xenvif *vif);
 
+static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
+{
+	return MAX_PENDING_REQS -
+		vif->pending_prod + vif->pending_cons;
+}
+
+static inline bool xenvif_tx_pending_slots_available(struct xenvif *vif)
+{
+	return nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
+		< MAX_PENDING_REQS;
+}
+
 extern bool separate_tx_rx_irq;
 
 #endif /* __XEN_NETBACK__COMMON_H__ */
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 7669d49a67e2..bc32627a22cb 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -421,8 +421,8 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 		disable_irq(vif->rx_irq);
 	}
 
-	task = kthread_create(xenvif_kthread,
-			      (void *)vif, "%s", vif->dev->name);
+	task = kthread_create(xenvif_kthread_guest_rx,
+			      (void *)vif, "%s-guest-rx", vif->dev->name);
 	if (IS_ERR(task)) {
 		pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
 		err = PTR_ERR(task);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 43ae4bad50c4..715d810124eb 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -62,14 +62,6 @@ module_param(separate_tx_rx_irq, bool, 0644);
 static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
 module_param(fatal_skb_slots, uint, 0444);
 
-/*
- * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
- * the maximum slots a valid packet can use. Now this value is defined
- * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
- * all backend.
- */
-#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
-
 /*
  * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
  * one or more merged tx requests, otherwise it is the continuation of
@@ -131,12 +123,6 @@ static inline pending_ring_idx_t pending_index(unsigned i)
 	return i & (MAX_PENDING_REQS-1);
 }
 
-static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
-{
-	return MAX_PENDING_REQS -
-		vif->pending_prod + vif->pending_cons;
-}
-
 bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
 {
 	RING_IDX prod, cons;
@@ -1116,8 +1102,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
 	struct sk_buff *skb;
 	int ret;
 
-	while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
-		< MAX_PENDING_REQS) &&
+	while (xenvif_tx_pending_slots_available(vif) &&
 	       (skb_queue_len(&vif->tx_queue) < budget)) {
 		struct xen_netif_tx_request txreq;
 		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
@@ -1487,8 +1472,7 @@ static inline int tx_work_todo(struct xenvif *vif)
 {
 
 	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) &&
-	    (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
-	     < MAX_PENDING_REQS))
+	    xenvif_tx_pending_slots_available(vif))
 		return 1;
 
 	return 0;
@@ -1551,7 +1535,7 @@ static void xenvif_start_queue(struct xenvif *vif)
 		netif_wake_queue(vif->dev);
 }
 
-int xenvif_kthread(void *data)
+int xenvif_kthread_guest_rx(void *data)
 {
 	struct xenvif *vif = data;
 	struct sk_buff *skb;
-- 
2.30.2
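
For reference, a minimal user-space sketch of the pending-slot accounting
that the new helpers consolidate. The struct xenvif here is reduced to the
two counters the helpers read, and the constant values are illustrative
stand-ins rather than the driver's own definitions; only the arithmetic in
nr_pending_reqs() and xenvif_tx_pending_slots_available() follows the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the real driver takes these from its headers. */
#define MAX_PENDING_REQS 256
#define XEN_NETBK_LEGACY_SLOTS_MAX 18

typedef uint16_t pending_ring_idx_t;

struct xenvif {
	/* Reduced to the two counters the helpers read. */
	pending_ring_idx_t pending_prod;
	pending_ring_idx_t pending_cons;
};

/* pending_prod - pending_cons is the number of free entries in the pending
 * ring, so the slots currently in use are the remainder of MAX_PENDING_REQS.
 */
static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
{
	return MAX_PENDING_REQS - vif->pending_prod + vif->pending_cons;
}

/* True while a worst-case packet (XEN_NETBK_LEGACY_SLOTS_MAX slots) can
 * still be accepted without overrunning the pending ring.
 */
static inline bool xenvif_tx_pending_slots_available(struct xenvif *vif)
{
	return nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
		< MAX_PENDING_REQS;
}

int main(void)
{
	struct xenvif vif = { .pending_prod = 300, .pending_cons = 100 };

	printf("slots in use: %u, room for another packet: %s\n",
	       (unsigned)nr_pending_reqs(&vif),
	       xenvif_tx_pending_slots_available(&vif) ? "yes" : "no");
	return 0;
}

The comparison deliberately keeps XEN_NETBK_LEGACY_SLOTS_MAX slots of
headroom, so a maximally fragmented packet never overruns the pending ring;
the patch only moves this check behind a helper instead of open-coding it at
each call site.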