#define BRIDGE_VLAN_INFO_UNTAGGED (1<<2) /* VLAN egresses untagged */
#define BRIDGE_VLAN_INFO_RANGE_BEGIN (1<<3) /* VLAN is start of vlan range */
#define BRIDGE_VLAN_INFO_RANGE_END (1<<4) /* VLAN is end of vlan range */
+#define BRIDGE_VLAN_INFO_BRENTRY (1<<5) /* Global bridge VLAN entry */
struct bridge_vlan_info {
__u16 flags;
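The new BRIDGE_VLAN_INFO_BRENTRY flag marks a VLAN that is really configured on the bridge device itself, as opposed to a bridge-level entry kept only as global context for port VLANs. A minimal userspace sketch of how the flag combinations read (the two defines are repeated locally so it builds standalone; this is illustration, not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define BRIDGE_VLAN_INFO_MASTER  (1 << 0)	/* operate on the bridge device */
#define BRIDGE_VLAN_INFO_BRENTRY (1 << 5)	/* global bridge VLAN entry */

static bool is_usable_bridge_vlan(unsigned short flags)
{
	/* a master (bridge-level) entry filters traffic only when BRENTRY
	 * is also set; otherwise it is just global per-VLAN context
	 */
	if (flags & BRIDGE_VLAN_INFO_MASTER)
		return flags & BRIDGE_VLAN_INFO_BRENTRY;
	return true;	/* per-port entries are always usable */
}

int main(void)
{
	printf("master only:      %d\n",
	       is_usable_bridge_vlan(BRIDGE_VLAN_INFO_MASTER));
	printf("master + brentry: %d\n",
	       is_usable_bridge_vlan(BRIDGE_VLAN_INFO_MASTER |
				     BRIDGE_VLAN_INFO_BRENTRY));
	return 0;
}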
skb_reset_mac_header(skb);
skb_pull(skb, ETH_HLEN);
- if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid))
+ if (!br_allowed_ingress(br, skb, &vid))
goto out;
if (is_broadcast_ether_addr(dest))
struct net_bridge_fdb_entry *f)
{
const unsigned char *addr = f->addr.addr;
- u16 vid = f->vlan_id;
+ struct net_bridge_vlan_group *vg;
+ const struct net_bridge_vlan *v;
struct net_bridge_port *op;
+ u16 vid = f->vlan_id;
/* Maybe another port has same hw addr? */
list_for_each_entry(op, &br->port_list, list) {
+ vg = nbp_vlan_group(op);
if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
- (!vid || nbp_vlan_find(op, vid))) {
+ (!vid || br_vlan_find(vg, vid))) {
f->dst = op;
f->added_by_user = 0;
return;
}
}
+ vg = br_vlan_group(br);
+ v = br_vlan_find(vg, vid);
/* Maybe bridge device has same hw addr? */
if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
- (!vid || br_vlan_find(br, vid))) {
+ (!vid || (v && br_vlan_should_use(v)))) {
f->dst = NULL;
f->added_by_user = 0;
return;
void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
{
+ struct net_bridge_vlan_group *vg;
struct net_bridge *br = p->br;
- struct net_port_vlans *pv = nbp_get_vlan_info(p);
- bool no_vlan = !pv;
+ struct net_bridge_vlan *v;
int i;
- u16 vid;
spin_lock_bh(&br->hash_lock);
+ vg = nbp_vlan_group(p);
/* Search all chains since old address/hash is unknown */
for (i = 0; i < BR_HASH_SIZE; i++) {
struct hlist_node *h;
* configured, we can safely be done at
* this point.
*/
- if (no_vlan)
+ if (!vg || !vg->num_vlans)
goto insert;
}
}
/* insert new address, may fail if invalid address or dup. */
fdb_insert(br, p, newaddr, 0);
- if (no_vlan)
+ if (!vg || !vg->num_vlans)
goto done;
/* Now add entries for every VLAN configured on the port.
* This function runs under RTNL so the bitmap will not change
* from under us.
*/
- for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
- fdb_insert(br, p, newaddr, vid);
+ list_for_each_entry(v, &vg->vlan_list, vlist)
+ fdb_insert(br, p, newaddr, v->vid);
done:
spin_unlock_bh(&br->hash_lock);
void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
{
+ struct net_bridge_vlan_group *vg;
struct net_bridge_fdb_entry *f;
- struct net_port_vlans *pv;
- u16 vid = 0;
+ struct net_bridge_vlan *v;
spin_lock_bh(&br->hash_lock);
fdb_delete_local(br, NULL, f);
fdb_insert(br, NULL, newaddr, 0);
-
+ vg = br_vlan_group(br);
+ if (!vg || !vg->num_vlans)
+ goto out;
/* Now remove and add entries for every VLAN configured on the
* bridge. This function runs under RTNL so the bitmap will not
* change from under us.
*/
- pv = br_get_vlan_info(br);
- if (!pv)
- goto out;
-
- for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) {
- f = __br_fdb_get(br, br->dev->dev_addr, vid);
+ list_for_each_entry(v, &vg->vlan_list, vlist) {
+ f = __br_fdb_get(br, br->dev->dev_addr, v->vid);
if (f && f->is_local && !f->dst)
fdb_delete_local(br, NULL, f);
- fdb_insert(br, NULL, newaddr, vid);
+ fdb_insert(br, NULL, newaddr, v->vid);
}
out:
spin_unlock_bh(&br->hash_lock);
struct net_device *dev,
const unsigned char *addr, u16 vid, u16 nlh_flags)
{
+ struct net_bridge_vlan_group *vg;
struct net_bridge_port *p;
+ struct net_bridge_vlan *v;
int err = 0;
- struct net_port_vlans *pv;
if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
return -EINVAL;
}
- pv = nbp_get_vlan_info(p);
+ vg = nbp_vlan_group(p);
if (vid) {
- if (!pv || !test_bit(vid, pv->vlan_bitmap)) {
+ v = br_vlan_find(vg, vid);
+ if (!v) {
pr_info("bridge: RTM_NEWNEIGH with unconfigured "
"vlan %d on port %s\n", vid, dev->name);
return -EINVAL;
err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
} else {
err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
- if (err || !pv)
+ if (err || !vg || !vg->num_vlans)
goto out;
/* We have vlans configured on this port and user didn't
* specify a VLAN. To be nice, add/update entry for every
* vlan on this port.
*/
- for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
- err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
+ list_for_each_entry(v, &vg->vlan_list, vlist) {
+ err = __br_fdb_add(ndm, p, addr, nlh_flags, v->vid);
if (err)
goto out;
}
struct net_device *dev,
const unsigned char *addr, u16 vid)
{
+ struct net_bridge_vlan_group *vg;
struct net_bridge_port *p;
+ struct net_bridge_vlan *v;
int err;
- struct net_port_vlans *pv;
p = br_port_get_rtnl(dev);
if (p == NULL) {
return -EINVAL;
}
- pv = nbp_get_vlan_info(p);
+ vg = nbp_vlan_group(p);
if (vid) {
- if (!pv || !test_bit(vid, pv->vlan_bitmap)) {
+ v = br_vlan_find(vg, vid);
+ if (!v) {
pr_info("bridge: RTM_DELNEIGH with unconfigured "
"vlan %d on port %s\n", vid, dev->name);
return -EINVAL;
} else {
err = -ENOENT;
err &= __br_fdb_delete(p, addr, 0);
- if (!pv)
+ if (!vg || !vg->num_vlans)
goto out;
- /* We have vlans configured on this port and user didn't
- * specify a VLAN. To be nice, add/update entry for every
- * vlan on this port.
- */
- for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
- err &= __br_fdb_delete(p, addr, vid);
- }
+ list_for_each_entry(v, &vg->vlan_list, vlist)
+ err &= __br_fdb_delete(p, addr, v->vid);
}
out:
return err;
static inline int should_deliver(const struct net_bridge_port *p,
const struct sk_buff *skb)
{
+ struct net_bridge_vlan_group *vg;
+
+ vg = nbp_vlan_group(p);
return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
- br_allowed_egress(p->br, nbp_get_vlan_info(p), skb) &&
- p->state == BR_STATE_FORWARDING;
+ br_allowed_egress(vg, skb) && p->state == BR_STATE_FORWARDING;
}
int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
- skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
+ struct net_bridge_vlan_group *vg;
+
+ vg = nbp_vlan_group(to);
+ skb = br_handle_vlan(to->br, vg, skb);
if (!skb)
return;
static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
+ struct net_bridge_vlan_group *vg;
struct net_device *indev;
if (skb_warn_if_lro(skb)) {
return;
}
- skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
+ vg = nbp_vlan_group(to);
+ skb = br_handle_vlan(to->br, vg, skb);
if (!skb)
return;
{
struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
struct net_bridge *br = netdev_priv(brdev);
+ struct net_bridge_vlan_group *vg;
struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
- struct net_port_vlans *pv;
u64_stats_update_begin(&brstats->syncp);
brstats->rx_packets++;
brstats->rx_bytes += skb->len;
u64_stats_update_end(&brstats->syncp);
+ vg = br_vlan_group(br);
/* Bridge is just like any other port. Make sure the
* packet is allowed except in promisc mode when someone
* may be running packet capture.
*/
- pv = br_get_vlan_info(br);
if (!(brdev->flags & IFF_PROMISC) &&
- !br_allowed_egress(br, pv, skb)) {
+ !br_allowed_egress(vg, skb)) {
kfree_skb(skb);
return NET_RX_DROP;
}
indev = skb->dev;
skb->dev = brdev;
- skb = br_handle_vlan(br, pv, skb);
+ skb = br_handle_vlan(br, vg, skb);
if (!skb)
return NET_RX_DROP;
if (!p || p->state == BR_STATE_DISABLED)
goto drop;
- if (!br_allowed_ingress(p->br, nbp_get_vlan_info(p), skb, &vid))
+ if (!nbp_allowed_ingress(p, skb, &vid))
goto out;
/* insert into forwarding database after filtering to avoid spoofing */
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
- unsigned short vid = VLAN_N_VID;
+ struct net_bridge_vlan_group *vg;
struct net_device *dev, *pdev;
struct br_mdb_entry *entry;
struct net_bridge_port *p;
- struct net_port_vlans *pv;
+ struct net_bridge_vlan *v;
struct net_bridge *br;
int err;
if (!p || p->br != br || p->state == BR_STATE_DISABLED)
return -EINVAL;
- pv = nbp_get_vlan_info(p);
- if (br_vlan_enabled(br) && pv && entry->vid == 0) {
- for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
- entry->vid = vid;
+ vg = nbp_vlan_group(p);
+ if (br_vlan_enabled(br) && vg && entry->vid == 0) {
+ list_for_each_entry(v, &vg->vlan_list, vlist) {
+ entry->vid = v->vid;
err = __br_mdb_add(net, br, entry);
if (err)
break;
static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
- unsigned short vid = VLAN_N_VID;
+ struct net_bridge_vlan_group *vg;
struct net_device *dev, *pdev;
struct br_mdb_entry *entry;
struct net_bridge_port *p;
- struct net_port_vlans *pv;
+ struct net_bridge_vlan *v;
struct net_bridge *br;
int err;
if (!p || p->br != br || p->state == BR_STATE_DISABLED)
return -EINVAL;
- pv = nbp_get_vlan_info(p);
- if (br_vlan_enabled(br) && pv && entry->vid == 0) {
- for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
- entry->vid = vid;
+ vg = nbp_vlan_group(p);
+ if (br_vlan_enabled(br) && vg && entry->vid == 0) {
+ list_for_each_entry(v, &vg->vlan_list, vlist) {
+ entry->vid = v->vid;
err = __br_mdb_del(br, entry);
if (!err)
__br_mdb_notify(dev, entry, RTM_DELMDB);
#include "br_private.h"
#include "br_private_stp.h"
-static int br_get_num_vlan_infos(const struct net_port_vlans *pv,
- u32 filter_mask)
+static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
+ u32 filter_mask,
+ u16 pvid)
{
- u16 vid_range_start = 0, vid_range_end = 0;
- u16 vid_range_flags = 0;
- u16 pvid, vid, flags;
+ struct net_bridge_vlan *v;
+ u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
+ u16 flags;
int num_vlans = 0;
- if (filter_mask & RTEXT_FILTER_BRVLAN)
- return pv->num_vlans;
-
if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
return 0;
- /* Count number of vlan info's
- */
- pvid = br_get_pvid(pv);
- for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
+ /* Count number of vlan infos */
+ list_for_each_entry(v, &vg->vlan_list, vlist) {
flags = 0;
- if (vid == pvid)
+ /* only a context, bridge vlan not activated */
+ if (!br_vlan_should_use(v))
+ continue;
+ if (v->vid == pvid)
flags |= BRIDGE_VLAN_INFO_PVID;
- if (test_bit(vid, pv->untagged_bitmap))
+ if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
flags |= BRIDGE_VLAN_INFO_UNTAGGED;
if (vid_range_start == 0) {
goto initvars;
- } else if ((vid - vid_range_end) == 1 &&
+ } else if ((v->vid - vid_range_end) == 1 &&
flags == vid_range_flags) {
- vid_range_end = vid;
+ vid_range_end = v->vid;
continue;
} else {
if ((vid_range_end - vid_range_start) > 0)
num_vlans += 1;
}
initvars:
- vid_range_start = vid;
- vid_range_end = vid;
+ vid_range_start = v->vid;
+ vid_range_end = v->vid;
vid_range_flags = flags;
}
return num_vlans;
}
+static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
+ u32 filter_mask, u16 pvid)
+{
+ if (!vg)
+ return 0;
+
+ if (filter_mask & RTEXT_FILTER_BRVLAN)
+ return vg->num_vlans;
+
+ return __get_num_vlan_infos(vg, filter_mask, pvid);
+}
+
static size_t br_get_link_af_size_filtered(const struct net_device *dev,
u32 filter_mask)
{
- struct net_port_vlans *pv;
+ struct net_bridge_vlan_group *vg = NULL;
+ struct net_bridge_port *p;
+ struct net_bridge *br;
int num_vlan_infos;
+ u16 pvid = 0;
rcu_read_lock();
- if (br_port_exists(dev))
- pv = nbp_get_vlan_info(br_port_get_rcu(dev));
- else if (dev->priv_flags & IFF_EBRIDGE)
- pv = br_get_vlan_info((struct net_bridge *)netdev_priv(dev));
- else
- pv = NULL;
- if (pv)
- num_vlan_infos = br_get_num_vlan_infos(pv, filter_mask);
- else
- num_vlan_infos = 0;
+ if (br_port_exists(dev)) {
+ p = br_port_get_rcu(dev);
+ vg = nbp_vlan_group(p);
+ pvid = nbp_get_pvid(p);
+ } else if (dev->priv_flags & IFF_EBRIDGE) {
+ br = netdev_priv(dev);
+ vg = br_vlan_group(br);
+ pvid = br_get_pvid(br);
+ }
+ num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask, pvid);
rcu_read_unlock();
- if (!num_vlan_infos)
- return 0;
-
/* Each VLAN is returned in bridge_vlan_info along with flags */
return num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));
}
}
static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
- const struct net_port_vlans *pv)
+ struct net_bridge_vlan_group *vg,
+ u16 pvid)
{
- u16 vid_range_start = 0, vid_range_end = 0;
- u16 vid_range_flags = 0;
- u16 pvid, vid, flags;
+ struct net_bridge_vlan *v;
+ u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
+ u16 flags;
int err = 0;
/* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
* and mark vlan info with begin and end flags
* if vlaninfo represents a range
*/
- pvid = br_get_pvid(pv);
- for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
+ list_for_each_entry(v, &vg->vlan_list, vlist) {
flags = 0;
- if (vid == pvid)
+ if (!br_vlan_should_use(v))
+ continue;
+ if (v->vid == pvid)
flags |= BRIDGE_VLAN_INFO_PVID;
- if (test_bit(vid, pv->untagged_bitmap))
+ if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
flags |= BRIDGE_VLAN_INFO_UNTAGGED;
if (vid_range_start == 0) {
goto initvars;
- } else if ((vid - vid_range_end) == 1 &&
+ } else if ((v->vid - vid_range_end) == 1 &&
flags == vid_range_flags) {
- vid_range_end = vid;
+ vid_range_end = v->vid;
continue;
} else {
err = br_fill_ifvlaninfo_range(skb, vid_range_start,
}
initvars:
- vid_range_start = vid;
- vid_range_end = vid;
+ vid_range_start = v->vid;
+ vid_range_end = v->vid;
vid_range_flags = flags;
}
}
static int br_fill_ifvlaninfo(struct sk_buff *skb,
- const struct net_port_vlans *pv)
+ struct net_bridge_vlan_group *vg,
+ u16 pvid)
{
struct bridge_vlan_info vinfo;
- u16 pvid, vid;
+ struct net_bridge_vlan *v;
- pvid = br_get_pvid(pv);
- for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
- vinfo.vid = vid;
+ list_for_each_entry(v, &vg->vlan_list, vlist) {
+ if (!br_vlan_should_use(v))
+ continue;
+
+ vinfo.vid = v->vid;
vinfo.flags = 0;
- if (vid == pvid)
+ if (v->vid == pvid)
vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
- if (test_bit(vid, pv->untagged_bitmap))
+ if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
* Contains port and master info as well as carrier and bridge state.
*/
static int br_fill_ifinfo(struct sk_buff *skb,
- const struct net_bridge_port *port,
+ struct net_bridge_port *port,
u32 pid, u32 seq, int event, unsigned int flags,
u32 filter_mask, const struct net_device *dev)
{
- const struct net_bridge *br;
+ struct net_bridge *br;
struct ifinfomsg *hdr;
struct nlmsghdr *nlh;
u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
/* Check if the VID information is requested */
if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
- const struct net_port_vlans *pv;
+ struct net_bridge_vlan_group *vg;
struct nlattr *af;
+ u16 pvid;
int err;
- if (port)
- pv = nbp_get_vlan_info(port);
- else
- pv = br_get_vlan_info(br);
+ if (port) {
+ vg = nbp_vlan_group(port);
+ pvid = nbp_get_pvid(port);
+ } else {
+ vg = br_vlan_group(br);
+ pvid = br_get_pvid(br);
+ }
- if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID))
+ if (!vg || !vg->num_vlans)
goto done;
af = nla_nest_start(skb, IFLA_AF_SPEC);
goto nla_put_failure;
if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
- err = br_fill_ifvlaninfo_compressed(skb, pv);
+ err = br_fill_ifvlaninfo_compressed(skb, vg, pvid);
else
- err = br_fill_ifvlaninfo(skb, pv);
+ err = br_fill_ifvlaninfo(skb, vg, pvid);
if (err)
goto nla_put_failure;
nla_nest_end(skb, af);
switch (cmd) {
case RTM_SETLINK:
if (p) {
+ /* if the MASTER flag is set this will act on the global
+ * per-VLAN entry as well
+ */
err = nbp_vlan_add(p, vinfo->vid, vinfo->flags);
if (err)
break;
-
- if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
- err = br_vlan_add(p->br, vinfo->vid,
- vinfo->flags);
} else {
+ vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
err = br_vlan_add(br, vinfo->vid, vinfo->flags);
}
break;
static size_t br_get_link_af_size(const struct net_device *dev)
{
- struct net_port_vlans *pv;
-
- if (br_port_exists(dev))
- pv = nbp_get_vlan_info(br_port_get_rtnl(dev));
- else if (dev->priv_flags & IFF_EBRIDGE)
- pv = br_get_vlan_info((struct net_bridge *)netdev_priv(dev));
- else
- return 0;
+ struct net_bridge_port *p;
+ struct net_bridge *br;
+ int num_vlans = 0;
- if (!pv)
- return 0;
+ if (br_port_exists(dev)) {
+ p = br_port_get_rtnl(dev);
+ num_vlans = br_get_num_vlan_infos(nbp_vlan_group(p),
+ RTEXT_FILTER_BRVLAN, 0);
+ } else if (dev->priv_flags & IFF_EBRIDGE) {
+ br = netdev_priv(dev);
+ num_vlans = br_get_num_vlan_infos(br_vlan_group(br),
+ RTEXT_FILTER_BRVLAN, 0);
+ }
/* Each VLAN is returned in bridge_vlan_info along with flags */
- return pv->num_vlans * nla_total_size(sizeof(struct bridge_vlan_info));
+ return num_vlans * nla_total_size(sizeof(struct bridge_vlan_info));
}
static struct rtnl_af_ops br_af_ops __read_mostly = {
#include <net/route.h>
#include <net/ip6_fib.h>
#include <linux/if_vlan.h>
+#include <linux/rhashtable.h>
#define BR_HASH_BITS 8
#define BR_HASH_SIZE (1 << BR_HASH_BITS)
#define BR_PORT_BITS 10
#define BR_MAX_PORTS (1<<BR_PORT_BITS)
-#define BR_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID)
#define BR_VERSION "2.3"
};
#endif
-struct net_port_vlans {
- u16 port_idx;
- u16 pvid;
+/**
+ * struct net_bridge_vlan - per-vlan entry
+ *
+ * @vnode: rhashtable member
+ * @vid: VLAN id
+ * @flags: bridge vlan flags
+ * @br: if MASTER flag set, this points to a bridge struct
+ * @port: if MASTER flag unset, this points to a port struct
+ * @refcnt: if MASTER flag set, this is bumped for each port referencing it
+ * @brvlan: if MASTER flag unset, this points to the global per-VLAN context
+ * for this VLAN entry
+ * @vlist: sorted list of VLAN entries
+ * @rcu: used for entry destruction
+ *
+ * This structure is shared between the global per-VLAN entries contained in
+ * the bridge rhashtable and the local per-port per-VLAN entries contained in
+ * the port's rhashtable. The union entries should be interpreted depending on
+ * the entry flags that are set.
+ */
+struct net_bridge_vlan {
+ struct rhash_head vnode;
+ u16 vid;
+ u16 flags;
union {
- struct net_bridge_port *port;
- struct net_bridge *br;
- } parent;
+ struct net_bridge *br;
+ struct net_bridge_port *port;
+ };
+ union {
+ atomic_t refcnt;
+ struct net_bridge_vlan *brvlan;
+ };
+ struct list_head vlist;
+
struct rcu_head rcu;
- unsigned long vlan_bitmap[BR_VLAN_BITMAP_LEN];
- unsigned long untagged_bitmap[BR_VLAN_BITMAP_LEN];
+};
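The kerneldoc above is the key to reading the two unions: which member is valid is decided by the MASTER flag. A small self-contained userspace mirror of that tagged-union pattern (simplified types, names assumed, not the kernel structure):

#include <stdio.h>

#define FLAG_MASTER (1 << 0)	/* stand-in for BRIDGE_VLAN_INFO_MASTER */

struct bridge { const char *name; };
struct port   { const char *name; };

struct vlan_entry {
	unsigned short vid;
	unsigned short flags;		/* BRIDGE_VLAN_INFO_* */
	union {				/* parent, selected by FLAG_MASTER */
		struct bridge *br;	/* valid when FLAG_MASTER is set */
		struct port *port;	/* valid when it is not */
	};
	union {				/* bookkeeping, same selector */
		int refcnt;		/* master entry: referencing ports */
		struct vlan_entry *brvlan; /* port entry: its master entry */
	};
};

static const char *parent_name(const struct vlan_entry *v)
{
	return (v->flags & FLAG_MASTER) ? v->br->name : v->port->name;
}

int main(void)
{
	struct bridge br0 = { "br0" };
	struct port swp1 = { "swp1" };
	struct vlan_entry bv = { .vid = 10, .flags = FLAG_MASTER, .br = &br0 };
	struct vlan_entry pv = { .vid = 10, .flags = 0, .port = &swp1, .brvlan = &bv };

	printf("vlan %u: master on %s, port entry on %s\n",
	       bv.vid, parent_name(&bv), parent_name(&pv));
	return 0;
}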
+
+/**
+ * struct net_bridge_vlan_group
+ *
+ * @vlan_hash: VLAN entry rhashtable
+ * @vlan_list: sorted VLAN entry list
+ * @num_vlans: number of total VLAN entries
+ *
+ * IMPORTANT: Be careful when checking if there are VLAN entries using list
+ * primitives because the bridge can have entries in its list which
+ * are just for global context but not for filtering, i.e. they have
+ * the master flag set but not the brentry flag. If you have to check
+ * if there are "real" entries in the bridge, please test @num_vlans.
+ */
+struct net_bridge_vlan_group {
+ struct rhashtable vlan_hash;
+ struct list_head vlan_list;
u16 num_vlans;
};
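As the IMPORTANT note warns, a non-empty vlan_list does not imply that the bridge filters anything, because master-only context entries live in the same list. A hedged sketch of the recommended check, written against the structure above (the helper name is made up):

static inline bool vlan_group_has_vlans(const struct net_bridge_vlan_group *vg)
{
	/* test num_vlans, not list_empty(&vg->vlan_list) */
	return vg && vg->num_vlans;
}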
struct netpoll *np;
#endif
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
- struct net_port_vlans __rcu *vlan_info;
+ struct net_bridge_vlan_group *vlgrp;
+ u16 pvid;
#endif
};
struct kobject *ifobj;
u32 auto_cnt;
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+ struct net_bridge_vlan_group *vlgrp;
u8 vlan_enabled;
__be16 vlan_proto;
u16 default_pvid;
- struct net_port_vlans __rcu *vlan_info;
+ u16 pvid;
#endif
};
return !memcmp(&br->bridge_id, &br->designated_root, 8);
}
+/* check if a VLAN entry is global */
+static inline bool br_vlan_is_master(const struct net_bridge_vlan *v)
+{
+ return v->flags & BRIDGE_VLAN_INFO_MASTER;
+}
+
+/* check if a VLAN entry is used by the bridge */
+static inline bool br_vlan_is_brentry(const struct net_bridge_vlan *v)
+{
+ return v->flags & BRIDGE_VLAN_INFO_BRENTRY;
+}
+
+/* check if we should use the vlan entry; returns false if it is only a context entry */
+static inline bool br_vlan_should_use(const struct net_bridge_vlan *v)
+{
+ if (br_vlan_is_master(v)) {
+ if (br_vlan_is_brentry(v))
+ return true;
+ else
+ return false;
+ }
+
+ return true;
+}
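For reference, the helper above collapses to a single expression; this equivalent form is illustration only and not part of the patch:

static inline bool br_vlan_should_use_equiv(const struct net_bridge_vlan *v)
{
	/* usable unless it is a master entry without the BRENTRY flag */
	return !br_vlan_is_master(v) || br_vlan_is_brentry(v);
}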
+
/* br_device.c */
void br_dev_setup(struct net_device *dev);
void br_dev_delete(struct net_device *dev, struct list_head *list);
/* br_vlan.c */
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
-bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
- struct sk_buff *skb, u16 *vid);
-bool br_allowed_egress(struct net_bridge *br, const struct net_port_vlans *v,
+bool br_allowed_ingress(struct net_bridge *br, struct sk_buff *skb, u16 *vid);
+bool nbp_allowed_ingress(struct net_bridge_port *p, struct sk_buff *skb,
+ u16 *vid);
+bool br_allowed_egress(struct net_bridge_vlan_group *br,
const struct sk_buff *skb);
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid);
struct sk_buff *br_handle_vlan(struct net_bridge *br,
- const struct net_port_vlans *v,
+ struct net_bridge_vlan_group *vg,
struct sk_buff *skb);
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
int br_vlan_delete(struct net_bridge *br, u16 vid);
void br_vlan_flush(struct net_bridge *br);
-bool br_vlan_find(struct net_bridge *br, u16 vid);
+struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid);
void br_recalculate_fwd_mask(struct net_bridge *br);
int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
void nbp_vlan_flush(struct net_bridge_port *port);
-bool nbp_vlan_find(struct net_bridge_port *port, u16 vid);
int nbp_vlan_init(struct net_bridge_port *port);
+int nbp_get_num_vlan_infos(struct net_bridge_port *p, u32 filter_mask);
-static inline struct net_port_vlans *br_get_vlan_info(
- const struct net_bridge *br)
+static inline struct net_bridge_vlan_group *br_vlan_group(
+ const struct net_bridge *br)
{
- return rcu_dereference_rtnl(br->vlan_info);
+ return br->vlgrp;
}
-static inline struct net_port_vlans *nbp_get_vlan_info(
- const struct net_bridge_port *p)
+static inline struct net_bridge_vlan_group *nbp_vlan_group(
+ const struct net_bridge_port *p)
{
- return rcu_dereference_rtnl(p->vlan_info);
+ return p->vlgrp;
}
/* Since bridge now depends on 8021Q module, but the time bridge sees the
{
int err = 0;
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tag_present(skb)) {
*vid = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
- else {
+ } else {
*vid = 0;
err = -EINVAL;
}
return err;
}
-static inline u16 br_get_pvid(const struct net_port_vlans *v)
+static inline u16 br_get_pvid(const struct net_bridge *br)
{
- if (!v)
+ if (!br)
return 0;
smp_rmb();
- return v->pvid;
+ return br->pvid;
+}
+
+static inline u16 nbp_get_pvid(const struct net_bridge_port *p)
+{
+ if (!p)
+ return 0;
+
+ smp_rmb();
+ return p->pvid;
}
static inline int br_vlan_enabled(struct net_bridge *br)
}
#else
static inline bool br_allowed_ingress(struct net_bridge *br,
- struct net_port_vlans *v,
struct sk_buff *skb,
u16 *vid)
{
return true;
}
-static inline bool br_allowed_egress(struct net_bridge *br,
- const struct net_port_vlans *v,
+static inline bool nbp_allowed_ingress(struct net_bridge_port *p,
+ struct sk_buff *skb,
+ u16 *vid)
+{
+ return true;
+}
+
+static inline bool br_allowed_egress(struct net_bridge_vlan_group *vg,
const struct sk_buff *skb)
{
return true;
}
static inline struct sk_buff *br_handle_vlan(struct net_bridge *br,
- const struct net_port_vlans *v,
+ struct net_bridge_vlan_group *vg,
struct sk_buff *skb)
{
return skb;
{
}
-static inline bool br_vlan_find(struct net_bridge *br, u16 vid)
-{
- return false;
-}
-
static inline void br_recalculate_fwd_mask(struct net_bridge *br)
{
}
{
}
-static inline struct net_port_vlans *br_get_vlan_info(
- const struct net_bridge *br)
-{
- return NULL;
-}
-static inline struct net_port_vlans *nbp_get_vlan_info(
- const struct net_bridge_port *p)
+static inline struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg,
+ u16 vid)
{
return NULL;
}
-static inline bool nbp_vlan_find(struct net_bridge_port *port, u16 vid)
+static inline int nbp_vlan_init(struct net_bridge_port *port)
{
- return false;
+ return 0;
}
-static inline int nbp_vlan_init(struct net_bridge_port *port)
+static inline u16 br_vlan_get_tag(const struct sk_buff *skb, u16 *tag)
{
return 0;
}
-static inline u16 br_vlan_get_tag(const struct sk_buff *skb, u16 *tag)
+static inline u16 br_get_pvid(const struct net_bridge *br)
{
return 0;
}
-static inline u16 br_get_pvid(const struct net_port_vlans *v)
+
+static inline u16 nbp_get_pvid(const struct net_bridge_port *p)
{
return 0;
}
{
return -EOPNOTSUPP;
}
+
+static inline int nbp_get_num_vlan_infos(struct net_bridge_port *p,
+ u32 filter_mask)
+{
+ return 0;
+}
+
+static inline struct net_bridge_vlan_group *br_vlan_group(
+ const struct net_bridge *br)
+{
+ return NULL;
+}
+
+static inline struct net_bridge_vlan_group *nbp_vlan_group(
+ const struct net_bridge_port *p)
+{
+ return NULL;
+}
#endif
struct nf_br_ops {
#include "br_private.h"
-static void __vlan_add_pvid(struct net_port_vlans *v, u16 vid)
+static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
+ const void *ptr)
{
- if (v->pvid == vid)
+ const struct net_bridge_vlan *vle = ptr;
+ u16 vid = *(u16 *)arg->key;
+
+ return vle->vid != vid;
+}
+
+static const struct rhashtable_params br_vlan_rht_params = {
+ .head_offset = offsetof(struct net_bridge_vlan, vnode),
+ .key_offset = offsetof(struct net_bridge_vlan, vid),
+ .key_len = sizeof(u16),
+ .max_size = VLAN_N_VID,
+ .obj_cmpfn = br_vlan_cmp,
+ .automatic_shrinking = true,
+};
+
+static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
+{
+ return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
+}
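The rhashtable parameters above say: key entries on their 16-bit vid (key_offset/key_len) and treat a zero return from obj_cmpfn as a match, which is why br_vlan_cmp returns vle->vid != vid. A self-contained userspace analogue of the same keyed lookup, using plain chained buckets rather than the rhashtable API (illustration only):

#include <stddef.h>
#include <stdio.h>

struct vlan { unsigned short vid; struct vlan *next; };

#define NBUCKETS 64

static unsigned int hash_vid(unsigned short vid)
{
	return vid % NBUCKETS;
}

static struct vlan *vlan_lookup(struct vlan *tbl[NBUCKETS], unsigned short vid)
{
	struct vlan *v;

	for (v = tbl[hash_vid(vid)]; v; v = v->next)
		if (v->vid == vid)	/* i.e. cmpfn(...) == 0 */
			return v;
	return NULL;
}

int main(void)
{
	static struct vlan *tbl[NBUCKETS];
	struct vlan v10 = { .vid = 10 };

	tbl[hash_vid(10)] = &v10;
	printf("%s\n", vlan_lookup(tbl, 10) ? "found" : "missing");
	return 0;
}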
+
+static void __vlan_add_pvid(u16 *pvid, u16 vid)
+{
+ if (*pvid == vid)
return;
smp_wmb();
- v->pvid = vid;
+ *pvid = vid;
}
-static void __vlan_delete_pvid(struct net_port_vlans *v, u16 vid)
+static void __vlan_delete_pvid(u16 *pvid, u16 vid)
{
- if (v->pvid != vid)
+ if (*pvid != vid)
return;
smp_wmb();
- v->pvid = 0;
+ *pvid = 0;
}
-static void __vlan_add_flags(struct net_port_vlans *v, u16 vid, u16 flags)
+static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
{
- if (flags & BRIDGE_VLAN_INFO_PVID)
- __vlan_add_pvid(v, vid);
- else
- __vlan_delete_pvid(v, vid);
+ if (flags & BRIDGE_VLAN_INFO_PVID) {
+ if (br_vlan_is_master(v))
+ __vlan_add_pvid(&v->br->pvid, v->vid);
+ else
+ __vlan_add_pvid(&v->port->pvid, v->vid);
+ } else {
+ if (br_vlan_is_master(v))
+ __vlan_delete_pvid(&v->br->pvid, v->vid);
+ else
+ __vlan_delete_pvid(&v->port->pvid, v->vid);
+ }
if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
- set_bit(vid, v->untagged_bitmap);
+ v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
else
- clear_bit(vid, v->untagged_bitmap);
+ v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
}
static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
return err;
}
-static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
+static void __vlan_add_list(struct net_bridge_vlan *v)
{
- struct net_bridge_port *p = NULL;
- struct net_bridge *br;
- struct net_device *dev;
- int err;
-
- if (test_bit(vid, v->vlan_bitmap)) {
- __vlan_add_flags(v, vid, flags);
- return 0;
- }
-
- if (v->port_idx) {
- p = v->parent.port;
- br = p->br;
- dev = p->dev;
- } else {
- br = v->parent.br;
- dev = br->dev;
- }
-
- if (p) {
- /* Add VLAN to the device filter if it is supported.
- * This ensures tagged traffic enters the bridge when
- * promiscuous mode is disabled by br_manage_promisc().
- */
- err = __vlan_vid_add(dev, br, vid, flags);
- if (err)
- return err;
- }
+ struct list_head *headp, *hpos;
+ struct net_bridge_vlan *vent;
- err = br_fdb_insert(br, p, dev->dev_addr, vid);
- if (err) {
- br_err(br, "failed insert local address into bridge "
- "forwarding table\n");
- goto out_filt;
+ headp = br_vlan_is_master(v) ? &v->br->vlgrp->vlan_list :
+ &v->port->vlgrp->vlan_list;
+ list_for_each_prev(hpos, headp) {
+ vent = list_entry(hpos, struct net_bridge_vlan, vlist);
+ if (v->vid < vent->vid)
+ continue;
+ else
+ break;
}
+ list_add(&v->vlist, hpos);
+}
- set_bit(vid, v->vlan_bitmap);
- v->num_vlans++;
- __vlan_add_flags(v, vid, flags);
-
- return 0;
-
-out_filt:
- if (p)
- vlan_vid_del(dev, br->vlan_proto, vid);
- return err;
+static void __vlan_del_list(struct net_bridge_vlan *v)
+{
+ list_del(&v->vlist);
}
static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
return err;
}
-static int __vlan_del(struct net_port_vlans *v, u16 vid)
+/* This is the shared VLAN add function which works for both ports and bridge
+ * devices. There are four possible calls to this function in terms of the
+ * vlan entry type:
+ * 1. vlan is being added on a port (no master flags, global entry exists)
+ * 2. vlan is being added on a bridge (both master and brentry flags)
+ * 3. vlan is being added on a port, but a global entry didn't exist which
+ * is being created right now (master flag set, brentry flag unset), the
+ * global entry is used for global per-vlan features, but not for filtering
+ * 4. same as 3 but with both master and brentry flags set so the entry
+ * will be used for filtering in both the port and the bridge
+ */
+static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
{
- if (!test_bit(vid, v->vlan_bitmap))
- return -EINVAL;
+ struct net_bridge_vlan *masterv = NULL;
+ struct net_bridge_port *p = NULL;
+ struct rhashtable *tbl;
+ struct net_device *dev;
+ struct net_bridge *br;
+ int err;
+
+ if (br_vlan_is_master(v)) {
+ br = v->br;
+ dev = br->dev;
+ tbl = &br->vlgrp->vlan_hash;
+ } else {
+ p = v->port;
+ br = p->br;
+ dev = p->dev;
+ tbl = &p->vlgrp->vlan_hash;
+ }
+
+ if (p) {
+ u16 master_flags = flags;
+
+ /* Add VLAN to the device filter if it is supported.
+ * This ensures tagged traffic enters the bridge when
+ * promiscuous mode is disabled by br_manage_promisc().
+ */
+ err = __vlan_vid_add(dev, br, v->vid, flags);
+ if (err)
+ goto out;
+
+ /* need to work on the master vlan too */
+ if (flags & BRIDGE_VLAN_INFO_MASTER) {
+ master_flags |= BRIDGE_VLAN_INFO_BRENTRY;
+ err = br_vlan_add(br, v->vid, master_flags);
+ if (err)
+ goto out_filt;
+ }
+
+ masterv = br_vlan_find(br->vlgrp, v->vid);
+ if (!masterv) {
+ /* missing global ctx, create it now */
+ err = br_vlan_add(br, v->vid, master_flags);
+ if (err)
+ goto out_filt;
+ masterv = br_vlan_find(br->vlgrp, v->vid);
+ WARN_ON(!masterv);
+ }
+ atomic_inc(&masterv->refcnt);
+ v->brvlan = masterv;
+ }
+
+ /* Add the dev mac only if it's a usable vlan */
+ if (br_vlan_should_use(v)) {
+ err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
+ if (err) {
+ br_err(br, "failed insert local address into bridge forwarding table\n");
+ goto out_filt;
+ }
+ }
+
+ err = rhashtable_lookup_insert_fast(tbl, &v->vnode, br_vlan_rht_params);
+ if (err)
+ goto out_fdb_insert;
- __vlan_delete_pvid(v, vid);
- clear_bit(vid, v->untagged_bitmap);
+ __vlan_add_list(v);
+ __vlan_add_flags(v, flags);
+ if (br_vlan_is_master(v)) {
+ if (br_vlan_is_brentry(v))
+ br->vlgrp->num_vlans++;
+ } else {
+ p->vlgrp->num_vlans++;
+ }
+out:
+ return err;
+
+out_fdb_insert:
+ br_fdb_find_delete_local(br, p, br->dev->dev_addr, v->vid);
+
+out_filt:
+ if (p) {
+ __vlan_vid_del(dev, br, v->vid);
+ if (masterv) {
+ atomic_dec(&masterv->refcnt);
+ v->brvlan = NULL;
+ }
+ }
+
+ goto out;
+}
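A hedged sketch of the four call shapes the comment before __vlan_add enumerates, reduced to the flags on the entry being installed (strings and names are illustrative; cases 1, 3 and 4 differ only in whether the global entry already existed):

#include <stdio.h>

#define INFO_MASTER  (1 << 0)	/* stand-ins for the BRIDGE_VLAN_INFO_* flags */
#define INFO_BRENTRY (1 << 5)

static const char *vlan_add_case(unsigned short flags)
{
	if (!(flags & INFO_MASTER))
		return "port vlan; its global context entry is reused or created";
	if (flags & INFO_BRENTRY)
		return "bridge vlan, usable for filtering on the bridge itself";
	return "global context only, kept alive for the port vlans";
}

int main(void)
{
	printf("%s\n", vlan_add_case(0));
	printf("%s\n", vlan_add_case(INFO_MASTER));
	printf("%s\n", vlan_add_case(INFO_MASTER | INFO_BRENTRY));
	return 0;
}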
+
+static int __vlan_del(struct net_bridge_vlan *v)
+{
+ struct net_bridge_vlan *masterv = v;
+ struct net_bridge_port *p = NULL;
+ struct net_bridge *br;
+ int err = 0;
+ struct rhashtable *tbl;
+ u16 *pvid;
- if (v->port_idx) {
- struct net_bridge_port *p = v->parent.port;
- int err;
+ if (br_vlan_is_master(v)) {
+ br = v->br;
+ tbl = &v->br->vlgrp->vlan_hash;
+ pvid = &v->br->pvid;
+ } else {
+ p = v->port;
+ br = p->br;
+ tbl = &p->vlgrp->vlan_hash;
+ masterv = v->brvlan;
+ pvid = &p->pvid;
+ }
- err = __vlan_vid_del(p->dev, p->br, vid);
+ __vlan_delete_pvid(pvid, v->vid);
+ if (p) {
+ err = __vlan_vid_del(p->dev, p->br, v->vid);
if (err)
- return err;
+ goto out;
}
- clear_bit(vid, v->vlan_bitmap);
- v->num_vlans--;
- if (bitmap_empty(v->vlan_bitmap, VLAN_N_VID)) {
- if (v->port_idx)
- RCU_INIT_POINTER(v->parent.port->vlan_info, NULL);
- else
- RCU_INIT_POINTER(v->parent.br->vlan_info, NULL);
+ if (br_vlan_is_master(v)) {
+ if (br_vlan_is_brentry(v)) {
+ v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
+ br->vlgrp->num_vlans--;
+ }
+ } else {
+ p->vlgrp->num_vlans--;
+ }
+
+ if (masterv != v) {
+ rhashtable_remove_fast(tbl, &v->vnode, br_vlan_rht_params);
+ __vlan_del_list(v);
kfree_rcu(v, rcu);
}
- return 0;
+
+ if (atomic_dec_and_test(&masterv->refcnt)) {
+ rhashtable_remove_fast(&masterv->br->vlgrp->vlan_hash,
+ &masterv->vnode, br_vlan_rht_params);
+ __vlan_del_list(masterv);
+ kfree_rcu(masterv, rcu);
+ }
+out:
+ return err;
}
-static void __vlan_flush(struct net_port_vlans *v)
+static void __vlan_flush(struct net_bridge_vlan_group *vlgrp, u16 *pvid)
{
- smp_wmb();
- v->pvid = 0;
- bitmap_zero(v->vlan_bitmap, VLAN_N_VID);
- if (v->port_idx)
- RCU_INIT_POINTER(v->parent.port->vlan_info, NULL);
- else
- RCU_INIT_POINTER(v->parent.br->vlan_info, NULL);
- kfree_rcu(v, rcu);
+ struct net_bridge_vlan *vlan, *tmp;
+
+ __vlan_delete_pvid(pvid, *pvid);
+ list_for_each_entry_safe(vlan, tmp, &vlgrp->vlan_list, vlist)
+ __vlan_del(vlan);
+ rhashtable_destroy(&vlgrp->vlan_hash);
+ kfree(vlgrp);
}
struct sk_buff *br_handle_vlan(struct net_bridge *br,
- const struct net_port_vlans *pv,
+ struct net_bridge_vlan_group *vg,
struct sk_buff *skb)
{
+ struct net_bridge_vlan *v;
u16 vid;
/* If this packet was not filtered at input, let it pass */
if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
goto out;
- /* Vlan filter table must be configured at this point. The
+ /* At this point, we know that the frame was filtered and contains
+ * a valid vlan id. If the vlan entry has the untagged flag set,
+ * send untagged; otherwise, send tagged.
+ */
+ br_vlan_get_tag(skb, &vid);
+ v = br_vlan_find(vg, vid);
+ /* Vlan entry must be configured at this point. The
* only exception is the bridge is set in promisc mode and the
* packet is destined for the bridge device. In this case
* pass the packet as is.
*/
- if (!pv) {
+ if (!v || !br_vlan_should_use(v)) {
if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
goto out;
} else {
return NULL;
}
}
-
- /* At this point, we know that the frame was filtered and contains
- * a valid vlan id. If the vlan id is set in the untagged bitmap,
- * send untagged; otherwise, send tagged.
- */
- br_vlan_get_tag(skb, &vid);
- if (test_bit(vid, pv->untagged_bitmap))
+ if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
skb->vlan_tci = 0;
out:
}
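A compact restatement of the egress decision described in the comments above, using the UNTAGGED flag from the uapi header at the top of this patch (illustrative helper, not part of the patch):

static void untag_on_egress(unsigned short vlan_flags, unsigned short *vlan_tci)
{
	if (vlan_flags & BRIDGE_VLAN_INFO_UNTAGGED)
		*vlan_tci = 0;	/* entry is untagged: strip the 802.1Q tag */
	/* otherwise leave the tag in place and send tagged */
}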
/* Called under RCU */
-bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
- struct sk_buff *skb, u16 *vid)
+static bool __allowed_ingress(struct rhashtable *tbl, u16 pvid, __be16 proto,
+ struct sk_buff *skb, u16 *vid)
{
+ const struct net_bridge_vlan *v;
bool tagged;
- __be16 proto;
-
- /* If VLAN filtering is disabled on the bridge, all packets are
- * permitted.
- */
- if (!br->vlan_enabled) {
- BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
- return true;
- }
-
- /* If there are no vlan in the permitted list, all packets are
- * rejected.
- */
- if (!v)
- goto drop;
BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
- proto = br->vlan_proto;
-
/* If vlan tx offload is disabled on bridge device and frame was
* sent from vlan device on the bridge device, it does not have
* HW accelerated vlan tag.
}
if (!*vid) {
- u16 pvid = br_get_pvid(v);
-
/* Frame had a tag with VID 0 or did not have a tag.
* See if pvid is set on this port. That tells us which
* vlan untagged or priority-tagged traffic belongs to.
}
/* Frame had a valid vlan tag. See if vlan is allowed */
- if (test_bit(*vid, v->vlan_bitmap))
+ v = br_vlan_lookup(tbl, *vid);
+ if (v && br_vlan_should_use(v))
return true;
drop:
kfree_skb(skb);
return false;
}
+bool br_allowed_ingress(struct net_bridge *br, struct sk_buff *skb, u16 *vid)
+{
+ /* If VLAN filtering is disabled on the bridge, all packets are
+ * permitted.
+ */
+ if (!br->vlan_enabled) {
+ BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
+ return true;
+ }
+
+ return __allowed_ingress(&br->vlgrp->vlan_hash, br->pvid,
+ br->vlan_proto, skb, vid);
+}
+
+bool nbp_allowed_ingress(struct net_bridge_port *p, struct sk_buff *skb,
+ u16 *vid)
+{
+ struct net_bridge *br = p->br;
+
+ /* If VLAN filtering is disabled on the bridge, all packets are
+ * permitted.
+ */
+ if (!br->vlan_enabled) {
+ BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
+ return true;
+ }
+
+ return __allowed_ingress(&p->vlgrp->vlan_hash, p->pvid, br->vlan_proto,
+ skb, vid);
+}
+
/* Called under RCU. */
-bool br_allowed_egress(struct net_bridge *br,
- const struct net_port_vlans *v,
+bool br_allowed_egress(struct net_bridge_vlan_group *vg,
const struct sk_buff *skb)
{
+ const struct net_bridge_vlan *v;
u16 vid;
/* If this packet was not filtered at input, let it pass */
if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
return true;
- if (!v)
- return false;
-
br_vlan_get_tag(skb, &vid);
- if (test_bit(vid, v->vlan_bitmap))
+ v = br_vlan_find(vg, vid);
+ if (v && br_vlan_should_use(v))
return true;
return false;
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
struct net_bridge *br = p->br;
- struct net_port_vlans *v;
/* If filtering was disabled at input, let it pass. */
if (!br->vlan_enabled)
return true;
- v = rcu_dereference(p->vlan_info);
- if (!v)
+ if (!p->vlgrp->num_vlans)
return false;
if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
*vid = 0;
if (!*vid) {
- *vid = br_get_pvid(v);
+ *vid = nbp_get_pvid(p);
if (!*vid)
return false;
return true;
}
- if (test_bit(*vid, v->vlan_bitmap))
+ if (br_vlan_find(p->vlgrp, *vid))
return true;
return false;
*/
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
{
- struct net_port_vlans *pv = NULL;
- int err;
+ struct net_bridge_vlan *vlan;
+ int ret;
ASSERT_RTNL();
- pv = rtnl_dereference(br->vlan_info);
- if (pv)
- return __vlan_add(pv, vid, flags);
+ vlan = br_vlan_find(br->vlgrp, vid);
+ if (vlan) {
+ if (!br_vlan_is_brentry(vlan)) {
+ /* Trying to change flags of non-existent bridge vlan */
+ if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
+ return -EINVAL;
+ /* It was only kept for port vlans, now make it real */
+ ret = br_fdb_insert(br, NULL, br->dev->dev_addr,
+ vlan->vid);
+ if (ret) {
+ br_err(br, "failed insert local address into bridge forwarding table\n");
+ return ret;
+ }
+ atomic_inc(&vlan->refcnt);
+ vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
+ br->vlgrp->num_vlans++;
+ }
+ __vlan_add_flags(vlan, flags);
+ return 0;
+ }
- /* Create port vlan infomration
- */
- pv = kzalloc(sizeof(*pv), GFP_KERNEL);
- if (!pv)
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
return -ENOMEM;
- pv->parent.br = br;
- err = __vlan_add(pv, vid, flags);
- if (err)
- goto out;
+ vlan->vid = vid;
+ vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
+ vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
+ vlan->br = br;
+ if (flags & BRIDGE_VLAN_INFO_BRENTRY)
+ atomic_set(&vlan->refcnt, 1);
+ ret = __vlan_add(vlan, flags);
+ if (ret)
+ kfree(vlan);
- rcu_assign_pointer(br->vlan_info, pv);
- return 0;
-out:
- kfree(pv);
- return err;
+ return ret;
}
/* Must be protected by RTNL.
*/
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
- struct net_port_vlans *pv;
+ struct net_bridge_vlan *v;
ASSERT_RTNL();
- pv = rtnl_dereference(br->vlan_info);
- if (!pv)
- return -EINVAL;
+ v = br_vlan_find(br->vlgrp, vid);
+ if (!v || !br_vlan_is_brentry(v))
+ return -ENOENT;
br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
- __vlan_del(pv, vid);
- return 0;
+ return __vlan_del(v);
}
void br_vlan_flush(struct net_bridge *br)
{
- struct net_port_vlans *pv;
-
ASSERT_RTNL();
- pv = rtnl_dereference(br->vlan_info);
- if (!pv)
- return;
- __vlan_flush(pv);
+ __vlan_flush(br_vlan_group(br), &br->pvid);
}
-bool br_vlan_find(struct net_bridge *br, u16 vid)
+struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
{
- struct net_port_vlans *pv;
- bool found = false;
+ if (!vg)
+ return NULL;
- rcu_read_lock();
- pv = rcu_dereference(br->vlan_info);
-
- if (!pv)
- goto out;
-
- if (test_bit(vid, pv->vlan_bitmap))
- found = true;
-
-out:
- rcu_read_unlock();
- return found;
+ return br_vlan_lookup(&vg->vlan_hash, vid);
}
/* Must be protected by RTNL. */
{
int err = 0;
struct net_bridge_port *p;
- struct net_port_vlans *pv;
+ struct net_bridge_vlan *vlan;
__be16 oldproto;
- u16 vid, errvid;
if (br->vlan_proto == proto)
return 0;
/* Add VLANs for the new proto to the device filter. */
list_for_each_entry(p, &br->port_list, list) {
- pv = rtnl_dereference(p->vlan_info);
- if (!pv)
- continue;
-
- for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
- err = vlan_vid_add(p->dev, proto, vid);
+ list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist) {
+ err = vlan_vid_add(p->dev, proto, vlan->vid);
if (err)
goto err_filt;
}
br_recalculate_fwd_mask(br);
/* Delete VLANs for the old proto from the device filter. */
- list_for_each_entry(p, &br->port_list, list) {
- pv = rtnl_dereference(p->vlan_info);
- if (!pv)
- continue;
-
- for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
- vlan_vid_del(p->dev, oldproto, vid);
- }
+ list_for_each_entry(p, &br->port_list, list)
+ list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist)
+ vlan_vid_del(p->dev, oldproto, vlan->vid);
return 0;
err_filt:
- errvid = vid;
- for_each_set_bit(vid, pv->vlan_bitmap, errvid)
- vlan_vid_del(p->dev, proto, vid);
-
- list_for_each_entry_continue_reverse(p, &br->port_list, list) {
- pv = rtnl_dereference(p->vlan_info);
- if (!pv)
- continue;
+ list_for_each_entry_continue_reverse(vlan, &p->vlgrp->vlan_list, vlist)
+ vlan_vid_del(p->dev, proto, vlan->vid);
- for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
- vlan_vid_del(p->dev, proto, vid);
- }
+ list_for_each_entry_continue_reverse(p, &br->port_list, list)
+ list_for_each_entry(vlan, &p->vlgrp->vlan_list, vlist)
+ vlan_vid_del(p->dev, proto, vlan->vid);
return err;
}
return err;
}
-static bool vlan_default_pvid(struct net_port_vlans *pv, u16 vid)
+static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 pvid,
+ u16 vid)
{
- return pv && vid == pv->pvid && test_bit(vid, pv->untagged_bitmap);
+ struct net_bridge_vlan *v;
+
+ if (vid != pvid)
+ return false;
+
+ v = br_vlan_lookup(&vg->vlan_hash, vid);
+ if (v && br_vlan_should_use(v) &&
+ (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
+ return true;
+
+ return false;
}
static void br_vlan_disable_default_pvid(struct net_bridge *br)
/* Disable default_pvid on all ports where it is still
* configured.
*/
- if (vlan_default_pvid(br_get_vlan_info(br), pvid))
+ if (vlan_default_pvid(br->vlgrp, br->pvid, pvid))
br_vlan_delete(br, pvid);
list_for_each_entry(p, &br->port_list, list) {
- if (vlan_default_pvid(nbp_get_vlan_info(p), pvid))
+ if (vlan_default_pvid(p->vlgrp, p->pvid, pvid))
nbp_vlan_delete(p, pvid);
}
static int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
{
+ const struct net_bridge_vlan *pvent;
struct net_bridge_port *p;
u16 old_pvid;
int err = 0;
/* Update default_pvid config only if we do not conflict with
* user configuration.
*/
- if ((!old_pvid || vlan_default_pvid(br_get_vlan_info(br), old_pvid)) &&
- !br_vlan_find(br, pvid)) {
+ pvent = br_vlan_find(br->vlgrp, pvid);
+ if ((!old_pvid || vlan_default_pvid(br->vlgrp, br->pvid, old_pvid)) &&
+ (!pvent || !br_vlan_should_use(pvent))) {
err = br_vlan_add(br, pvid,
BRIDGE_VLAN_INFO_PVID |
- BRIDGE_VLAN_INFO_UNTAGGED);
+ BRIDGE_VLAN_INFO_UNTAGGED |
+ BRIDGE_VLAN_INFO_BRENTRY);
if (err)
goto out;
br_vlan_delete(br, old_pvid);
* user configuration.
*/
if ((old_pvid &&
- !vlan_default_pvid(nbp_get_vlan_info(p), old_pvid)) ||
- nbp_vlan_find(p, pvid))
+ !vlan_default_pvid(p->vlgrp, p->pvid, old_pvid)) ||
+ br_vlan_find(p->vlgrp, pvid))
continue;
err = nbp_vlan_add(p, pvid,
if (old_pvid)
br_vlan_add(br, old_pvid,
BRIDGE_VLAN_INFO_PVID |
- BRIDGE_VLAN_INFO_UNTAGGED);
+ BRIDGE_VLAN_INFO_UNTAGGED |
+ BRIDGE_VLAN_INFO_BRENTRY);
br_vlan_delete(br, pvid);
}
goto out;
int br_vlan_init(struct net_bridge *br)
{
+ int ret = -ENOMEM;
+
+ br->vlgrp = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
+ if (!br->vlgrp)
+ goto out;
+ ret = rhashtable_init(&br->vlgrp->vlan_hash, &br_vlan_rht_params);
+ if (ret)
+ goto err_rhtbl;
+ INIT_LIST_HEAD(&br->vlgrp->vlan_list);
br->vlan_proto = htons(ETH_P_8021Q);
br->default_pvid = 1;
- return br_vlan_add(br, 1,
- BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED);
+ ret = br_vlan_add(br, 1,
+ BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
+ BRIDGE_VLAN_INFO_BRENTRY);
+ if (ret)
+ goto err_vlan_add;
+
+out:
+ return ret;
+
+err_vlan_add:
+ rhashtable_destroy(&br->vlgrp->vlan_hash);
+err_rhtbl:
+ kfree(br->vlgrp);
+
+ goto out;
+}
+
+int nbp_vlan_init(struct net_bridge_port *p)
+{
+ int ret = -ENOMEM;
+
+ p->vlgrp = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
+ if (!p->vlgrp)
+ goto out;
+
+ ret = rhashtable_init(&p->vlgrp->vlan_hash, &br_vlan_rht_params);
+ if (ret)
+ goto err_rhtbl;
+ INIT_LIST_HEAD(&p->vlgrp->vlan_list);
+ if (p->br->default_pvid) {
+ ret = nbp_vlan_add(p, p->br->default_pvid,
+ BRIDGE_VLAN_INFO_PVID |
+ BRIDGE_VLAN_INFO_UNTAGGED);
+ if (ret)
+ goto err_vlan_add;
+ }
+out:
+ return ret;
+
+err_vlan_add:
+ rhashtable_destroy(&p->vlgrp->vlan_hash);
+err_rhtbl:
+ kfree(p->vlgrp);
+
+ goto out;
}
/* Must be protected by RTNL.
*/
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
{
- struct net_port_vlans *pv = NULL;
- int err;
+ struct net_bridge_vlan *vlan;
+ int ret;
ASSERT_RTNL();
- pv = rtnl_dereference(port->vlan_info);
- if (pv)
- return __vlan_add(pv, vid, flags);
-
- /* Create port vlan infomration
- */
- pv = kzalloc(sizeof(*pv), GFP_KERNEL);
- if (!pv) {
- err = -ENOMEM;
- goto clean_up;
+ vlan = br_vlan_find(port->vlgrp, vid);
+ if (vlan) {
+ __vlan_add_flags(vlan, flags);
+ return 0;
}
- pv->port_idx = port->port_no;
- pv->parent.port = port;
- err = __vlan_add(pv, vid, flags);
- if (err)
- goto clean_up;
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return -ENOMEM;
- rcu_assign_pointer(port->vlan_info, pv);
- return 0;
+ vlan->vid = vid;
+ vlan->port = port;
+ ret = __vlan_add(vlan, flags);
+ if (ret)
+ kfree(vlan);
-clean_up:
- kfree(pv);
- return err;
+ return ret;
}
/* Must be protected by RTNL.
*/
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
- struct net_port_vlans *pv;
+ struct net_bridge_vlan *v;
ASSERT_RTNL();
- pv = rtnl_dereference(port->vlan_info);
- if (!pv)
- return -EINVAL;
-
+ v = br_vlan_find(port->vlgrp, vid);
+ if (!v)
+ return -ENOENT;
br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
br_fdb_delete_by_port(port->br, port, vid, 0);
- return __vlan_del(pv, vid);
+ return __vlan_del(v);
}
void nbp_vlan_flush(struct net_bridge_port *port)
{
- struct net_port_vlans *pv;
- u16 vid;
+ struct net_bridge_vlan *vlan;
ASSERT_RTNL();
- pv = rtnl_dereference(port->vlan_info);
- if (!pv)
- return;
-
- for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
- vlan_vid_del(port->dev, port->br->vlan_proto, vid);
-
- __vlan_flush(pv);
-}
-
-bool nbp_vlan_find(struct net_bridge_port *port, u16 vid)
-{
- struct net_port_vlans *pv;
- bool found = false;
-
- rcu_read_lock();
- pv = rcu_dereference(port->vlan_info);
-
- if (!pv)
- goto out;
+ list_for_each_entry(vlan, &port->vlgrp->vlan_list, vlist)
+ vlan_vid_del(port->dev, port->br->vlan_proto, vlan->vid);
- if (test_bit(vid, pv->vlan_bitmap))
- found = true;
-
-out:
- rcu_read_unlock();
- return found;
-}
-
-int nbp_vlan_init(struct net_bridge_port *p)
-{
- return p->br->default_pvid ?
- nbp_vlan_add(p, p->br->default_pvid,
- BRIDGE_VLAN_INFO_PVID |
- BRIDGE_VLAN_INFO_UNTAGGED) :
- 0;
+ __vlan_flush(nbp_vlan_group(port), &port->pvid);
}