config BATMAN_ADV
tristate "B.A.T.M.A.N. Advanced Meshing Protocol"
- depends on NET && INET
+ depends on NET
select CRC16
default n
help
Visit http://www.open-mesh.org/ for more information and user space
tools.
+config BATMAN_ADV_BLA
+ bool "Bridge Loop Avoidance"
+ depends on BATMAN_ADV && INET
+ default y
+ help
+ This option enables BLA (Bridge Loop Avoidance), a mechanism
+ to avoid Ethernet frames looping when mesh nodes are connected
+ to both the same LAN and the same mesh. If you will never use
+ more than one mesh node in the same LAN, you can safely remove
+ this feature and save some space.
config BATMAN_ADV_DEBUG
bool "B.A.T.M.A.N. debugging"
batman-adv-y += bat_iv_ogm.o
batman-adv-y += bat_sysfs.o
batman-adv-y += bitarray.o
-batman-adv-y += bridge_loop_avoidance.o
+batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
batman-adv-y += gateway_client.o
batman-adv-y += gateway_common.o
batman-adv-y += hard-interface.o
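
The Makefile hook is the standard kbuild conditional-object idiom:
$(CONFIG_BATMAN_ADV_BLA) expands to y when the option is set, turning the line
into an append to batman-adv-y; when unset it expands to nothing, the append
targets the never-referenced batman-adv- list, and bridge_loop_avoidance.o is
not built at all. Spelled out, the two expansions are:

	batman-adv-y += bridge_loop_avoidance.o    (CONFIG_BATMAN_ADV_BLA=y)
	batman-adv-  += bridge_loop_avoidance.o    (unset: list never used)
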
return single_open(file, tt_global_seq_print_text, net_dev);
}
+#ifdef CONFIG_BATMAN_ADV_BLA
static int bla_claim_table_open(struct inode *inode, struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
return single_open(file, bla_claim_table_seq_print_text, net_dev);
}
-
+#endif
static int transtable_local_open(struct inode *inode, struct file *file)
{
static BAT_DEBUGINFO(originators, S_IRUGO, originators_open);
static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open);
static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open);
+#ifdef CONFIG_BATMAN_ADV_BLA
static BAT_DEBUGINFO(bla_claim_table, S_IRUGO, bla_claim_table_open);
+#endif
static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open);
static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open);
&bat_debuginfo_originators,
&bat_debuginfo_gateways,
&bat_debuginfo_transtable_global,
+#ifdef CONFIG_BATMAN_ADV_BLA
&bat_debuginfo_bla_claim_table,
+#endif
&bat_debuginfo_transtable_local,
&bat_debuginfo_vis_data,
NULL,
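
The NULL terminator matters because this table is walked when the debugfs
directory for a mesh interface is created. A sketch of the kind of loop that
consumes it, assuming the table is named mesh_debuginfos and each entry carries
an attribute plus file_operations, as the BAT_DEBUGINFO() entries above suggest;
this is not quoted from bat_debugfs.c:

	static int debugfs_add_meshif_sketch(struct net_device *net_dev)
	{
		struct bat_priv *bat_priv = netdev_priv(net_dev);
		struct bat_debuginfo **bat_debug;
		struct dentry *file;

		for (bat_debug = mesh_debuginfos; *bat_debug; ++bat_debug) {
			file = debugfs_create_file(((*bat_debug)->attr).name,
						   S_IFREG | ((*bat_debug)->attr).mode,
						   bat_priv->debug_dir, net_dev,
						   &(*bat_debug)->fops);
			if (!file)
				return -ENOMEM;	/* caller unwinds */
		}
		return 0;
	}

Guarding the table entry with the same #ifdef as the BAT_DEBUGINFO() definition
keeps this loop oblivious to whether BLA was compiled in.
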
BAT_ATTR_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
+#ifdef CONFIG_BATMAN_ADV_BLA
BAT_ATTR_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
+#endif
BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu);
BAT_ATTR_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
static struct bat_attribute *mesh_attrs[] = {
&bat_attr_aggregated_ogms,
&bat_attr_bonding,
+#ifdef CONFIG_BATMAN_ADV_BLA
&bat_attr_bridge_loop_avoidance,
+#endif
&bat_attr_fragmentation,
&bat_attr_ap_isolation,
&bat_attr_vis_mode,
#ifndef _NET_BATMAN_ADV_BLA_H_
#define _NET_BATMAN_ADV_BLA_H_
+#ifdef CONFIG_BATMAN_ADV_BLA
int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
int bla_is_backbone_gw(struct sk_buff *skb,
		       struct orig_node *orig_node, int hdr_size);
void bla_free(struct bat_priv *bat_priv);
#define BLA_CRC_INIT 0
+#else /* ifdef CONFIG_BATMAN_ADV_BLA */
+
+static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb,
+ short vid)
+{
+ return 0;
+}
+
+static inline int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb,
+ short vid)
+{
+ return 0;
+}
+
+static inline int bla_is_backbone_gw(struct sk_buff *skb,
+ struct orig_node *orig_node,
+ int hdr_size)
+{
+ return 0;
+}
+
+static inline int bla_claim_table_seq_print_text(struct seq_file *seq,
+ void *offset)
+{
+ return 0;
+}
+
+static inline int bla_is_backbone_gw_orig(struct bat_priv *bat_priv,
+ uint8_t *orig)
+{
+ return 0;
+}
+
+static inline int bla_check_bcast_duplist(struct bat_priv *bat_priv,
+ struct bcast_packet *bcast_packet,
+ int hdr_size)
+{
+ return 0;
+}
+
+static inline void bla_update_orig_address(struct bat_priv *bat_priv,
+ struct hard_iface *primary_if,
+ struct hard_iface *oldif)
+{
+}
+
+static inline int bla_init(struct bat_priv *bat_priv)
+{
+ return 1;
+}
+
+static inline void bla_free(struct bat_priv *bat_priv)
+{
+}
+
+#endif /* ifdef CONFIG_BATMAN_ADV_BLA */
#endif /* ifndef _NET_BATMAN_ADV_BLA_H_ */
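
The payoff of the stub block is at the call sites, which stay free of #ifdefs.
A sketch of what a receive-path caller looks like either way (the real call
site in soft-interface.c may differ in detail):

	/* with CONFIG_BATMAN_ADV_BLA unset, bla_rx() is the inline stub
	 * above: it returns 0 ("not claimed"), so the branch is dead code
	 * and the compiler drops the call entirely */
	if (bla_rx(bat_priv, skb, vid))
		goto out;
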
0x00};
unsigned int header_len = 0;
int data_len = skb->len, ret;
- short vid = -1;
+ short vid __maybe_unused = -1;
bool do_bcast = false;
if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
struct bat_priv *bat_priv = netdev_priv(soft_iface);
struct ethhdr *ethhdr;
struct vlan_ethhdr *vhdr;
- short vid = -1;
+ short vid __maybe_unused = -1;
/* check if enough space is available for pulling, and pull */
if (!pskb_may_pull(skb, hdr_size))
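
For the record, __maybe_unused is the kernel's wrapper around
__attribute__((unused)): it asks the compiler not to warn if some configuration
leaves the variable unreferenced, which is what happens to vid once its BLA
consumers are compiled out. An illustrative (hypothetical) use of the
annotation, with DEBUG_X standing in for a config-dependent reader:

	static void rx_example(void)
	{
		/* only read when DEBUG_X is defined; the annotation keeps
		 * the other configuration warning-free */
		int seen __maybe_unused = 0;

	#ifdef DEBUG_X
		pr_debug("seen=%d\n", seen);
	#endif
	}
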
spinlock_t tq_lock; /* protects: tq_recv, tq_index */
};
+#ifdef CONFIG_BATMAN_ADV_BLA
struct bcast_duplist_entry {
uint8_t orig[ETH_ALEN];
uint16_t crc;
unsigned long entrytime;
};
+#endif
struct bat_priv {
atomic_t mesh_state;
struct hashtable_t *orig_hash;
struct hashtable_t *tt_local_hash;
struct hashtable_t *tt_global_hash;
+#ifdef CONFIG_BATMAN_ADV_BLA
struct hashtable_t *claim_hash;
struct hashtable_t *backbone_hash;
+#endif
struct list_head tt_req_list; /* list of pending tt_requests */
struct list_head tt_roam_list;
struct hashtable_t *vis_hash;
+#ifdef CONFIG_BATMAN_ADV_BLA
struct bcast_duplist_entry bcast_duplist[DUPLIST_SIZE];
int bcast_duplist_curr;
struct bla_claim_dst claim_dest;
+#endif
spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */
spinlock_t tt_changes_list_lock; /* protects tt_changes */
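
The duplist exists because two backbone gateways on the same LAN will both
forward the same broadcast into the mesh; each entry pairs a payload CRC with
the originator and a timestamp so the second copy can be recognized and
dropped. A sketch of the check this enables, with DUPLIST_TIMEOUT and the
function name as assumptions rather than the actual bla_check_bcast_duplist()
body:

	static bool bcast_seen_recently(struct bat_priv *bat_priv,
					const uint8_t *orig, uint16_t crc)
	{
		struct bcast_duplist_entry *entry;
		int i;

		for (i = 0; i < DUPLIST_SIZE; i++) {
			entry = &bat_priv->bcast_duplist[i];

			/* same payload checksum, sent by another backbone
			 * gateway, still inside the duplicate window? */
			if (entry->crc == crc &&
			    !compare_eth(entry->orig, orig) &&
			    time_before(jiffies,
					entry->entrytime +
					msecs_to_jiffies(DUPLIST_TIMEOUT)))
				return true;
		}
		return false;
	}
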
struct hlist_node list;
};
+#ifdef CONFIG_BATMAN_ADV_BLA
struct backbone_gw {
uint8_t orig[ETH_ALEN];
short vid; /* used VLAN ID */
atomic_t refcount;
struct hlist_node hash_entry;
};
+#endif
struct tt_change_node {
struct list_head list;
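
backbone_gw above carries a plain atomic_t refcount in batman-adv's usual
style; a sketch of the get/put pairing this implies (backbone_gw_free_ref() is
modelled on the driver's other *_free_ref() helpers, not quoted from it):

	static void backbone_gw_get(struct backbone_gw *backbone_gw)
	{
		atomic_inc(&backbone_gw->refcount);
	}

	static void backbone_gw_free_ref(struct backbone_gw *backbone_gw)
	{
		if (atomic_dec_and_test(&backbone_gw->refcount))
			kfree(backbone_gw);
	}
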