get_batman_if_by_netdev and get_active_batman_if may leak data from the
rcu protected list of interfaces: they hand the actual list element to
the caller outside of the read-side critical section. This is not valid
because the data may be freed through a call_rcu created callback right
after rcu_read_unlock. A workaround is to provide a reference counter
which ensures that the memory isn't freed too early.
The counter currently only covers the already existing functionality
and doesn't provide full tracking of all usage cases.
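For illustration only (the affected call sites are not part of the
hunks below, and net_dev stands for some net_device the caller already
holds), the unsafe pattern looks roughly like this:

    struct batman_if *batman_if;

    batman_if = get_batman_if_by_netdev(net_dev);
    /* the rcu read-side critical section already ended inside the
     * getter, so a concurrent list_del_rcu() plus call_rcu() may free
     * batman_if at any point from here on */
    if (batman_if)
        batman_if->if_status = IF_NOT_IN_USE;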
Additionally, hardif_hold must be called inside the
rcu_read_lock()..rcu_read_unlock() section before the pointer is
attached to a structure that "leaks" it outside the critical section.
When another function later removes it from that usage context
(primary_if, usage on stack, ...) it must call hardif_put. If the
counter is decremented to zero, the call_rcu for the freeing function
can be issued. So "put" is not allowed inside an rcu_read_lock
protected section.
Signed-off-by: Sven Eckelmann <sven.eckelmann@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
batman_if->soft_iface = NULL;
batman_if->if_status = IF_NOT_IN_USE;
INIT_LIST_HEAD(&batman_if->list);
+ atomic_set(&batman_if->refcnt, 0);
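+ /* extra reference: dropped again when the interface is removed */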
+ hardif_hold(batman_if);
check_known_mac_addr(batman_if->net_dev->dev_addr);
list_del_rcu(&batman_if->list);
synchronize_rcu();
sysfs_del_hardif(&batman_if->hardif_obj);
- dev_put(batman_if->net_dev);
- kfree(batman_if);
+ hardif_put(batman_if);
}
void hardif_remove_interfaces(void)
int hardif_min_mtu(struct net_device *soft_iface);
void update_min_mtu(struct net_device *soft_iface);
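+/* increases the reference counter; must be called while the interface is
+ * guaranteed to stay around, e.g. inside an rcu_read_lock() section */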
+static inline void hardif_hold(struct batman_if *batman_if)
+{
+ atomic_inc(&batman_if->refcnt);
+}
+
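+/* decreases the reference counter and frees the interface when it reaches
+ * zero; therefore not allowed inside an rcu_read_lock() section */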
+static inline void hardif_put(struct batman_if *batman_if)
+{
+ if (atomic_dec_and_test(&batman_if->refcnt)) {
+ dev_put(batman_if->net_dev);
+ kfree(batman_if);
+ }
+}
+
#endif /* _NET_BATMAN_ADV_HARD_INTERFACE_H_ */
unsigned char *packet_buff;
int packet_len;
struct kobject *hardif_obj;
+ atomic_t refcnt;
struct packet_type batman_adv_ptype;
struct net_device *soft_iface;
};