* no further reads against netdev_map. It does __not__ ensure pending
* flush operations (if any) are complete.
*/
+
+ spin_lock(&dev_map_lock);
+ list_del_rcu(&dtab->list);
+ spin_unlock(&dev_map_lock);
+
synchronize_rcu();
/* To ensure all pending flush operations have completed wait for flush
@@ ... @@
cpu_relax();
}
- /* Although we should no longer have datapath or bpf syscall operations
- * at this point we we can still race with netdev notifier, hence the
- * lock.
- */
for (i = 0; i < dtab->map.max_entries; i++) {
struct bpf_dtab_netdev *dev;
@@ ... @@
/* At this point bpf program is detached and all pending operations
* _must_ be complete
*/
- spin_lock(&dev_map_lock);
- list_del_rcu(&dtab->list);
- spin_unlock(&dev_map_lock);
free_percpu(dtab->flush_needed);
bpf_map_area_free(dtab->netdev_map);
kfree(dtab);
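
The ordering in the first hunk is the point of the fix: the dtab is
unlinked from dev_map_list under dev_map_lock *before* synchronize_rcu(),
instead of at the very end of teardown as the removed lines in the last
hunk did. A minimal standalone sketch of that unpublish, then wait, then
free pattern; the names (my_table, my_table_lock, my_table_list,
my_table_publish) are hypothetical stand-ins for the devmap structures,
not kernel identifiers:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_table {
	struct list_head list;		/* linked on my_table_list */
	/* ... per-table state ... */
};

static DEFINE_SPINLOCK(my_table_lock);	/* spinlock, usable where a
					 * sleeping mutex is not */
static LIST_HEAD(my_table_list);

/* Publish: make tab visible to RCU readers of my_table_list. */
static void my_table_publish(struct my_table *tab)
{
	spin_lock(&my_table_lock);
	list_add_rcu(&tab->list, &my_table_list);
	spin_unlock(&my_table_lock);
}

static void my_table_free(struct my_table *tab)
{
	/* Unpublish first: new RCU readers can no longer find tab. */
	spin_lock(&my_table_lock);
	list_del_rcu(&tab->list);
	spin_unlock(&my_table_lock);

	/* Wait out any reader that found tab before the unlink. */
	synchronize_rcu();

	/* No reader can reach tab now, so tearing it down is safe.
	 * Unlinking only at this point, as the old code did, would leave
	 * a window in which a reader walking my_table_list could touch a
	 * table already being torn down.
	 */
	kfree(tab);
}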
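The middle hunk's surviving context (the cpu_relax() loop) is the tail of
the flush wait that runs after synchronize_rcu(): each CPU carries a
flush_needed bitmap, and free has to poll them until they drain. A sketch
of that polling step under the same assumptions; wait_for_flush is a
hypothetical helper for illustration (the kernel does this inline in
dev_map_free), and it is safe as a busy-wait only because the preceding
grace period guarantees no new bits get set:

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/processor.h>
#include <linux/types.h>

/* Poll every online CPU's flush bitmap until all bits are clear. */
static void wait_for_flush(unsigned long __percpu *flush_needed,
			   u32 max_entries)
{
	int cpu;

	for_each_online_cpu(cpu) {
		unsigned long *bitmap = per_cpu_ptr(flush_needed, cpu);

		/* cpu_relax() eases the spin while a remote CPU finishes
		 * its in-flight flush.
		 */
		while (!bitmap_empty(bitmap, max_entries))
			cpu_relax();
	}
}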