* batadv_v_ogm_aggr_send() - flush & send aggregation queue
* @hard_iface: the interface with the aggregation queue to flush
*
+ * Aggregates all OGMv2 packets currently in the aggregation queue into a
+ * single OGMv2 packet and transmits this aggregate.
+ *
+ * The aggregation queue is empty after this call.
+ *
* Caller needs to hold the hard_iface->bat_v.aggr_list_lock.
*/
static void batadv_v_ogm_aggr_send(struct batadv_hard_iface *hard_iface)
{
+ unsigned int aggr_len = hard_iface->bat_v.aggr_len;
+ struct sk_buff *skb_aggr;
+ unsigned int ogm_len;
struct sk_buff *skb;

lockdep_assert_held(&hard_iface->bat_v.aggr_list_lock);

+ if (!aggr_len)
+ return;
+
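+ /* allocate a single skb large enough to hold all queued OGMs, plus headroom */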
+ skb_aggr = dev_alloc_skb(aggr_len + ETH_HLEN + NET_IP_ALIGN);
+ if (!skb_aggr) {
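+ /* out of memory: drop the queued OGMs instead */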
+ batadv_v_ogm_aggr_list_free(hard_iface);
+ return;
+ }
+
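+ /* keep headroom for the ethernet header; the aggregate starts at the network header */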
+ skb_reserve(skb_aggr, ETH_HLEN + NET_IP_ALIGN);
+ skb_reset_network_header(skb_aggr);
+
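+ /* append each queued OGM to the aggregate, then free the original */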
while ((skb = skb_dequeue(&hard_iface->bat_v.aggr_list))) {
hard_iface->bat_v.aggr_len -= batadv_v_ogm_len(skb);
- batadv_v_ogm_send_to_if(skb, hard_iface);
+
+ ogm_len = batadv_v_ogm_len(skb);
+ skb_put_data(skb_aggr, skb->data, ogm_len);
+
+ consume_skb(skb);
}
+
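+ /* transmit all aggregated OGMs as one packet on this interface */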
+ batadv_v_ogm_send_to_if(skb_aggr, hard_iface);
}

/**