static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
{
+	unsigned int hh_alen = 0;
	unsigned int seq;
	unsigned int hh_len;

	do {
		seq = read_seqbegin(&hh->hh_lock);
		hh_len = hh->hh_len;
		if (likely(hh_len <= HH_DATA_MOD)) {
-			/* this is inlined by gcc */
-			memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
+			hh_alen = HH_DATA_MOD;
+
+			/* skb_push() would proceed silently if we have room for
+			 * the unaligned size but not for the aligned size:
+			 * check headroom explicitly.
+			 */
+			if (likely(skb_headroom(skb) >= HH_DATA_MOD)) {
+				/* this is inlined by gcc */
+				memcpy(skb->data - HH_DATA_MOD, hh->hh_data,
+				       HH_DATA_MOD);
+			}
		} else {
-			unsigned int hh_alen = HH_DATA_ALIGN(hh_len);
+			hh_alen = HH_DATA_ALIGN(hh_len);

-			memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
+			if (likely(skb_headroom(skb) >= hh_alen)) {
+				memcpy(skb->data - hh_alen, hh->hh_data,
+				       hh_alen);
+			}
		}
	} while (read_seqretry(&hh->hh_lock, seq));

-	skb_push(skb, hh_len);
+	if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) {
+		kfree_skb(skb);
+		return NET_XMIT_DROP;
+	}
+
+	__skb_push(skb, hh_len);
	return dev_queue_xmit(skb);
}
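
The comment added in the first branch is the crux of the fix: skb_push() only validates the unaligned hh_len, while the memcpy() writes the HH_DATA_MOD-aligned length in front of skb->data. Below is a minimal userspace sketch of that arithmetic, reusing the HH_DATA_MOD/HH_DATA_ALIGN definitions from include/linux/netdevice.h; the hh_len and headroom values are made-up examples.

#include <stdio.h>

#define HH_DATA_MOD	16
#define HH_DATA_ALIGN(__len) \
	(((__len) + (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1))

int main(void)
{
	unsigned int hh_len   = 18;	/* hypothetical odd-sized L2 header */
	unsigned int headroom = 24;	/* hypothetical skb_headroom() value */
	unsigned int hh_alen  = HH_DATA_ALIGN(hh_len);	/* 18 rounds up to 32 */

	/* skb_push(skb, hh_len) only checks the unaligned length... */
	printf("push of %u bytes fits: %s\n", hh_len,
	       headroom >= hh_len ? "yes" : "no");	/* yes */

	/* ...but the copy starts hh_alen bytes before skb->data */
	printf("copy of %u bytes fits: %s\n", hh_alen,
	       headroom >= hh_alen ? "yes" : "no");	/* no: underflow */

	return 0;
}

With these values the old code passes skb_push()'s underflow check yet copies 32 bytes starting 8 bytes before skb->head; the patched code compares skb_headroom() against the aligned length instead and, once the loop settles, drops the packet rather than corrupting memory.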
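The copy itself runs under a seqlock read loop: read_seqbegin() samples the sequence counter in hh->hh_lock and read_seqretry() re-checks it, so a copy that raced with a writer updating hh_data is simply redone. A rough userspace rendering of that reader pattern follows, using C11 atomics; the seqcount_sketch type and function names are invented for illustration, and the kernel primitives provide the equivalent memory barriers.

#include <stdatomic.h>

struct seqcount_sketch {
	atomic_uint sequence;	/* odd while a writer is in progress */
};

/* ~read_seqbegin(): wait out any writer, return the even sequence */
static unsigned int sketch_read_begin(struct seqcount_sketch *s)
{
	unsigned int seq;

	while ((seq = atomic_load_explicit(&s->sequence,
					   memory_order_acquire)) & 1)
		;	/* writer active: spin until it finishes */
	return seq;
}

/* ~read_seqretry(): nonzero if a writer ran since sketch_read_begin() */
static int sketch_read_retry(struct seqcount_sketch *s, unsigned int seq)
{
	/* order the protected-data reads before the sequence re-check */
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&s->sequence,
				    memory_order_relaxed) != seq;
}

int main(void)
{
	struct seqcount_sketch sc = { 0 };
	unsigned int seq;

	do {
		seq = sketch_read_begin(&sc);
		/* read the protected data here, as the memcpy() above does;
		 * it may be torn, so nothing read can be trusted yet */
	} while (sketch_read_retry(&sc, seq));

	return 0;
}

This is why neigh_hh_output() keeps the headroom test and the memcpy() inside the do/while, where they may observe torn data and be retried, and defers the final headroom verdict, the push, and the drop decision until after the loop has produced a stable hh_alen.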