Revert "af_packet: add interframe drop cmsg (v6)"

This reverts commit 977750076d.

Neil is reimplementing this generically, outside of AF_PACKET.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed 2009-10-12 03:00:31 -07:00
parent 91b2a3f9bb    commit d5e63bded6
2 changed files with 0 additions and 35 deletions

include/linux/if_packet.h

@@ -48,13 +48,11 @@ struct sockaddr_ll
 #define PACKET_RESERVE		12
 #define PACKET_TX_RING		13
 #define PACKET_LOSS		14
-#define PACKET_GAPDATA		15
 
 struct tpacket_stats
 {
 	unsigned int	tp_packets;
 	unsigned int	tp_drops;
-	unsigned int	tp_gap;
 };
 
 struct tpacket_auxdata

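After the revert, struct tpacket_stats is back to its original two counters, which remain readable through the long-standing PACKET_STATISTICS getsockopt. A minimal sketch of that (unchanged) query path, for context — print_packet_stats is a hypothetical helper name, and fd is assumed to be an already-open AF_PACKET socket:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

/* Read the per-socket receive counters. Note that the kernel
 * resets them to zero on each PACKET_STATISTICS read. */
static void print_packet_stats(int fd)
{
	struct tpacket_stats st;
	socklen_t len = sizeof(st);

	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
		printf("packets: %u, drops: %u\n", st.tp_packets, st.tp_drops);
}

Unlike the aggregate tp_drops counter, the reverted tp_gap field tied a drop count to the specific frame that followed the loss, which is what required the extra plumbing removed below.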
net/packet/af_packet.c

@@ -524,31 +524,6 @@ static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
 	return res;
 }
 
-/*
- * If we've lost frames since the last time we queued one to the
- * sk_receive_queue, we need to record it here.
- * This must be called under the protection of the socket lock
- * to prevent racing with other softirqs and user space
- */
-static inline void record_packet_gap(struct sk_buff *skb,
-				     struct packet_sock *po)
-{
-	/*
-	 * We overload the mark field here, since we're about
-	 * to enqueue to a receive queue and nobody else will
-	 * use this field at this point
-	 */
-	skb->mark = po->stats.tp_gap;
-	po->stats.tp_gap = 0;
-	return;
-}
-
-static inline __u32 check_packet_gap(struct sk_buff *skb)
-{
-	return skb->mark;
-}
-
 /*
    This function makes lazy skb cloning in hope that most of packets
    are discarded by BPF.
@@ -652,7 +627,6 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 	spin_lock(&sk->sk_receive_queue.lock);
 	po->stats.tp_packets++;
-	record_packet_gap(skb, po);
 	__skb_queue_tail(&sk->sk_receive_queue, skb);
 	spin_unlock(&sk->sk_receive_queue.lock);
 	sk->sk_data_ready(sk, skb->len);
@@ -661,7 +635,6 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 drop_n_acct:
 	spin_lock(&sk->sk_receive_queue.lock);
 	po->stats.tp_drops++;
-	po->stats.tp_gap++;
 	spin_unlock(&sk->sk_receive_queue.lock);
 
 drop_n_restore:
@@ -839,7 +812,6 @@ drop:
 
 ring_is_full:
 	po->stats.tp_drops++;
-	po->stats.tp_gap++;
 	spin_unlock(&sk->sk_receive_queue.lock);
 
 	sk->sk_data_ready(sk, 0);
@@ -1449,7 +1421,6 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
 	struct sk_buff *skb;
 	int copied, err;
 	struct sockaddr_ll *sll;
-	__u32 gap;
 
 	err = -EINVAL;
 	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
@@ -1528,10 +1499,6 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
 		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
 	}
 
-	gap = check_packet_gap(skb);
-	if (gap)
-		put_cmsg(msg, SOL_PACKET, PACKET_GAPDATA, sizeof(__u32), &gap);
-
 	/*
 	 *	Free or return the buffer as appropriate. Again this
 	 *	hides all the races and re-entrancy issues from us.
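For reference, this is roughly how a receiver would have consumed the control message deleted here. It is illustrative only, since PACKET_GAPDATA no longer exists after this revert; recv_with_gap is a hypothetical helper, and the cmsg walk is the same pattern used for PACKET_AUXDATA:

#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/types.h>
#include <linux/if_packet.h>

#ifndef PACKET_GAPDATA
#define PACKET_GAPDATA 15	/* value from the reverted patch */
#endif

/* Receive one frame; *gap is set to the number of frames the kernel
 * dropped immediately before this one, per the reverted interface. */
static ssize_t recv_with_gap(int fd, void *buf, size_t len, __u32 *gap)
{
	char cbuf[CMSG_SPACE(sizeof(__u32))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	ssize_t n = recvmsg(fd, &msg, 0);

	*gap = 0;
	if (n < 0)
		return n;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == SOL_PACKET &&
		    cmsg->cmsg_type == PACKET_GAPDATA)
			memcpy(gap, CMSG_DATA(cmsg), sizeof(*gap));

	return n;
}

The generic rework referenced in the commit message later landed as the SO_RXQ_OVFL socket option, which delivers a comparable drop count as a SOL_SOCKET control message for any socket type.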