net: backlog functions rename
sk_add_backlog -> __sk_add_backlog
sk_add_backlog_limited -> sk_add_backlog

Signed-off-by: Zhu Yi <yi.zhu@intel.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2499849ee8
commit a3a858ff18

13 changed files with 17 additions and 17 deletions
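The direction of the rename follows the usual kernel convention: a double-underscore prefix marks the raw, unchecked variant, while the plain name goes to the limit-checked helper that callers should use by default. As a reader's summary of the include/net/sock.h hunks below (the comments here are mine, not from the patch):

/* Raw add: no limit check, no length accounting. For paths that
 * know the backlog cannot overflow (the OOB and child-socket cases). */
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb);

/* Checked add: returns -ENOBUFS once sk_backlog.len reaches
 * max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1); on success it
 * charges skb->truesize against sk_backlog.len. The per-socket
 * spinlock must be held. */
static inline int sk_add_backlog(struct sock *sk, struct sk_buff *skb);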
include/net/sock.h

@@ -592,7 +592,7 @@ static inline int sk_stream_memory_free(struct sock *sk)
 }
 
 /* OOB backlog add */
-static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
 	if (!sk->sk_backlog.tail) {
 		sk->sk_backlog.head = sk->sk_backlog.tail = skb;
@@ -604,12 +604,12 @@ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 }
 
 /* The per-socket spinlock must be held here. */
-static inline int sk_add_backlog_limited(struct sock *sk, struct sk_buff *skb)
+static inline int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
 	if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
 		return -ENOBUFS;
 
-	sk_add_backlog(sk, skb);
+	__sk_add_backlog(sk, skb);
 	sk->sk_backlog.len += skb->truesize;
 	return 0;
 }
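Most per-protocol hunks below are the same mechanical substitution at the spot where a packet arrives while a process owns the socket (the child-socket and LLC timer paths keep the raw __sk_add_backlog instead). A minimal sketch of that shared receive-path idiom; illustrative only, example_queue_rcv is a made-up name and the drop bookkeeping varies per protocol:

#include <net/sock.h>

static int example_queue_rcv(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/* No process holds the socket: process in softirq context. */
		rc = sk_backlog_rcv(sk, skb);
	} else if (sk_add_backlog(sk, skb)) {
		/* Backlog over its limit: the renamed sk_add_backlog()
		 * (formerly sk_add_backlog_limited()) returned -ENOBUFS,
		 * so count and free the dropped skb. */
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
		rc = -ENOBUFS;
	}
	bh_unlock_sock(sk);
	return rc;
}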
net/core/sock.c

@@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 		rc = sk_backlog_rcv(sk, skb);
 
 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-	} else if (sk_add_backlog_limited(sk, skb)) {
+	} else if (sk_add_backlog(sk, skb)) {
 		bh_unlock_sock(sk);
 		atomic_inc(&sk->sk_drops);
 		goto discard_and_relse;
net/dccp/minisocks.c

@@ -254,7 +254,7 @@ int dccp_child_process(struct sock *parent, struct sock *child,
 		 * in main socket hash table and lock on listening
 		 * socket does not protect us more.
 		 */
-		sk_add_backlog(child, skb);
+		__sk_add_backlog(child, skb);
 	}
 
 	bh_unlock_sock(child);
net/ipv4/tcp_ipv4.c

@@ -1682,7 +1682,7 @@ process:
 			if (!tcp_prequeue(sk, skb))
 				ret = tcp_v4_do_rcv(sk, skb);
 		}
-	} else if (sk_add_backlog_limited(sk, skb)) {
+	} else if (sk_add_backlog(sk, skb)) {
 		bh_unlock_sock(sk);
 		goto discard_and_relse;
 	}
net/ipv4/tcp_minisocks.c

@@ -728,7 +728,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
 		 * in main socket hash table and lock on listening
 		 * socket does not protect us more.
 		 */
-		sk_add_backlog(child, skb);
+		__sk_add_backlog(child, skb);
 	}
 
 	bh_unlock_sock(child);
net/ipv4/udp.c

@@ -1371,7 +1371,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		rc = __udp_queue_rcv_skb(sk, skb);
-	else if (sk_add_backlog_limited(sk, skb)) {
+	else if (sk_add_backlog(sk, skb)) {
 		bh_unlock_sock(sk);
 		goto drop;
 	}
net/ipv6/tcp_ipv6.c

@@ -1740,7 +1740,7 @@ process:
 			if (!tcp_prequeue(sk, skb))
 				ret = tcp_v6_do_rcv(sk, skb);
 		}
-	} else if (sk_add_backlog_limited(sk, skb)) {
+	} else if (sk_add_backlog(sk, skb)) {
 		bh_unlock_sock(sk);
 		goto discard_and_relse;
 	}
net/ipv6/udp.c

@@ -583,7 +583,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
 			bh_lock_sock(sk);
 			if (!sock_owned_by_user(sk))
 				udpv6_queue_rcv_skb(sk, skb1);
-			else if (sk_add_backlog_limited(sk, skb1)) {
+			else if (sk_add_backlog(sk, skb1)) {
 				kfree_skb(skb1);
 				bh_unlock_sock(sk);
 				goto drop;
@@ -758,7 +758,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 		bh_lock_sock(sk);
 		if (!sock_owned_by_user(sk))
 			udpv6_queue_rcv_skb(sk, skb);
-		else if (sk_add_backlog_limited(sk, skb)) {
+		else if (sk_add_backlog(sk, skb)) {
 			atomic_inc(&sk->sk_drops);
 			bh_unlock_sock(sk);
 			sock_put(sk);
net/llc/llc_c_ac.c

@@ -1437,7 +1437,7 @@ static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb)
 			llc_conn_state_process(sk, skb);
 		else {
 			llc_set_backlog_type(skb, LLC_EVENT);
-			sk_add_backlog(sk, skb);
+			__sk_add_backlog(sk, skb);
 		}
 	}
 }
net/llc/llc_conn.c

@@ -827,7 +827,7 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
 	else {
 		dprintk("%s: adding to backlog...\n", __func__);
 		llc_set_backlog_type(skb, LLC_PACKET);
-		if (sk_add_backlog_limited(sk, skb))
+		if (sk_add_backlog(sk, skb))
 			goto drop_unlock;
 	}
 out:
net/sctp/input.c

@@ -341,7 +341,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 		sctp_bh_lock_sock(sk);
 
 		if (sock_owned_by_user(sk)) {
-			if (sk_add_backlog_limited(sk, skb))
+			if (sk_add_backlog(sk, skb))
 				sctp_chunk_free(chunk);
 			else
 				backloged = 1;
@@ -375,7 +375,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
 	struct sctp_ep_common *rcvr = chunk->rcvr;
 	int ret;
 
-	ret = sk_add_backlog_limited(sk, skb);
+	ret = sk_add_backlog(sk, skb);
 	if (!ret) {
 		/* Hold the assoc/ep while hanging on the backlog queue.
 		 * This way, we know structures we need will not disappear
net/tipc/socket.c

@@ -1322,7 +1322,7 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
 	if (!sock_owned_by_user(sk)) {
 		res = filter_rcv(sk, buf);
 	} else {
-		if (sk_add_backlog_limited(sk, buf))
+		if (sk_add_backlog(sk, buf))
 			res = TIPC_ERR_OVERLOAD;
 		else
 			res = TIPC_OK;
net/x25/x25_dev.c

@@ -53,7 +53,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
 		if (!sock_owned_by_user(sk)) {
 			queued = x25_process_rx_frame(sk, skb);
 		} else {
-			queued = !sk_add_backlog_limited(sk, skb);
+			queued = !sk_add_backlog(sk, skb);
 		}
 		bh_unlock_sock(sk);
 		sock_put(sk);