tcp: minor optimization in tcp_add_backlog()
[ Upstream commit d519f350967a60b85a574ad8aeac43f2b4384746 ]
If the packet is going to be coalesced, the sk_sndbuf/sk_rcvbuf values
are not used. Defer their access to the point where we need them.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Stable-dep-of: ec00ed472bdb ("tcp: avoid premature drops in tcp_add_backlog()")
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent 539c4bf754
commit 527eaa5aa6
1 changed file with 2 additions and 3 deletions
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1678,8 +1678,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
 
 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
-	u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
-	u32 tail_gso_size, tail_gso_segs;
+	u32 limit, tail_gso_size, tail_gso_segs;
 	struct skb_shared_info *shinfo;
 	const struct tcphdr *th;
 	struct tcphdr *thtail;
@@ -1786,7 +1785,7 @@ no_coalesce:
 	 * to reduce memory overhead, so add a little headroom here.
	 * Few sockets backlog are possibly concurrently non empty.
	 */
-	limit += 64*1024;
+	limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf) + 64*1024;
 
	if (unlikely(sk_add_backlog(sk, skb, limit))) {
		bh_unlock_sock(sk);
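For readers outside the kernel tree, the following is a minimal userspace sketch (not the actual kernel code) of the pattern the patch applies: the two READ_ONCE() loads of sk_rcvbuf and sk_sndbuf are moved out of the function prologue and into the no_coalesce path, so a packet that is coalesced into the backlog tail never reads them. The names fake_sock, try_coalesce() and queue_to_backlog() are made up for illustration.

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct sock; only the fields used here. */
struct fake_sock {
	unsigned int sk_rcvbuf;
	unsigned int sk_sndbuf;
	unsigned int backlog_len;
};

/* Pretend coalescing succeeds for "small" packets (illustration only). */
static bool try_coalesce(unsigned int pkt_len)
{
	return pkt_len < 128;
}

static bool queue_to_backlog(struct fake_sock *sk, unsigned int pkt_len,
			     unsigned int limit)
{
	if (sk->backlog_len + pkt_len > limit)
		return false;		/* over the limit: would be dropped */
	sk->backlog_len += pkt_len;
	return true;
}

/*
 * Sketch of the patched control flow: "limit" is only computed on the
 * path that actually uses it, mirroring how the patch defers the
 * sk_rcvbuf/sk_sndbuf reads until after the coalesce attempt fails.
 */
static bool add_backlog(struct fake_sock *sk, unsigned int pkt_len)
{
	unsigned int limit;

	if (try_coalesce(pkt_len))
		return true;	/* coalesced: buffer sizes never read */

	/* no_coalesce: the limit is needed now, so read the fields here. */
	limit = sk->sk_rcvbuf + sk->sk_sndbuf + 64 * 1024;
	return queue_to_backlog(sk, pkt_len, limit);
}

int main(void)
{
	struct fake_sock sk = { .sk_rcvbuf = 212992, .sk_sndbuf = 212992 };

	printf("small packet queued: %d\n", add_backlog(&sk, 64));
	printf("large packet queued: %d\n", add_backlog(&sk, 64000));
	return 0;
}
```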