2005-04-17 05:20:36 +07:00
|
|
|
/*
|
|
|
|
* SUCS NET3:
|
|
|
|
*
|
|
|
|
* Generic stream handling routines. These are generic for most
|
|
|
|
* protocols. Even IP. Tonight 8-).
|
|
|
|
* This is used because TCP, LLC (others too) layer all have mostly
|
|
|
|
* identical sendmsg() and recvmsg() code.
|
|
|
|
* So we (will) share it here.
|
|
|
|
*
|
|
|
|
* Authors: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
|
|
|
|
* (from old tcp.c code)
|
2008-10-14 09:01:08 +07:00
|
|
|
* Alan Cox <alan@lxorguk.ukuu.org.uk> (Borrowed comments 8-))
|
2005-04-17 05:20:36 +07:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/net.h>
|
|
|
|
#include <linux/signal.h>
|
|
|
|
#include <linux/tcp.h>
|
|
|
|
#include <linux/wait.h>
|
|
|
|
#include <net/sock.h>
|
|
|
|
|
|
|
|
/**
 * sk_stream_write_space - stream socket write_space callback.
 * @sk: socket
 *
 * Invoked when send-queue space becomes available again.  If the socket
 * is writeable, clear SOCK_NOSPACE, wake any task sleeping on the socket
 * waitqueue for POLLOUT, and notify fasync (SIGIO) subscribers — unless
 * the send side has already been shut down.
 */
void sk_stream_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	struct socket_wq *wq;

	/* sock can be NULL while the sk is being orphaned/torn down. */
	if (sk_stream_is_writeable(sk) && sock) {
		clear_bit(SOCK_NOSPACE, &sock->flags);

		/* sk_wq is RCU-protected; hold the read lock across both
		 * the sleeper wakeup and the fasync notification.
		 */
		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);
		/* No SIGIO "writable" notification after SEND_SHUTDOWN. */
		if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
			sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(sk_stream_write_space);
|
|
|
|
|
|
|
|
/**
 * sk_stream_wait_connect - Wait for a socket to get into the connected state
 * @sk: sock to wait on
 * @timeo_p: for how long to wait; updated with the remaining time on return
 *
 * Sleeps until the socket leaves SYN_SENT/SYN_RECV for an established
 * state, the timeout expires, a signal arrives, or a socket error is
 * raised.
 *
 * Must be called with the socket locked.
 *
 * Returns 0 once connected; otherwise a negative errno: the pending
 * socket error, -EPIPE if the socket is in a state that can never become
 * connected, -EAGAIN when @timeo_p is exhausted, or a signal errno.
 */
int sk_stream_wait_connect(struct sock *sk, long *timeo_p)
{
	struct task_struct *tsk = current;
	DEFINE_WAIT(wait);
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		/* Only SYN_SENT/SYN_RECV can still transition to
		 * connected; any other state means we will never get there.
		 */
		if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV))
			return -EPIPE;
		if (!*timeo_p)
			return -EAGAIN;
		if (signal_pending(tsk))
			return sock_intr_errno(*timeo_p);

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		/* Advertise a waiter so write-space callbacks wake us. */
		sk->sk_write_pending++;
		/* NOTE(review): sk_wait_event() releases the socket lock
		 * while sleeping and retakes it — ordering here matters.
		 */
		done = sk_wait_event(sk, timeo_p,
				     !sk->sk_err &&
				     !((1 << sk->sk_state) &
				       ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)));
		finish_wait(sk_sleep(sk), &wait);
		sk->sk_write_pending--;
	} while (!done);
	return 0;
}
EXPORT_SYMBOL(sk_stream_wait_connect);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* sk_stream_closing - Return 1 if we still have things to send in our buffers.
|
2005-05-01 22:59:25 +07:00
|
|
|
* @sk: socket to verify
|
2005-04-17 05:20:36 +07:00
|
|
|
*/
|
|
|
|
static inline int sk_stream_closing(struct sock *sk)
|
|
|
|
{
|
|
|
|
return (1 << sk->sk_state) &
|
|
|
|
(TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Wait (up to @timeout jiffies) for the socket to finish flushing
 * queued data during close, i.e. until sk_stream_closing() is false.
 * Gives up early on a pending signal.  A zero timeout returns at once.
 */
void sk_stream_wait_close(struct sock *sk, long timeout)
{
	if (timeout) {
		DEFINE_WAIT(wait);

		do {
			prepare_to_wait(sk_sleep(sk), &wait,
					TASK_INTERRUPTIBLE);
			if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk)))
				break;
			/* Keep waiting until interrupted or timed out. */
		} while (!signal_pending(current) && timeout);

		finish_wait(sk_sleep(sk), &wait);
	}
}
EXPORT_SYMBOL(sk_stream_wait_close);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* sk_stream_wait_memory - Wait for more memory for a socket
|
2005-05-01 22:59:25 +07:00
|
|
|
* @sk: socket to wait for memory
|
|
|
|
* @timeo_p: for how long
|
2005-04-17 05:20:36 +07:00
|
|
|
*/
|
|
|
|
int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
|
|
|
|
{
|
|
|
|
int err = 0;
|
|
|
|
long vm_wait = 0;
|
|
|
|
long current_timeo = *timeo_p;
|
tcp: set SOCK_NOSPACE under memory pressure
Under tcp memory pressure, calling epoll_wait() in edge triggered
mode after -EAGAIN, can result in an indefinite hang in epoll_wait(),
even when there is sufficient memory available to continue making
progress. The problem is that when __sk_mem_schedule() returns 0
under memory pressure, we do not set the SOCK_NOSPACE flag in the
tcp write paths (tcp_sendmsg() or do_tcp_sendpages()). Then, since
SOCK_NOSPACE is used to trigger wakeups when incoming acks create
sufficient new space in the write queue, all outstanding packets
are acked, but we never wake up with the the EPOLLOUT that we are
expecting from epoll_wait().
This issue is currently limited to epoll() when used in edge trigger
mode, since 'tcp_poll()', does in fact currently set SOCK_NOSPACE.
This is sufficient for poll()/select() and epoll() in level trigger
mode. However, in edge trigger mode, epoll() is relying on the write
path to set SOCK_NOSPACE. EPOLL(7) says that in edge-trigger mode we
can only call epoll_wait() after read/write return -EAGAIN. Thus, in
the case of the socket write, we are relying on the fact that
tcp_sendmsg()/network write paths are going to issue a wakeup for
us at some point in the future when we get -EAGAIN.
Normally, epoll() edge trigger works fine when we've exceeded the
sk->sndbuf because in that case we do set SOCK_NOSPACE. However, when
we return -EAGAIN from the write path b/c we are over the tcp memory
limits and not b/c we are over the sndbuf, we are never going to get
another wakeup.
I can reproduce this issue, using SO_SNDBUF, since __sk_mem_schedule()
will return 0, or failure more readily with SO_SNDBUF:
1) create socket and set SO_SNDBUF to N
2) add socket as edge trigger
3) write to socket and block in epoll on -EAGAIN
4) cause tcp mem pressure via: echo "<small val>" > net.ipv4.tcp_mem
The fix here is simply to set SOCK_NOSPACE in sk_stream_wait_memory()
when the socket is non-blocking. Note that SOCK_NOSPACE, in addition
to waking up outstanding waiters is also used to expand the size of
the sk->sndbuf. However, we will not expand it by setting it in this
case because tcp_should_expand_sndbuf(), ensures that no expansion
occurs when we are under tcp memory pressure.
Note that we could still hang if sk->sk_wmem_queue is 0, when we get
the -EAGAIN. In this case the SOCK_NOSPACE bit will not help, since we
are waiting for and event that will never happen. I believe
that this case is harder to hit (and did not hit in my testing),
in that over the tcp 'soft' memory limits, we continue to guarantee a
minimum write buffer size. Perhaps, we could return -ENOSPC in this
case, or maybe we simply issue a wakeup in this case, such that we
keep retrying the write. Note that this case is not specific to
epoll() ET, but rather would affect blocking sockets as well. So I
view this patch as bringing epoll() edge-trigger into sync with the
current poll()/select()/epoll() level trigger and blocking sockets
behavior.
Signed-off-by: Jason Baron <jbaron@akamai.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2015-05-06 22:52:23 +07:00
|
|
|
bool noblock = (*timeo_p ? false : true);
|
2005-04-17 05:20:36 +07:00
|
|
|
DEFINE_WAIT(wait);
|
|
|
|
|
|
|
|
if (sk_stream_memory_free(sk))
|
2014-01-11 19:15:59 +07:00
|
|
|
current_timeo = vm_wait = (prandom_u32() % (HZ / 5)) + 2;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
while (1) {
|
2015-11-30 11:03:10 +07:00
|
|
|
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2010-04-20 20:03:51 +07:00
|
|
|
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
|
|
|
|
goto do_error;
|
tcp: set SOCK_NOSPACE under memory pressure
Under tcp memory pressure, calling epoll_wait() in edge triggered
mode after -EAGAIN, can result in an indefinite hang in epoll_wait(),
even when there is sufficient memory available to continue making
progress. The problem is that when __sk_mem_schedule() returns 0
under memory pressure, we do not set the SOCK_NOSPACE flag in the
tcp write paths (tcp_sendmsg() or do_tcp_sendpages()). Then, since
SOCK_NOSPACE is used to trigger wakeups when incoming acks create
sufficient new space in the write queue, all outstanding packets
are acked, but we never wake up with the the EPOLLOUT that we are
expecting from epoll_wait().
This issue is currently limited to epoll() when used in edge trigger
mode, since 'tcp_poll()', does in fact currently set SOCK_NOSPACE.
This is sufficient for poll()/select() and epoll() in level trigger
mode. However, in edge trigger mode, epoll() is relying on the write
path to set SOCK_NOSPACE. EPOLL(7) says that in edge-trigger mode we
can only call epoll_wait() after read/write return -EAGAIN. Thus, in
the case of the socket write, we are relying on the fact that
tcp_sendmsg()/network write paths are going to issue a wakeup for
us at some point in the future when we get -EAGAIN.
Normally, epoll() edge trigger works fine when we've exceeded the
sk->sndbuf because in that case we do set SOCK_NOSPACE. However, when
we return -EAGAIN from the write path b/c we are over the tcp memory
limits and not b/c we are over the sndbuf, we are never going to get
another wakeup.
I can reproduce this issue, using SO_SNDBUF, since __sk_mem_schedule()
will return 0, or failure more readily with SO_SNDBUF:
1) create socket and set SO_SNDBUF to N
2) add socket as edge trigger
3) write to socket and block in epoll on -EAGAIN
4) cause tcp mem pressure via: echo "<small val>" > net.ipv4.tcp_mem
The fix here is simply to set SOCK_NOSPACE in sk_stream_wait_memory()
when the socket is non-blocking. Note that SOCK_NOSPACE, in addition
to waking up outstanding waiters is also used to expand the size of
the sk->sndbuf. However, we will not expand it by setting it in this
case because tcp_should_expand_sndbuf(), ensures that no expansion
occurs when we are under tcp memory pressure.
Note that we could still hang if sk->sk_wmem_queue is 0, when we get
the -EAGAIN. In this case the SOCK_NOSPACE bit will not help, since we
are waiting for and event that will never happen. I believe
that this case is harder to hit (and did not hit in my testing),
in that over the tcp 'soft' memory limits, we continue to guarantee a
minimum write buffer size. Perhaps, we could return -ENOSPC in this
case, or maybe we simply issue a wakeup in this case, such that we
keep retrying the write. Note that this case is not specific to
epoll() ET, but rather would affect blocking sockets as well. So I
view this patch as bringing epoll() edge-trigger into sync with the
current poll()/select()/epoll() level trigger and blocking sockets
behavior.
Signed-off-by: Jason Baron <jbaron@akamai.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2015-05-06 22:52:23 +07:00
|
|
|
if (!*timeo_p) {
|
|
|
|
if (noblock)
|
|
|
|
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
|
2005-04-17 05:20:36 +07:00
|
|
|
goto do_nonblock;
|
tcp: set SOCK_NOSPACE under memory pressure
Under tcp memory pressure, calling epoll_wait() in edge triggered
mode after -EAGAIN, can result in an indefinite hang in epoll_wait(),
even when there is sufficient memory available to continue making
progress. The problem is that when __sk_mem_schedule() returns 0
under memory pressure, we do not set the SOCK_NOSPACE flag in the
tcp write paths (tcp_sendmsg() or do_tcp_sendpages()). Then, since
SOCK_NOSPACE is used to trigger wakeups when incoming acks create
sufficient new space in the write queue, all outstanding packets
are acked, but we never wake up with the the EPOLLOUT that we are
expecting from epoll_wait().
This issue is currently limited to epoll() when used in edge trigger
mode, since 'tcp_poll()', does in fact currently set SOCK_NOSPACE.
This is sufficient for poll()/select() and epoll() in level trigger
mode. However, in edge trigger mode, epoll() is relying on the write
path to set SOCK_NOSPACE. EPOLL(7) says that in edge-trigger mode we
can only call epoll_wait() after read/write return -EAGAIN. Thus, in
the case of the socket write, we are relying on the fact that
tcp_sendmsg()/network write paths are going to issue a wakeup for
us at some point in the future when we get -EAGAIN.
Normally, epoll() edge trigger works fine when we've exceeded the
sk->sndbuf because in that case we do set SOCK_NOSPACE. However, when
we return -EAGAIN from the write path b/c we are over the tcp memory
limits and not b/c we are over the sndbuf, we are never going to get
another wakeup.
I can reproduce this issue, using SO_SNDBUF, since __sk_mem_schedule()
will return 0, or failure more readily with SO_SNDBUF:
1) create socket and set SO_SNDBUF to N
2) add socket as edge trigger
3) write to socket and block in epoll on -EAGAIN
4) cause tcp mem pressure via: echo "<small val>" > net.ipv4.tcp_mem
The fix here is simply to set SOCK_NOSPACE in sk_stream_wait_memory()
when the socket is non-blocking. Note that SOCK_NOSPACE, in addition
to waking up outstanding waiters is also used to expand the size of
the sk->sndbuf. However, we will not expand it by setting it in this
case because tcp_should_expand_sndbuf(), ensures that no expansion
occurs when we are under tcp memory pressure.
Note that we could still hang if sk->sk_wmem_queue is 0, when we get
the -EAGAIN. In this case the SOCK_NOSPACE bit will not help, since we
are waiting for and event that will never happen. I believe
that this case is harder to hit (and did not hit in my testing),
in that over the tcp 'soft' memory limits, we continue to guarantee a
minimum write buffer size. Perhaps, we could return -ENOSPC in this
case, or maybe we simply issue a wakeup in this case, such that we
keep retrying the write. Note that this case is not specific to
epoll() ET, but rather would affect blocking sockets as well. So I
view this patch as bringing epoll() edge-trigger into sync with the
current poll()/select()/epoll() level trigger and blocking sockets
behavior.
Signed-off-by: Jason Baron <jbaron@akamai.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2015-05-06 22:52:23 +07:00
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
if (signal_pending(current))
|
|
|
|
goto do_interrupted;
|
2015-11-30 11:03:10 +07:00
|
|
|
sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
|
2005-04-17 05:20:36 +07:00
|
|
|
if (sk_stream_memory_free(sk) && !vm_wait)
|
|
|
|
break;
|
|
|
|
|
|
|
|
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
|
|
|
|
sk->sk_write_pending++;
|
2010-10-03 06:45:06 +07:00
|
|
|
sk_wait_event(sk, ¤t_timeo, sk->sk_err ||
|
|
|
|
(sk->sk_shutdown & SEND_SHUTDOWN) ||
|
|
|
|
(sk_stream_memory_free(sk) &&
|
|
|
|
!vm_wait));
|
2005-04-17 05:20:36 +07:00
|
|
|
sk->sk_write_pending--;
|
|
|
|
|
|
|
|
if (vm_wait) {
|
|
|
|
vm_wait -= current_timeo;
|
|
|
|
current_timeo = *timeo_p;
|
|
|
|
if (current_timeo != MAX_SCHEDULE_TIMEOUT &&
|
|
|
|
(current_timeo -= vm_wait) < 0)
|
|
|
|
current_timeo = 0;
|
|
|
|
vm_wait = 0;
|
|
|
|
}
|
|
|
|
*timeo_p = current_timeo;
|
|
|
|
}
|
|
|
|
out:
|
2010-04-20 20:03:51 +07:00
|
|
|
finish_wait(sk_sleep(sk), &wait);
|
2005-04-17 05:20:36 +07:00
|
|
|
return err;
|
|
|
|
|
|
|
|
do_error:
|
|
|
|
err = -EPIPE;
|
|
|
|
goto out;
|
|
|
|
do_nonblock:
|
|
|
|
err = -EAGAIN;
|
|
|
|
goto out;
|
|
|
|
do_interrupted:
|
|
|
|
err = sock_intr_errno(*timeo_p);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(sk_stream_wait_memory);
|
|
|
|
|
|
|
|
int sk_stream_error(struct sock *sk, int flags, int err)
|
|
|
|
{
|
|
|
|
if (err == -EPIPE)
|
|
|
|
err = sock_error(sk) ? : -EPIPE;
|
|
|
|
if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
|
|
|
|
send_sig(SIGPIPE, current, 0);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(sk_stream_error);
|
|
|
|
|
|
|
|
/* Purge a dying stream socket's queues and return accounted memory to
 * the protocol.  The write queue must already be empty, and no user
 * references to the socket may remain.
 */
void sk_stream_kill_queues(struct sock *sk)
{
	/* First the read buffer. */
	__skb_queue_purge(&sk->sk_receive_queue);

	/* Next, the error queue. */
	__skb_queue_purge(&sk->sk_error_queue);

	/* Next, the write queue. */
	WARN_ON(!skb_queue_empty(&sk->sk_write_queue));

	/* Account for returned memory. */
	sk_mem_reclaim(sk);

	/* All send-queue memory must have been released by now. */
	WARN_ON(sk->sk_wmem_queued);
	WARN_ON(sk->sk_forward_alloc);

	/* It is _impossible_ for the backlog to contain anything
	 * when we get here.  All user references to this socket
	 * have gone away; only the net layer can touch it.
	 */
}
EXPORT_SYMBOL(sk_stream_kill_queues);
|