Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--  net/ipv4/tcp.c  65
1 file changed, 44 insertions(+), 21 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5901010..6afb6d8 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -429,7 +429,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 		if (tp->urg_seq == tp->copied_seq &&
 		    !sock_flag(sk, SOCK_URGINLINE) &&
 		    tp->urg_data)
-			target--;
+			target++;
 
 		/* Potential race condition. If read of tp below will
 		 * escape above sk->sk_state, we can be illegally awaken
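Rationale for the tcp_poll() change: when the byte at copied_seq is urgent data and SOCK_URGINLINE is not set, a normal read() skips that byte, so one extra in-band byte must arrive before the SO_RCVLOWAT target can be satisfied; the old code lowered the target instead. A minimal userspace sketch of the semantics this governs (the helper name and error handling are illustrative, not from the patch):

#include <poll.h>
#include <stdio.h>
#include <sys/socket.h>

/* 'fd' is assumed to be a connected TCP socket. */
static int wait_for_lowat(int fd, int lowat)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	/* Do not report readability until 'lowat' in-band bytes are ready. */
	if (setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat)) < 0) {
		perror("setsockopt(SO_RCVLOWAT)");
		return -1;
	}
	/* With the fix, a pending non-inline urgent byte at copied_seq
	 * raises the effective poll target by one instead of lowering it. */
	return poll(&pfd, 1, -1);
}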
@@ -1254,6 +1254,39 @@ static void tcp_prequeue_process(struct sock *sk)
 	tp->ucopy.memory = 0;
 }
 
+#ifdef CONFIG_NET_DMA
+static void tcp_service_net_dma(struct sock *sk, bool wait)
+{
+	dma_cookie_t done, used;
+	dma_cookie_t last_issued;
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (!tp->ucopy.dma_chan)
+		return;
+
+	last_issued = tp->ucopy.dma_cookie;
+	dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+
+	do {
+		if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
+					      last_issued, &done,
+					      &used) == DMA_SUCCESS) {
+			/* Safe to free early-copied skbs now */
+			__skb_queue_purge(&sk->sk_async_wait_queue);
+			break;
+		} else {
+			struct sk_buff *skb;
+			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
+			       (dma_async_is_complete(skb->dma_cookie, done,
+						      used) == DMA_SUCCESS)) {
+				__skb_dequeue(&sk->sk_async_wait_queue);
+				kfree_skb(skb);
+			}
+		}
+	} while (wait);
+}
+#endif
+
 static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 {
 	struct sk_buff *skb;
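For reference, tcp_service_net_dma() keys off dmaengine completion cookies: each queued copy gets a monotonically increasing dma_cookie_t, and dma_async_memcpy_complete()/dma_async_is_complete() report whether a given cookie still lies in the in-flight window. A standalone model of that comparison (simplified from the linux/dmaengine.h helper of this era; cookie_status is an invented name):

#include <stdio.h>

typedef int dma_cookie_t;
enum dma_status { DMA_SUCCESS, DMA_IN_PROGRESS };

/* A cookie is done unless it lies in the (last_complete, last_used]
 * window of still-in-flight work; the two branches handle wraparound
 * of the signed cookie counter. */
static enum dma_status cookie_status(dma_cookie_t cookie,
				     dma_cookie_t last_complete,
				     dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if (cookie <= last_complete || cookie > last_used)
			return DMA_SUCCESS;
	} else {
		if (cookie <= last_complete && cookie > last_used)
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}

int main(void)
{
	/* Copies 1..7 issued, engine has completed through 5. */
	printf("%d\n", cookie_status(4, 5, 7));	/* DMA_SUCCESS (0) */
	printf("%d\n", cookie_status(6, 5, 7));	/* DMA_IN_PROGRESS (1) */
	return 0;
}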
@@ -1546,6 +1579,10 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			/* __ Set realtime policy in scheduler __ */
 		}
 
+#ifdef CONFIG_NET_DMA
+		if (tp->ucopy.dma_chan)
+			dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+#endif
 		if (copied >= target) {
 			/* Do not sleep, just process backlog. */
 			release_sock(sk);
@@ -1554,6 +1591,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			sk_wait_data(sk, &timeo);
 
 #ifdef CONFIG_NET_DMA
+		tcp_service_net_dma(sk, false);	/* Don't block */
 		tp->ucopy.wakeup = 0;
 #endif
 
@@ -1633,6 +1671,9 @@ do_prequeue:
 							copied = -EFAULT;
 						break;
 					}
+
+					dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+
 					if ((offset + used) == skb->len)
 						copied_early = 1;
 
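This hunk and the one at line 1579 add explicit kicks: dmaengine clients queue copies on a channel's pending list, and nothing reaches the hardware until dma_async_memcpy_issue_pending() is called, so the patch issues the queue right after queueing each user-buffer copy and again before sleeping. A hedged userspace analogue of the submit/kick split (all names invented for the sketch):

#include <stdio.h>

#define MAX_OPS 16

static int pending[MAX_OPS];
static int npending;
static int next_cookie = 1;

/* Queue a copy; nothing runs yet (mirrors the dma_async_memcpy_* calls). */
static int pend_copy(void)
{
	pending[npending++] = next_cookie;
	return next_cookie++;
}

/* Kick the engine: start everything queued so far
 * (mirrors dma_async_memcpy_issue_pending). */
static void issue_pending(void)
{
	for (int i = 0; i < npending; i++)
		printf("hardware starts copy %d\n", pending[i]);
	npending = 0;
}

int main(void)
{
	pend_copy();
	pend_copy();	/* batching several copies amortizes the doorbell */
	issue_pending();
	return 0;
}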
@@ -1702,27 +1743,9 @@ skip_copy:
 	}
 
 #ifdef CONFIG_NET_DMA
-	if (tp->ucopy.dma_chan) {
-		dma_cookie_t done, used;
-
-		dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
-
-		while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
-						 tp->ucopy.dma_cookie, &done,
-						 &used) == DMA_IN_PROGRESS) {
-			/* do partial cleanup of sk_async_wait_queue */
-			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
-			       (dma_async_is_complete(skb->dma_cookie, done,
-						      used) == DMA_SUCCESS)) {
-				__skb_dequeue(&sk->sk_async_wait_queue);
-				kfree_skb(skb);
-			}
-		}
+	tcp_service_net_dma(sk, true);	/* Wait for queue to drain */
+	tp->ucopy.dma_chan = NULL;
 
-		/* Safe to free early-copied skbs now */
-		__skb_queue_purge(&sk->sk_async_wait_queue);
-		tp->ucopy.dma_chan = NULL;
-	}
 	if (tp->ucopy.pinned_list) {
 		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
 		tp->ucopy.pinned_list = NULL;
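The net effect: skbs whose payload may still be in flight through the copy engine are parked on sk_async_wait_queue and freed only once their cookie completes, either incrementally (wait=false) or by a final draining pass (wait=true) as above. A standalone model of that deferred-free pattern (queue and names invented for the sketch):

#include <stdio.h>
#include <stdlib.h>

/* One in-flight buffer: freed only after the copy engine is done with it. */
struct buf {
	int cookie;		/* cookie assigned when the async copy was issued */
	struct buf *next;
};

static struct buf *queue_head, **queue_tail = &queue_head;

static void enqueue(int cookie)
{
	struct buf *b = malloc(sizeof(*b));
	b->cookie = cookie;
	b->next = NULL;
	*queue_tail = b;
	queue_tail = &b->next;
}

/* Free the prefix of the queue whose copies have completed; with a
 * monotonic engine, completion of cookie N covers all cookies <= N. */
static void service(int last_complete)
{
	while (queue_head && queue_head->cookie <= last_complete) {
		struct buf *b = queue_head;
		queue_head = b->next;
		if (!queue_head)
			queue_tail = &queue_head;
		printf("freeing buffer for cookie %d\n", b->cookie);
		free(b);
	}
}

int main(void)
{
	for (int c = 1; c <= 5; c++)
		enqueue(c);
	service(2);	/* a non-blocking pass frees cookies 1 and 2 */
	service(5);	/* the final draining pass frees the rest */
	return 0;
}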