Git Repo - linux.git/blobdiff - net/core/sock.c
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
index 611f424fb76b9349944e4dd3cf722ff33396ba61..b4f3ea2fce60cbb72d90434ad97ca26d682135a9 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -437,7 +437,6 @@ static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
        int err;
-       int skb_len;
        unsigned long flags;
        struct sk_buff_head *list = &sk->sk_receive_queue;
 
@@ -459,13 +458,6 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        skb->dev = NULL;
        skb_set_owner_r(skb, sk);
 
-       /* Cache the SKB length before we tack it onto the receive
-        * queue.  Once it is added it no longer belongs to us and
-        * may be freed by other threads of control pulling packets
-        * from the queue.
-        */
-       skb_len = skb->len;
-
        /* we escape from rcu protected region, make sure we dont leak
         * a norefcounted dst
         */
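
The local variable dropped in the previous hunk pairs with this removal: skb->len
was copied before the enqueue because, as the deleted comment notes, the skb may
be pulled off sk_receive_queue and freed by another thread as soon as it is
linked in. The wakeup at the end of sock_queue_rcv_skb() is outside these hunks,
so the fragment below is only a hedged illustration of why the cached length is
presumed unnecessary once the sk_data_ready() callback no longer takes a length
argument.

	/* Illustration only - the sk_data_ready() call is not shown in this diff.
	 *
	 * Old pattern (the length had to be cached before the enqueue):
	 *
	 *	skb_len = skb->len;
	 *	__skb_queue_tail(list, skb);
	 *	sk->sk_data_ready(sk, skb_len);
	 *
	 * Assumed new pattern (no field of the skb is read after it is queued):
	 *
	 *	__skb_queue_tail(list, skb);
	 *	sk->sk_data_ready(sk);
	 */
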
@@ -1642,18 +1634,24 @@ void sock_rfree(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(sock_rfree);
 
+void sock_efree(struct sk_buff *skb)
+{
+       sock_put(skb->sk);
+}
+EXPORT_SYMBOL(sock_efree);
+
+#ifdef CONFIG_INET
 void sock_edemux(struct sk_buff *skb)
 {
        struct sock *sk = skb->sk;
 
-#ifdef CONFIG_INET
        if (sk->sk_state == TCP_TIME_WAIT)
                inet_twsk_put(inet_twsk(sk));
        else
-#endif
                sock_put(sk);
 }
 EXPORT_SYMBOL(sock_edemux);
+#endif
 
 kuid_t sock_i_uid(struct sock *sk)
 {
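
sock_efree() is a new minimal skb destructor that only drops the socket reference
the skb holds; sock_edemux(), which additionally has to release TIME_WAIT
minisockets through inet_twsk_put(), now lives entirely under CONFIG_INET instead
of ifdef'ing out just the TIME_WAIT branch. The helper below is a hypothetical
illustration, not part of this diff, of how a caller might pair sock_hold() with
the new destructor when attaching a socket to an skb:

	/* Hypothetical example - not from this diff.  Take a plain reference on
	 * the socket and let sock_efree() release it when the skb is freed.
	 */
	static void example_skb_set_owner(struct sk_buff *skb, struct sock *sk)
	{
		sock_hold(sk);		/* balanced by sock_put() in sock_efree() */
		skb->sk = sk;
		skb->destructor = sock_efree;
	}
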
@@ -1761,21 +1759,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
                                     unsigned long data_len, int noblock,
                                     int *errcode, int max_page_order)
 {
-       struct sk_buff *skb = NULL;
-       unsigned long chunk;
-       gfp_t gfp_mask;
+       struct sk_buff *skb;
        long timeo;
        int err;
-       int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-       struct page *page;
-       int i;
-
-       err = -EMSGSIZE;
-       if (npages > MAX_SKB_FRAGS)
-               goto failure;
 
        timeo = sock_sndtimeo(sk, noblock);
-       while (!skb) {
+       for (;;) {
                err = sock_error(sk);
                if (err != 0)
                        goto failure;
@@ -1784,66 +1773,27 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
                if (sk->sk_shutdown & SEND_SHUTDOWN)
                        goto failure;
 
-               if (atomic_read(&sk->sk_wmem_alloc) >= sk->sk_sndbuf) {
-                       set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
-                       set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-                       err = -EAGAIN;
-                       if (!timeo)
-                               goto failure;
-                       if (signal_pending(current))
-                               goto interrupted;
-                       timeo = sock_wait_for_wmem(sk, timeo);
-                       continue;
-               }
-
-               err = -ENOBUFS;
-               gfp_mask = sk->sk_allocation;
-               if (gfp_mask & __GFP_WAIT)
-                       gfp_mask |= __GFP_REPEAT;
+               if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
+                       break;
 
-               skb = alloc_skb(header_len, gfp_mask);
-               if (!skb)
+               set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+               set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+               err = -EAGAIN;
+               if (!timeo)
                        goto failure;
-
-               skb->truesize += data_len;
-
-               for (i = 0; npages > 0; i++) {
-                       int order = max_page_order;
-
-                       while (order) {
-                               if (npages >= 1 << order) {
-                                       page = alloc_pages(sk->sk_allocation |
-                                                          __GFP_COMP |
-                                                          __GFP_NOWARN |
-                                                          __GFP_NORETRY,
-                                                          order);
-                                       if (page)
-                                               goto fill_page;
-                                       /* Do not retry other high order allocations */
-                                       order = 1;
-                                       max_page_order = 0;
-                               }
-                               order--;
-                       }
-                       page = alloc_page(sk->sk_allocation);
-                       if (!page)
-                               goto failure;
-fill_page:
-                       chunk = min_t(unsigned long, data_len,
-                                     PAGE_SIZE << order);
-                       skb_fill_page_desc(skb, i, page, 0, chunk);
-                       data_len -= chunk;
-                       npages -= 1 << order;
-               }
+               if (signal_pending(current))
+                       goto interrupted;
+               timeo = sock_wait_for_wmem(sk, timeo);
        }
-
-       skb_set_owner_w(skb, sk);
+       skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
+                                  errcode, sk->sk_allocation);
+       if (skb)
+               skb_set_owner_w(skb, sk);
        return skb;
 
 interrupted:
        err = sock_intr_errno(timeo);
 failure:
-       kfree_skb(skb);
        *errcode = err;
        return NULL;
 }
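
The open-coded -EMSGSIZE check, the per-fragment page allocation loop and the
kfree_skb() on the failure path are all replaced by a single call to
alloc_skb_with_frags(), and the wait-for-write-space loop is rewritten from
"while (!skb)" to an explicit for (;;) that breaks once sk_wmem_alloc is below
sk_sndbuf, so allocation happens only after the wait. The sketch below is not
the real alloc_skb_with_frags(); it simply repackages the open-coded logic
removed above into helper form to show what the new call is expected to cover.

	/* Hedged sketch based on the code removed above: allocate a linear area
	 * of header_len, then spread data_len over page fragments, trying one
	 * high-order allocation (up to max_page_order) per fragment before
	 * falling back to order-0 pages.  On failure, *errcode is set and NULL
	 * is returned.  The real helper may differ in detail.
	 */
	static struct sk_buff *alloc_skb_with_frags_sketch(unsigned long header_len,
							   unsigned long data_len,
							   int max_page_order,
							   int *errcode,
							   gfp_t gfp_mask)
	{
		int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		unsigned long chunk;
		struct sk_buff *skb;
		struct page *page;
		int i;

		*errcode = -EMSGSIZE;
		if (npages > MAX_SKB_FRAGS)
			return NULL;

		*errcode = -ENOBUFS;
		skb = alloc_skb(header_len, gfp_mask);
		if (!skb)
			return NULL;

		skb->truesize += data_len;

		for (i = 0; npages > 0; i++) {
			int order = max_page_order;

			while (order) {
				if (npages >= 1 << order) {
					page = alloc_pages(gfp_mask | __GFP_COMP |
							   __GFP_NOWARN |
							   __GFP_NORETRY, order);
					if (page)
						goto fill_page;
					/* do not retry other high order allocations */
					order = 1;
					max_page_order = 0;
				}
				order--;
			}
			page = alloc_page(gfp_mask);
			if (!page) {
				kfree_skb(skb);
				return NULL;
			}
fill_page:
			chunk = min_t(unsigned long, data_len, PAGE_SIZE << order);
			skb_fill_page_desc(skb, i, page, 0, chunk);
			data_len -= chunk;
			npages -= 1 << order;
		}
		return skb;
	}
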
@@ -2492,11 +2442,11 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
                       int level, int type)
 {
        struct sock_exterr_skb *serr;
-       struct sk_buff *skb, *skb2;
+       struct sk_buff *skb;
        int copied, err;
 
        err = -EAGAIN;
-       skb = skb_dequeue(&sk->sk_error_queue);
+       skb = sock_dequeue_err_skb(sk);
        if (skb == NULL)
                goto out;
 
@@ -2517,16 +2467,6 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
        msg->msg_flags |= MSG_ERRQUEUE;
        err = copied;
 
-       /* Reset and regenerate socket error */
-       spin_lock_bh(&sk->sk_error_queue.lock);
-       sk->sk_err = 0;
-       if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
-               sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
-               spin_unlock_bh(&sk->sk_error_queue.lock);
-               sk->sk_error_report(sk);
-       } else
-               spin_unlock_bh(&sk->sk_error_queue.lock);
-
 out_free_skb:
        kfree_skb(skb);
 out:
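
The skb_dequeue() call and the open-coded "reset and regenerate socket error"
block removed above are folded into sock_dequeue_err_skb(), which is expected to
dequeue the error skb and re-arm sk->sk_err from the next pending entry under the
queue lock in one place. The sketch below reconstructs that combined behaviour
from the removed code; it is not necessarily the real helper.

	/* Hedged sketch of the behaviour sock_dequeue_err_skb() takes over:
	 * pop one skb from the error queue, then reset sk_err from whatever
	 * error (if any) is still queued and notify the socket about it.
	 */
	static struct sk_buff *sock_dequeue_err_skb_sketch(struct sock *sk)
	{
		struct sk_buff_head *q = &sk->sk_error_queue;
		struct sk_buff *skb, *skb_next;
		int err = 0;

		spin_lock_bh(&q->lock);
		skb = __skb_dequeue(q);
		skb_next = skb_peek(q);
		if (skb_next)
			err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
		spin_unlock_bh(&q->lock);

		sk->sk_err = err;
		if (err)
			sk->sk_error_report(sk);

		return skb;
	}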