00039 #include <linux/module.h>
00040 #include <linux/types.h>
00041 #include <linux/kernel.h>
00042 #include <linux/mm.h>
00043 #include <linux/interrupt.h>
00044 #include <linux/in.h>
00045 #include <linux/inet.h>
00046 #include <linux/slab.h>
00047 #include <linux/netdevice.h>
00048 #ifdef CONFIG_NET_CLS_ACT
00049 #include <net/pkt_sched.h>
00050 #endif
00051 #include <linux/string.h>
00052 #include <linux/skbuff.h>
00053 #include <linux/splice.h>
00054 #include <linux/cache.h>
00055 #include <linux/rtnetlink.h>
00056 #include <linux/init.h>
00057 #include <linux/scatterlist.h>
00058
00059 #include <net/protocol.h>
00060 #include <net/dst.h>
00061 #include <net/sock.h>
00062 #include <net/checksum.h>
00063 #ifndef DDE_LINUX
00064 #include <net/xfrm.h>
00065 #endif
00066
00067 #include "local.h"
00068
00069 #include <asm/uaccess.h>
00070 #include <asm/system.h>
00071
00072 #include "kmap_skb.h"
00073
00074 static struct kmem_cache *skbuff_head_cache __read_mostly;
00075 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
00076
00077 static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
00078 struct pipe_buffer *buf)
00079 {
00080 put_page(buf->page);
00081 }
00082
00083 static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
00084 struct pipe_buffer *buf)
00085 {
00086 get_page(buf->page);
00087 }
00088
00089 static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
00090 struct pipe_buffer *buf)
00091 {
00092 return 1;
00093 }
00094
00095
00096
00097 static struct pipe_buf_operations sock_pipe_buf_ops = {
00098 .can_merge = 0,
00099 .map = generic_pipe_buf_map,
00100 .unmap = generic_pipe_buf_unmap,
00101 .confirm = generic_pipe_buf_confirm,
00102 .release = sock_pipe_buf_release,
00103 .steal = sock_pipe_buf_steal,
00104 .get = sock_pipe_buf_get,
00105 };
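
/**
 *	skb_over_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */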
00121 void skb_over_panic(struct sk_buff *skb, int sz, void *here)
00122 {
00123 printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
00124 "data:%p tail:%#lx end:%#lx dev:%s\n",
00125 here, skb->len, sz, skb->head, skb->data,
00126 (unsigned long)skb->tail, (unsigned long)skb->end,
00127 skb->dev ? skb->dev->name : "<NULL>");
00128 BUG();
00129 }
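
/**
 *	skb_under_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */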
00140 void skb_under_panic(struct sk_buff *skb, int sz, void *here)
00141 {
00142 printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
00143 "data:%p tail:%#lx end:%#lx dev:%s\n",
00144 here, skb->len, sz, skb->head, skb->data,
00145 (unsigned long)skb->tail, (unsigned long)skb->end,
00146 skb->dev ? skb->dev->name : "<NULL>");
00147 BUG();
00148 }
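
/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of @size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */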
00171 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
00172 int fclone, int node)
00173 {
00174 struct kmem_cache *cache;
00175 struct skb_shared_info *shinfo;
00176 struct sk_buff *skb;
00177 u8 *data;
00178
00179 cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
00180
00181
00182 skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
00183 if (!skb)
00184 goto out;
00185
00186 size = SKB_DATA_ALIGN(size);
00187 data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
00188 gfp_mask, node);
00189 if (!data)
00190 goto nodata;
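
	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */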
00197 memset(skb, 0, offsetof(struct sk_buff, tail));
00198 skb->truesize = size + sizeof(struct sk_buff);
00199 atomic_set(&skb->users, 1);
00200 skb->head = data;
00201 skb->data = data;
00202 skb_reset_tail_pointer(skb);
00203 skb->end = skb->tail + size;
00204
00205 shinfo = skb_shinfo(skb);
00206 atomic_set(&shinfo->dataref, 1);
00207 shinfo->nr_frags = 0;
00208 shinfo->gso_size = 0;
00209 shinfo->gso_segs = 0;
00210 shinfo->gso_type = 0;
00211 shinfo->ip6_frag_id = 0;
00212 shinfo->frag_list = NULL;
00213
00214 if (fclone) {
00215 struct sk_buff *child = skb + 1;
00216 atomic_t *fclone_ref = (atomic_t *) (child + 1);
00217
00218 skb->fclone = SKB_FCLONE_ORIG;
00219 atomic_set(fclone_ref, 1);
00220
00221 child->fclone = SKB_FCLONE_UNAVAILABLE;
00222 }
00223 out:
00224 return skb;
00225 nodata:
00226 kmem_cache_free(cache, skb);
00227 skb = NULL;
00228 goto out;
00229 }
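
/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */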
00244 struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
00245 unsigned int length, gfp_t gfp_mask)
00246 {
00247 int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
00248 struct sk_buff *skb;
00249
00250 skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
00251 if (likely(skb)) {
00252 skb_reserve(skb, NET_SKB_PAD);
00253 skb->dev = dev;
00254 }
00255 return skb;
00256 }
00257
00258 struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
00259 {
00260 int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
00261 struct page *page;
00262
00263 page = alloc_pages_node(node, gfp_mask, 0);
00264 return page;
00265 }
00266 EXPORT_SYMBOL(__netdev_alloc_page);
00267
00268 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
00269 int size)
00270 {
00271 skb_fill_page_desc(skb, i, page, off, size);
00272 skb->len += size;
00273 skb->data_len += size;
00274 skb->truesize += size;
00275 }
00276 EXPORT_SYMBOL(skb_add_rx_frag);
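
/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */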
00290 struct sk_buff *dev_alloc_skb(unsigned int length)
00291 {
00292
00293
00294
00295
00296 return __dev_alloc_skb(length, GFP_ATOMIC);
00297 }
00298 EXPORT_SYMBOL(dev_alloc_skb);
00299
00300 static void skb_drop_list(struct sk_buff **listp)
00301 {
00302 struct sk_buff *list = *listp;
00303
00304 *listp = NULL;
00305
00306 do {
00307 struct sk_buff *this = list;
00308 list = list->next;
00309 kfree_skb(this);
00310 } while (list);
00311 }
00312
00313 static inline void skb_drop_fraglist(struct sk_buff *skb)
00314 {
00315 skb_drop_list(&skb_shinfo(skb)->frag_list);
00316 }
00317
00318 static void skb_clone_fraglist(struct sk_buff *skb)
00319 {
00320 struct sk_buff *list;
00321
00322 for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
00323 skb_get(list);
00324 }
00325
00326 static void skb_release_data(struct sk_buff *skb)
00327 {
00328 if (!skb->cloned ||
00329 !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
00330 &skb_shinfo(skb)->dataref)) {
00331 if (skb_shinfo(skb)->nr_frags) {
00332 int i;
00333 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
00334 put_page(skb_shinfo(skb)->frags[i].page);
00335 }
00336
00337 if (skb_shinfo(skb)->frag_list)
00338 skb_drop_fraglist(skb);
00339
00340 kfree(skb->head);
00341 }
00342 }
00343
00344
00345
00346
00347 static void kfree_skbmem(struct sk_buff *skb)
00348 {
00349 struct sk_buff *other;
00350 atomic_t *fclone_ref;
00351
00352 switch (skb->fclone) {
00353 case SKB_FCLONE_UNAVAILABLE:
00354 kmem_cache_free(skbuff_head_cache, skb);
00355 break;
00356
00357 case SKB_FCLONE_ORIG:
00358 fclone_ref = (atomic_t *) (skb + 2);
00359 if (atomic_dec_and_test(fclone_ref))
00360 kmem_cache_free(skbuff_fclone_cache, skb);
00361 break;
00362
00363 case SKB_FCLONE_CLONE:
00364 fclone_ref = (atomic_t *) (skb + 1);
00365 other = skb - 1;
00366
00367
00368
00369
00370 skb->fclone = SKB_FCLONE_UNAVAILABLE;
00371
00372 if (atomic_dec_and_test(fclone_ref))
00373 kmem_cache_free(skbuff_fclone_cache, other);
00374 break;
00375 }
00376 }
00377
00378 static void skb_release_head_state(struct sk_buff *skb)
00379 {
00380 #ifndef DDE_LINUX
00381 dst_release(skb->dst);
00382 #endif
00383 #ifdef CONFIG_XFRM
00384 secpath_put(skb->sp);
00385 #endif
00386 if (skb->destructor) {
00387 WARN_ON(in_irq());
00388 skb->destructor(skb);
00389 }
00390 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
00391 nf_conntrack_put(skb->nfct);
00392 nf_conntrack_put_reasm(skb->nfct_reasm);
00393 #endif
00394 #ifdef CONFIG_BRIDGE_NETFILTER
00395 nf_bridge_put(skb->nf_bridge);
00396 #endif
00397
00398 #ifdef CONFIG_NET_SCHED
00399 skb->tc_index = 0;
00400 #ifdef CONFIG_NET_CLS_ACT
00401 skb->tc_verd = 0;
00402 #endif
00403 #endif
00404 }
00405
00406
00407 static void skb_release_all(struct sk_buff *skb)
00408 {
00409 skb_release_head_state(skb);
00410 skb_release_data(skb);
00411 }
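
/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb.
 */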
00422 void __kfree_skb(struct sk_buff *skb)
00423 {
00424 skb_release_all(skb);
00425 kfree_skbmem(skb);
00426 }
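
/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */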
00435 void kfree_skb(struct sk_buff *skb)
00436 {
00437 if (unlikely(!skb))
00438 return;
00439 if (likely(atomic_read(&skb->users) == 1))
00440 smp_rmb();
00441 else if (likely(!atomic_dec_and_test(&skb->users)))
00442 return;
00443 __kfree_skb(skb);
00444 }
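
/**
 *	skb_recycle_check - check if skb can be reused for receive
 *	@skb: buffer
 *	@skb_size: minimum receive buffer size
 *
 *	Checks that the skb passed in is not shared or cloned, and
 *	that it is linear and its head portion at least as large as
 *	@skb_size so that it can be recycled as a receive buffer.
 *	If these conditions are met, this function does any necessary
 *	reference count dropping and cleans up the skbuff as if it
 *	just came from __alloc_skb().
 */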
00458 int skb_recycle_check(struct sk_buff *skb, int skb_size)
00459 {
00460 struct skb_shared_info *shinfo;
00461
00462 if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
00463 return 0;
00464
00465 skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
00466 if (skb_end_pointer(skb) - skb->head < skb_size)
00467 return 0;
00468
00469 if (skb_shared(skb) || skb_cloned(skb))
00470 return 0;
00471
00472 skb_release_head_state(skb);
00473 shinfo = skb_shinfo(skb);
00474 atomic_set(&shinfo->dataref, 1);
00475 shinfo->nr_frags = 0;
00476 shinfo->gso_size = 0;
00477 shinfo->gso_segs = 0;
00478 shinfo->gso_type = 0;
00479 shinfo->ip6_frag_id = 0;
00480 shinfo->frag_list = NULL;
00481
00482 memset(skb, 0, offsetof(struct sk_buff, tail));
00483 skb->data = skb->head + NET_SKB_PAD;
00484 skb_reset_tail_pointer(skb);
00485
00486 return 1;
00487 }
00488 EXPORT_SYMBOL(skb_recycle_check);
00489
00490 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
00491 {
00492 new->tstamp = old->tstamp;
00493 new->dev = old->dev;
00494 new->transport_header = old->transport_header;
00495 new->network_header = old->network_header;
00496 new->mac_header = old->mac_header;
00497 new->dst = dst_clone(old->dst);
00498 #ifdef CONFIG_XFRM
00499 new->sp = secpath_get(old->sp);
00500 #endif
00501 memcpy(new->cb, old->cb, sizeof(old->cb));
00502 new->csum_start = old->csum_start;
00503 new->csum_offset = old->csum_offset;
00504 new->local_df = old->local_df;
00505 new->pkt_type = old->pkt_type;
00506 new->ip_summed = old->ip_summed;
00507 skb_copy_queue_mapping(new, old);
00508 new->priority = old->priority;
00509 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
00510 new->ipvs_property = old->ipvs_property;
00511 #endif
00512 new->protocol = old->protocol;
00513 new->mark = old->mark;
00514 __nf_copy(new, old);
00515 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
00516 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
00517 new->nf_trace = old->nf_trace;
00518 #endif
00519 #ifdef CONFIG_NET_SCHED
00520 new->tc_index = old->tc_index;
00521 #ifdef CONFIG_NET_CLS_ACT
00522 new->tc_verd = old->tc_verd;
00523 #endif
00524 #endif
00525 new->vlan_tci = old->vlan_tci;
00526
00527 skb_copy_secmark(new, old);
00528 }
00529
00530 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
00531 {
00532 #define C(x) n->x = skb->x
00533
00534 n->next = n->prev = NULL;
00535 n->sk = NULL;
00536 __copy_skb_header(n, skb);
00537
00538 C(len);
00539 C(data_len);
00540 C(mac_len);
00541 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
00542 n->cloned = 1;
00543 n->nohdr = 0;
00544 n->destructor = NULL;
00545 C(iif);
00546 C(tail);
00547 C(end);
00548 C(head);
00549 C(data);
00550 C(truesize);
00551 #if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
00552 C(do_not_encrypt);
00553 C(requeue);
00554 #endif
00555 atomic_set(&n->users, 1);
00556
00557 atomic_inc(&(skb_shinfo(skb)->dataref));
00558 skb->cloned = 1;
00559
00560 return n;
00561 #undef C
00562 }
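
/**
 *	skb_morph	-	morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */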
00574 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
00575 {
00576 skb_release_all(dst);
00577 return __skb_clone(dst, src);
00578 }
00579 EXPORT_SYMBOL_GPL(skb_morph);
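
/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt @gfp_mask must be
 *	%GFP_ATOMIC.
 */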
00595 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
00596 {
00597 struct sk_buff *n;
00598
00599 n = skb + 1;
00600 if (skb->fclone == SKB_FCLONE_ORIG &&
00601 n->fclone == SKB_FCLONE_UNAVAILABLE) {
00602 atomic_t *fclone_ref = (atomic_t *) (n + 1);
00603 n->fclone = SKB_FCLONE_CLONE;
00604 atomic_inc(fclone_ref);
00605 } else {
00606 n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
00607 if (!n)
00608 return NULL;
00609 n->fclone = SKB_FCLONE_UNAVAILABLE;
00610 }
00611
00612 return __skb_clone(n, skb);
00613 }
00614
00615 static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
00616 {
00617 #ifndef NET_SKBUFF_DATA_USES_OFFSET
00618
00619
00620
00621 unsigned long offset = new->data - old->data;
00622 #endif
00623
00624 __copy_skb_header(new, old);
00625
00626 #ifndef NET_SKBUFF_DATA_USES_OFFSET
00627
00628 new->transport_header += offset;
00629 new->network_header += offset;
00630 new->mac_header += offset;
00631 #endif
00632 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
00633 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
00634 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
00635 }
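
/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff to a
 *	linear one, so the result is completely private and the caller may
 *	modify all of its data. This makes it a poor choice when only the
 *	header is going to be modified; use pskb_copy() instead.
 */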
00654 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
00655 {
00656 int headerlen = skb->data - skb->head;
00657
00658
00659
00660 struct sk_buff *n;
00661 #ifdef NET_SKBUFF_DATA_USES_OFFSET
00662 n = alloc_skb(skb->end + skb->data_len, gfp_mask);
00663 #else
00664 n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
00665 #endif
00666 if (!n)
00667 return NULL;
00668
00669
00670 skb_reserve(n, headerlen);
00671
00672 skb_put(n, skb->len);
00673
00674 if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
00675 BUG();
00676
00677 copy_skb_header(n, skb);
00678 return n;
00679 }
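
/**
 *	pskb_copy	-	create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in the header. Fragmented data remains shared. This is used when
 *	the caller wishes to modify only the header of the &sk_buff and
 *	needs a private copy of the header to alter. Returns %NULL on
 *	failure or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */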
00695 struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
00696 {
00697
00698
00699
00700 struct sk_buff *n;
00701 #ifdef NET_SKBUFF_DATA_USES_OFFSET
00702 n = alloc_skb(skb->end, gfp_mask);
00703 #else
00704 n = alloc_skb(skb->end - skb->head, gfp_mask);
00705 #endif
00706 if (!n)
00707 goto out;
00708
00709
00710 skb_reserve(n, skb->data - skb->head);
00711
00712 skb_put(n, skb_headlen(skb));
00713
00714 skb_copy_from_linear_data(skb, n->data, n->len);
00715
00716 n->truesize += skb->data_len;
00717 n->data_len = skb->data_len;
00718 n->len = skb->len;
00719
00720 if (skb_shinfo(skb)->nr_frags) {
00721 int i;
00722
00723 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
00724 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
00725 get_page(skb_shinfo(n)->frags[i].page);
00726 }
00727 skb_shinfo(n)->nr_frags = i;
00728 }
00729
00730 if (skb_shinfo(skb)->frag_list) {
00731 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
00732 skb_clone_fraglist(n);
00733 }
00734
00735 copy_skb_header(n, skb);
00736 out:
00737 return n;
00738 }
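
/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates an identical copy, if @nhead and @ntail are zero)
 *	the header of @skb. The &sk_buff itself is not changed and MUST have
 *	a reference count of 1. Returns zero on success or a negative errno
 *	on failure, in which case the &sk_buff is not changed.
 *
 *	All the pointers pointing into the skb header may change and must be
 *	reloaded after a call to this function.
 */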
00756 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
00757 gfp_t gfp_mask)
00758 {
00759 int i;
00760 u8 *data;
00761 #ifdef NET_SKBUFF_DATA_USES_OFFSET
00762 int size = nhead + skb->end + ntail;
00763 #else
00764 int size = nhead + (skb->end - skb->head) + ntail;
00765 #endif
00766 long off;
00767
00768 BUG_ON(nhead < 0);
00769
00770 if (skb_shared(skb))
00771 BUG();
00772
00773 size = SKB_DATA_ALIGN(size);
00774
00775 data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
00776 if (!data)
00777 goto nodata;
00778
00779
00780
00781 #ifdef NET_SKBUFF_DATA_USES_OFFSET
00782 memcpy(data + nhead, skb->head, skb->tail);
00783 #else
00784 memcpy(data + nhead, skb->head, skb->tail - skb->head);
00785 #endif
00786 memcpy(data + size, skb_end_pointer(skb),
00787 sizeof(struct skb_shared_info));
00788
00789 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
00790 get_page(skb_shinfo(skb)->frags[i].page);
00791
00792 if (skb_shinfo(skb)->frag_list)
00793 skb_clone_fraglist(skb);
00794
00795 skb_release_data(skb);
00796
00797 off = (data + nhead) - skb->head;
00798
00799 skb->head = data;
00800 skb->data += off;
00801 #ifdef NET_SKBUFF_DATA_USES_OFFSET
00802 skb->end = size;
00803 off = nhead;
00804 #else
00805 skb->end = skb->head + size;
00806 #endif
00807
00808 skb->tail += off;
00809 skb->transport_header += off;
00810 skb->network_header += off;
00811 skb->mac_header += off;
00812 skb->csum_start += nhead;
00813 skb->cloned = 0;
00814 skb->hdr_len = 0;
00815 skb->nohdr = 0;
00816 atomic_set(&skb_shinfo(skb)->dataref, 1);
00817 return 0;
00818
00819 nodata:
00820 return -ENOMEM;
00821 }
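
/**
 *	skb_realloc_headroom - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@headroom: needed headroom
 *
 *	Returns a clone or private copy of @skb with at least @headroom
 *	bytes of headroom, or %NULL on allocation failure. The original
 *	buffer is left unchanged.
 */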
00825 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
00826 {
00827 struct sk_buff *skb2;
00828 int delta = headroom - skb_headroom(skb);
00829
00830 if (delta <= 0)
00831 skb2 = pskb_copy(skb, GFP_ATOMIC);
00832 else {
00833 skb2 = skb_clone(skb, GFP_ATOMIC);
00834 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
00835 GFP_ATOMIC)) {
00836 kfree_skb(skb2);
00837 skb2 = NULL;
00838 }
00839 }
00840 return skb2;
00841 }
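
/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */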
00862 struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
00863 int newheadroom, int newtailroom,
00864 gfp_t gfp_mask)
00865 {
00866
00867
00868
00869 struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
00870 gfp_mask);
00871 int oldheadroom = skb_headroom(skb);
00872 int head_copy_len, head_copy_off;
00873 int off;
00874
00875 if (!n)
00876 return NULL;
00877
00878 skb_reserve(n, newheadroom);
00879
00880
00881 skb_put(n, skb->len);
00882
00883 head_copy_len = oldheadroom;
00884 head_copy_off = 0;
00885 if (newheadroom <= head_copy_len)
00886 head_copy_len = newheadroom;
00887 else
00888 head_copy_off = newheadroom - head_copy_len;
00889
00890
00891 if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
00892 skb->len + head_copy_len))
00893 BUG();
00894
00895 copy_skb_header(n, skb);
00896
00897 off = newheadroom - oldheadroom;
00898 n->csum_start += off;
00899 #ifdef NET_SKBUFF_DATA_USES_OFFSET
00900 n->transport_header += off;
00901 n->network_header += off;
00902 n->mac_header += off;
00903 #endif
00904
00905 return n;
00906 }
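
/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */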
00920 int skb_pad(struct sk_buff *skb, int pad)
00921 {
00922 int err;
00923 int ntail;
00924
00925
00926 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
00927 memset(skb->data+skb->len, 0, pad);
00928 return 0;
00929 }
00930
00931 ntail = skb->data_len + pad - (skb->end - skb->tail);
00932 if (likely(skb_cloned(skb) || ntail > 0)) {
00933 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
00934 if (unlikely(err))
00935 goto free_skb;
00936 }
00937
00938
00939
00940
00941 err = skb_linearize(skb);
00942 if (unlikely(err))
00943 goto free_skb;
00944
00945 memset(skb->data + skb->len, 0, pad);
00946 return 0;
00947
00948 free_skb:
00949 kfree_skb(skb);
00950 return err;
00951 }
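
/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */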
00962 unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
00963 {
00964 unsigned char *tmp = skb_tail_pointer(skb);
00965 SKB_LINEAR_ASSERT(skb);
00966 skb->tail += len;
00967 skb->len += len;
00968 if (unlikely(skb->tail > skb->end))
00969 skb_over_panic(skb, len, __builtin_return_address(0));
00970 return tmp;
00971 }
00972 EXPORT_SYMBOL(skb_put);
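
/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */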
00983 unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
00984 {
00985 skb->data -= len;
00986 skb->len += len;
00987 if (unlikely(skb->data<skb->head))
00988 skb_under_panic(skb, len, __builtin_return_address(0));
00989 return skb->data;
00990 }
00991 EXPORT_SYMBOL(skb_push);
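
/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */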
01003 unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
01004 {
01005 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
01006 }
01007 EXPORT_SYMBOL(skb_pull);
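
/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */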
01018 void skb_trim(struct sk_buff *skb, unsigned int len)
01019 {
01020 if (skb->len > len)
01021 __skb_trim(skb, len);
01022 }
01023 EXPORT_SYMBOL(skb_trim);
01024
01025
01026
01027
01028 int ___pskb_trim(struct sk_buff *skb, unsigned int len)
01029 {
01030 struct sk_buff **fragp;
01031 struct sk_buff *frag;
01032 int offset = skb_headlen(skb);
01033 int nfrags = skb_shinfo(skb)->nr_frags;
01034 int i;
01035 int err;
01036
01037 if (skb_cloned(skb) &&
01038 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
01039 return err;
01040
01041 i = 0;
01042 if (offset >= len)
01043 goto drop_pages;
01044
01045 for (; i < nfrags; i++) {
01046 int end = offset + skb_shinfo(skb)->frags[i].size;
01047
01048 if (end < len) {
01049 offset = end;
01050 continue;
01051 }
01052
01053 skb_shinfo(skb)->frags[i++].size = len - offset;
01054
01055 drop_pages:
01056 skb_shinfo(skb)->nr_frags = i;
01057
01058 for (; i < nfrags; i++)
01059 put_page(skb_shinfo(skb)->frags[i].page);
01060
01061 if (skb_shinfo(skb)->frag_list)
01062 skb_drop_fraglist(skb);
01063 goto done;
01064 }
01065
01066 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
01067 fragp = &frag->next) {
01068 int end = offset + frag->len;
01069
01070 if (skb_shared(frag)) {
01071 struct sk_buff *nfrag;
01072
01073 nfrag = skb_clone(frag, GFP_ATOMIC);
01074 if (unlikely(!nfrag))
01075 return -ENOMEM;
01076
01077 nfrag->next = frag->next;
01078 kfree_skb(frag);
01079 frag = nfrag;
01080 *fragp = frag;
01081 }
01082
01083 if (end < len) {
01084 offset = end;
01085 continue;
01086 }
01087
01088 if (end > len &&
01089 unlikely((err = pskb_trim(frag, len - offset))))
01090 return err;
01091
01092 if (frag->next)
01093 skb_drop_list(&frag->next);
01094 break;
01095 }
01096
01097 done:
01098 if (len > skb_headlen(skb)) {
01099 skb->data_len -= skb->len - len;
01100 skb->len = len;
01101 } else {
01102 skb->len = len;
01103 skb->data_len = 0;
01104 skb_set_tail_pointer(skb, len);
01105 }
01106
01107 return 0;
01108 }
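
/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes sense only on a fragmented &sk_buff; it expands
 *	the header, moving its tail forward and copying the necessary data
 *	from the fragmented part.
 *
 *	&sk_buff MUST have a reference count of 1.
 *
 *	Returns %NULL (and the &sk_buff is unchanged) if the pull failed,
 *	or the new tail of the skb on success.
 *
 *	All the pointers pointing into the skb header may change and must be
 *	reloaded after a call to this function.
 */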
01135 unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
01136 {
01137
01138
01139
01140
01141 int i, k, eat = (skb->tail + delta) - skb->end;
01142
01143 if (eat > 0 || skb_cloned(skb)) {
01144 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
01145 GFP_ATOMIC))
01146 return NULL;
01147 }
01148
01149 if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
01150 BUG();
01151
01152
01153
01154
01155 if (!skb_shinfo(skb)->frag_list)
01156 goto pull_pages;
01157
01158
01159 eat = delta;
01160 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
01161 if (skb_shinfo(skb)->frags[i].size >= eat)
01162 goto pull_pages;
01163 eat -= skb_shinfo(skb)->frags[i].size;
01164 }
01165
01166
01167
01168
01169
01170
01171
01172
01173 if (eat) {
01174 struct sk_buff *list = skb_shinfo(skb)->frag_list;
01175 struct sk_buff *clone = NULL;
01176 struct sk_buff *insp = NULL;
01177
01178 do {
01179 BUG_ON(!list);
01180
01181 if (list->len <= eat) {
01182
01183 eat -= list->len;
01184 list = list->next;
01185 insp = list;
01186 } else {
01187
01188
01189 if (skb_shared(list)) {
01190
01191 clone = skb_clone(list, GFP_ATOMIC);
01192 if (!clone)
01193 return NULL;
01194 insp = list->next;
01195 list = clone;
01196 } else {
01197
01198
01199 insp = list;
01200 }
01201 if (!pskb_pull(list, eat)) {
01202 if (clone)
01203 kfree_skb(clone);
01204 return NULL;
01205 }
01206 break;
01207 }
01208 } while (eat);
01209
01210
01211 while ((list = skb_shinfo(skb)->frag_list) != insp) {
01212 skb_shinfo(skb)->frag_list = list->next;
01213 kfree_skb(list);
01214 }
01215
01216 if (clone) {
01217 clone->next = list;
01218 skb_shinfo(skb)->frag_list = clone;
01219 }
01220 }
01221
01222
01223 pull_pages:
01224 eat = delta;
01225 k = 0;
01226 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
01227 if (skb_shinfo(skb)->frags[i].size <= eat) {
01228 put_page(skb_shinfo(skb)->frags[i].page);
01229 eat -= skb_shinfo(skb)->frags[i].size;
01230 } else {
01231 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
01232 if (eat) {
01233 skb_shinfo(skb)->frags[k].page_offset += eat;
01234 skb_shinfo(skb)->frags[k].size -= eat;
01235 eat = 0;
01236 }
01237 k++;
01238 }
01239 }
01240 skb_shinfo(skb)->nr_frags = k;
01241
01242 skb->tail += delta;
01243 skb->data_len -= delta;
01244
01245 return skb_tail_pointer(skb);
01246 }
01247
01248
01249
01250 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
01251 {
01252 int i, copy;
01253 int start = skb_headlen(skb);
01254
01255 if (offset > (int)skb->len - len)
01256 goto fault;
01257
01258
01259 if ((copy = start - offset) > 0) {
01260 if (copy > len)
01261 copy = len;
01262 skb_copy_from_linear_data_offset(skb, offset, to, copy);
01263 if ((len -= copy) == 0)
01264 return 0;
01265 offset += copy;
01266 to += copy;
01267 }
01268
01269 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
01270 int end;
01271
01272 WARN_ON(start > offset + len);
01273
01274 end = start + skb_shinfo(skb)->frags[i].size;
01275 if ((copy = end - offset) > 0) {
01276 u8 *vaddr;
01277
01278 if (copy > len)
01279 copy = len;
01280
01281 vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
01282 memcpy(to,
01283 vaddr + skb_shinfo(skb)->frags[i].page_offset+
01284 offset - start, copy);
01285 kunmap_skb_frag(vaddr);
01286
01287 if ((len -= copy) == 0)
01288 return 0;
01289 offset += copy;
01290 to += copy;
01291 }
01292 start = end;
01293 }
01294
01295 if (skb_shinfo(skb)->frag_list) {
01296 struct sk_buff *list = skb_shinfo(skb)->frag_list;
01297
01298 for (; list; list = list->next) {
01299 int end;
01300
01301 WARN_ON(start > offset + len);
01302
01303 end = start + list->len;
01304 if ((copy = end - offset) > 0) {
01305 if (copy > len)
01306 copy = len;
01307 if (skb_copy_bits(list, offset - start,
01308 to, copy))
01309 goto fault;
01310 if ((len -= copy) == 0)
01311 return 0;
01312 offset += copy;
01313 to += copy;
01314 }
01315 start = end;
01316 }
01317 }
01318 if (!len)
01319 return 0;
01320
01321 fault:
01322 return -EFAULT;
01323 }
01324
01325
01326
01327
01328
01329 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
01330 {
01331 put_page(spd->pages[i]);
01332 }
01333
01334 static inline struct page *linear_to_page(struct page *page, unsigned int len,
01335 unsigned int offset)
01336 {
01337 struct page *p = alloc_pages(GFP_KERNEL, 0);
01338
01339 if (!p)
01340 return NULL;
01341 memcpy(page_address(p) + offset, page_address(page) + offset, len);
01342
01343 return p;
01344 }
01345
01346
01347
01348
01349 static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
01350 unsigned int len, unsigned int offset,
01351 struct sk_buff *skb, int linear)
01352 {
01353 if (unlikely(spd->nr_pages == PIPE_BUFFERS))
01354 return 1;
01355
01356 if (linear) {
01357 page = linear_to_page(page, len, offset);
01358 if (!page)
01359 return 1;
01360 } else
01361 get_page(page);
01362
01363 spd->pages[spd->nr_pages] = page;
01364 spd->partial[spd->nr_pages].len = len;
01365 spd->partial[spd->nr_pages].offset = offset;
01366 spd->nr_pages++;
01367
01368 return 0;
01369 }
01370
01371 static inline void __segment_seek(struct page **page, unsigned int *poff,
01372 unsigned int *plen, unsigned int off)
01373 {
01374 *poff += off;
01375 *page += *poff / PAGE_SIZE;
01376 *poff = *poff % PAGE_SIZE;
01377 *plen -= off;
01378 }
01379
01380 static inline int __splice_segment(struct page *page, unsigned int poff,
01381 unsigned int plen, unsigned int *off,
01382 unsigned int *len, struct sk_buff *skb,
01383 struct splice_pipe_desc *spd, int linear)
01384 {
01385 if (!*len)
01386 return 1;
01387
01388
01389 if (*off >= plen) {
01390 *off -= plen;
01391 return 0;
01392 }
01393
01394
01395 if (*off) {
01396 __segment_seek(&page, &poff, &plen, *off);
01397 *off = 0;
01398 }
01399
01400 do {
01401 unsigned int flen = min(*len, plen);
01402
01403
01404 flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
01405
01406 if (spd_fill_page(spd, page, flen, poff, skb, linear))
01407 return 1;
01408
01409 __segment_seek(&page, &poff, &plen, flen);
01410 *len -= flen;
01411
01412 } while (*len && plen);
01413
01414 return 0;
01415 }
01416
01417
01418
01419
01420
01421 static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
01422 unsigned int *len,
01423 struct splice_pipe_desc *spd)
01424 {
01425 int seg;
01426
01427
01428
01429
01430 if (__splice_segment(virt_to_page(skb->data),
01431 (unsigned long) skb->data & (PAGE_SIZE - 1),
01432 skb_headlen(skb),
01433 offset, len, skb, spd, 1))
01434 return 1;
01435
01436
01437
01438
01439 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
01440 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
01441
01442 if (__splice_segment(f->page, f->page_offset, f->size,
01443 offset, len, skb, spd, 0))
01444 return 1;
01445 }
01446
01447 return 0;
01448 }
01449
01450
01451
01452
01453
01454
01455
01456 int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
01457 struct pipe_inode_info *pipe, unsigned int tlen,
01458 unsigned int flags)
01459 {
01460 struct partial_page partial[PIPE_BUFFERS];
01461 struct page *pages[PIPE_BUFFERS];
01462 struct splice_pipe_desc spd = {
01463 .pages = pages,
01464 .partial = partial,
01465 .flags = flags,
01466 .ops = &sock_pipe_buf_ops,
01467 .spd_release = sock_spd_release,
01468 };
01469
01470
01471
01472
01473
01474 if (__skb_splice_bits(skb, &offset, &tlen, &spd))
01475 goto done;
01476 else if (!tlen)
01477 goto done;
01478
01479
01480
01481
01482 if (skb_shinfo(skb)->frag_list) {
01483 struct sk_buff *list = skb_shinfo(skb)->frag_list;
01484
01485 for (; list && tlen; list = list->next) {
01486 if (__skb_splice_bits(list, &offset, &tlen, &spd))
01487 break;
01488 }
01489 }
01490
01491 done:
01492 if (spd.nr_pages) {
01493 struct sock *sk = skb->sk;
01494 int ret;
01495
01496
01497
01498
01499
01500
01501
01502
01503
01504
01505 release_sock(sk);
01506 ret = splice_to_pipe(pipe, &spd);
01507 lock_sock(sk);
01508 return ret;
01509 }
01510
01511 return 0;
01512 }
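
/**
 *	skb_store_bits - store bits from kernel buffer to skb
 *	@skb: destination buffer
 *	@offset: offset in destination
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb.  This function handles all the messy bits of
 *	traversing fragment lists and such.
 */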
01526 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
01527 {
01528 int i, copy;
01529 int start = skb_headlen(skb);
01530
01531 if (offset > (int)skb->len - len)
01532 goto fault;
01533
01534 if ((copy = start - offset) > 0) {
01535 if (copy > len)
01536 copy = len;
01537 skb_copy_to_linear_data_offset(skb, offset, from, copy);
01538 if ((len -= copy) == 0)
01539 return 0;
01540 offset += copy;
01541 from += copy;
01542 }
01543
01544 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
01545 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
01546 int end;
01547
01548 WARN_ON(start > offset + len);
01549
01550 end = start + frag->size;
01551 if ((copy = end - offset) > 0) {
01552 u8 *vaddr;
01553
01554 if (copy > len)
01555 copy = len;
01556
01557 vaddr = kmap_skb_frag(frag);
01558 memcpy(vaddr + frag->page_offset + offset - start,
01559 from, copy);
01560 kunmap_skb_frag(vaddr);
01561
01562 if ((len -= copy) == 0)
01563 return 0;
01564 offset += copy;
01565 from += copy;
01566 }
01567 start = end;
01568 }
01569
01570 if (skb_shinfo(skb)->frag_list) {
01571 struct sk_buff *list = skb_shinfo(skb)->frag_list;
01572
01573 for (; list; list = list->next) {
01574 int end;
01575
01576 WARN_ON(start > offset + len);
01577
01578 end = start + list->len;
01579 if ((copy = end - offset) > 0) {
01580 if (copy > len)
01581 copy = len;
01582 if (skb_store_bits(list, offset - start,
01583 from, copy))
01584 goto fault;
01585 if ((len -= copy) == 0)
01586 return 0;
01587 offset += copy;
01588 from += copy;
01589 }
01590 start = end;
01591 }
01592 }
01593 if (!len)
01594 return 0;
01595
01596 fault:
01597 return -EFAULT;
01598 }
01599
01600 EXPORT_SYMBOL(skb_store_bits);
01601
01602
01603
01604 __wsum skb_checksum(const struct sk_buff *skb, int offset,
01605 int len, __wsum csum)
01606 {
01607 int start = skb_headlen(skb);
01608 int i, copy = start - offset;
01609 int pos = 0;
01610
01611
01612 if (copy > 0) {
01613 if (copy > len)
01614 copy = len;
01615 csum = csum_partial(skb->data + offset, copy, csum);
01616 if ((len -= copy) == 0)
01617 return csum;
01618 offset += copy;
01619 pos = copy;
01620 }
01621
01622 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
01623 int end;
01624
01625 WARN_ON(start > offset + len);
01626
01627 end = start + skb_shinfo(skb)->frags[i].size;
01628 if ((copy = end - offset) > 0) {
01629 __wsum csum2;
01630 u8 *vaddr;
01631 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
01632
01633 if (copy > len)
01634 copy = len;
01635 vaddr = kmap_skb_frag(frag);
01636 csum2 = csum_partial(vaddr + frag->page_offset +
01637 offset - start, copy, 0);
01638 kunmap_skb_frag(vaddr);
01639 csum = csum_block_add(csum, csum2, pos);
01640 if (!(len -= copy))
01641 return csum;
01642 offset += copy;
01643 pos += copy;
01644 }
01645 start = end;
01646 }
01647
01648 if (skb_shinfo(skb)->frag_list) {
01649 struct sk_buff *list = skb_shinfo(skb)->frag_list;
01650
01651 for (; list; list = list->next) {
01652 int end;
01653
01654 WARN_ON(start > offset + len);
01655
01656 end = start + list->len;
01657 if ((copy = end - offset) > 0) {
01658 __wsum csum2;
01659 if (copy > len)
01660 copy = len;
01661 csum2 = skb_checksum(list, offset - start,
01662 copy, 0);
01663 csum = csum_block_add(csum, csum2, pos);
01664 if ((len -= copy) == 0)
01665 return csum;
01666 offset += copy;
01667 pos += copy;
01668 }
01669 start = end;
01670 }
01671 }
01672 BUG_ON(len);
01673
01674 return csum;
01675 }
01676
01677
01678
01679 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
01680 u8 *to, int len, __wsum csum)
01681 {
01682 int start = skb_headlen(skb);
01683 int i, copy = start - offset;
01684 int pos = 0;
01685
01686
01687 if (copy > 0) {
01688 if (copy > len)
01689 copy = len;
01690 csum = csum_partial_copy_nocheck(skb->data + offset, to,
01691 copy, csum);
01692 if ((len -= copy) == 0)
01693 return csum;
01694 offset += copy;
01695 to += copy;
01696 pos = copy;
01697 }
01698
01699 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
01700 int end;
01701
01702 WARN_ON(start > offset + len);
01703
01704 end = start + skb_shinfo(skb)->frags[i].size;
01705 if ((copy = end - offset) > 0) {
01706 __wsum csum2;
01707 u8 *vaddr;
01708 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
01709
01710 if (copy > len)
01711 copy = len;
01712 vaddr = kmap_skb_frag(frag);
01713 csum2 = csum_partial_copy_nocheck(vaddr +
01714 frag->page_offset +
01715 offset - start, to,
01716 copy, 0);
01717 kunmap_skb_frag(vaddr);
01718 csum = csum_block_add(csum, csum2, pos);
01719 if (!(len -= copy))
01720 return csum;
01721 offset += copy;
01722 to += copy;
01723 pos += copy;
01724 }
01725 start = end;
01726 }
01727
01728 if (skb_shinfo(skb)->frag_list) {
01729 struct sk_buff *list = skb_shinfo(skb)->frag_list;
01730
01731 for (; list; list = list->next) {
01732 __wsum csum2;
01733 int end;
01734
01735 WARN_ON(start > offset + len);
01736
01737 end = start + list->len;
01738 if ((copy = end - offset) > 0) {
01739 if (copy > len)
01740 copy = len;
01741 csum2 = skb_copy_and_csum_bits(list,
01742 offset - start,
01743 to, copy, 0);
01744 csum = csum_block_add(csum, csum2, pos);
01745 if ((len -= copy) == 0)
01746 return csum;
01747 offset += copy;
01748 to += copy;
01749 pos += copy;
01750 }
01751 start = end;
01752 }
01753 }
01754 BUG_ON(len);
01755 return csum;
01756 }
01757
01758 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
01759 {
01760 __wsum csum;
01761 long csstart;
01762
01763 if (skb->ip_summed == CHECKSUM_PARTIAL)
01764 csstart = skb->csum_start - skb_headroom(skb);
01765 else
01766 csstart = skb_headlen(skb);
01767
01768 BUG_ON(csstart > skb_headlen(skb));
01769
01770 skb_copy_from_linear_data(skb, to, csstart);
01771
01772 csum = 0;
01773 if (csstart != skb->len)
01774 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
01775 skb->len - csstart, 0);
01776
01777 if (skb->ip_summed == CHECKSUM_PARTIAL) {
01778 long csstuff = csstart + skb->csum_offset;
01779
01780 *((__sum16 *)(to + csstuff)) = csum_fold(csum);
01781 }
01782 }
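
/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */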
01793 struct sk_buff *skb_dequeue(struct sk_buff_head *list)
01794 {
01795 unsigned long flags;
01796 struct sk_buff *result;
01797
01798 spin_lock_irqsave(&list->lock, flags);
01799 result = __skb_dequeue(list);
01800 spin_unlock_irqrestore(&list->lock, flags);
01801 return result;
01802 }
01803
01804
01805
01806
01807
01808
01809
01810
01811
01812 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
01813 {
01814 unsigned long flags;
01815 struct sk_buff *result;
01816
01817 spin_lock_irqsave(&list->lock, flags);
01818 result = __skb_dequeue_tail(list);
01819 spin_unlock_irqrestore(&list->lock, flags);
01820 return result;
01821 }
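
/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */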
01831 void skb_queue_purge(struct sk_buff_head *list)
01832 {
01833 struct sk_buff *skb;
01834 while ((skb = skb_dequeue(list)) != NULL)
01835 kfree_skb(skb);
01836 }
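
/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */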
01849 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
01850 {
01851 unsigned long flags;
01852
01853 spin_lock_irqsave(&list->lock, flags);
01854 __skb_queue_head(list, newsk);
01855 spin_unlock_irqrestore(&list->lock, flags);
01856 }
01857
01858
01859
01860
01861
01862
01863
01864
01865
01866
01867
01868
01869 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
01870 {
01871 unsigned long flags;
01872
01873 spin_lock_irqsave(&list->lock, flags);
01874 __skb_queue_tail(list, newsk);
01875 spin_unlock_irqrestore(&list->lock, flags);
01876 }
01877
01878
01879
01880
01881
01882
01883
01884
01885
01886
01887
01888 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
01889 {
01890 unsigned long flags;
01891
01892 spin_lock_irqsave(&list->lock, flags);
01893 __skb_unlink(skb, list);
01894 spin_unlock_irqrestore(&list->lock, flags);
01895 }
01896
01897
01898
01899
01900
01901
01902
01903
01904
01905
01906
01907 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
01908 {
01909 unsigned long flags;
01910
01911 spin_lock_irqsave(&list->lock, flags);
01912 __skb_queue_after(list, old, newsk);
01913 spin_unlock_irqrestore(&list->lock, flags);
01914 }
01915
01916
01917
01918
01919
01920
01921
01922
01923
01924
01925
01926
01927
01928
01929 void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
01930 {
01931 unsigned long flags;
01932
01933 spin_lock_irqsave(&list->lock, flags);
01934 __skb_insert(newsk, old->prev, old, list);
01935 spin_unlock_irqrestore(&list->lock, flags);
01936 }
01937
01938 static inline void skb_split_inside_header(struct sk_buff *skb,
01939 struct sk_buff* skb1,
01940 const u32 len, const int pos)
01941 {
01942 int i;
01943
01944 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
01945 pos - len);
01946
01947 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
01948 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
01949
01950 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
01951 skb_shinfo(skb)->nr_frags = 0;
01952 skb1->data_len = skb->data_len;
01953 skb1->len += skb1->data_len;
01954 skb->data_len = 0;
01955 skb->len = len;
01956 skb_set_tail_pointer(skb, len);
01957 }
01958
01959 static inline void skb_split_no_header(struct sk_buff *skb,
01960 struct sk_buff* skb1,
01961 const u32 len, int pos)
01962 {
01963 int i, k = 0;
01964 const int nfrags = skb_shinfo(skb)->nr_frags;
01965
01966 skb_shinfo(skb)->nr_frags = 0;
01967 skb1->len = skb1->data_len = skb->len - len;
01968 skb->len = len;
01969 skb->data_len = len - pos;
01970
01971 for (i = 0; i < nfrags; i++) {
01972 int size = skb_shinfo(skb)->frags[i].size;
01973
01974 if (pos + size > len) {
01975 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
01976
01977 if (pos < len) {
01978
01979
01980
01981
01982
01983
01984
01985
01986 get_page(skb_shinfo(skb)->frags[i].page);
01987 skb_shinfo(skb1)->frags[0].page_offset += len - pos;
01988 skb_shinfo(skb1)->frags[0].size -= len - pos;
01989 skb_shinfo(skb)->frags[i].size = len - pos;
01990 skb_shinfo(skb)->nr_frags++;
01991 }
01992 k++;
01993 } else
01994 skb_shinfo(skb)->nr_frags++;
01995 pos += size;
01996 }
01997 skb_shinfo(skb1)->nr_frags = k;
01998 }
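
/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */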
02006 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
02007 {
02008 int pos = skb_headlen(skb);
02009
02010 if (len < pos)
02011 skb_split_inside_header(skb, skb1, len, pos);
02012 else
02013 skb_split_no_header(skb, skb1, len, pos);
02014 }
02015
02016
02017
02018
02019
02020 static int skb_prepare_for_shift(struct sk_buff *skb)
02021 {
02022 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
02023 }
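
/**
 * skb_shift - Shifts paged data partially from skb to another
 * @tgt: buffer into which tail data gets added
 * @skb: buffer from which the paged data comes from
 * @shiftlen: shift up to this many bytes
 *
 * Attempts to shift up to @shiftlen bytes of paged data from @skb to
 * @tgt, coalescing with the last fragment of @tgt where possible.
 * Returns the number of bytes actually shifted; it is up to the caller
 * to free @skb if everything was shifted. If @tgt runs out of frags,
 * the whole operation is aborted and 0 is returned. @skb may not
 * contain anything but paged data, while @tgt may also hold non-paged
 * data.
 */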
02043 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
02044 {
02045 int from, to, merge, todo;
02046 struct skb_frag_struct *fragfrom, *fragto;
02047
02048 BUG_ON(shiftlen > skb->len);
02049 BUG_ON(skb_headlen(skb));
02050
02051 todo = shiftlen;
02052 from = 0;
02053 to = skb_shinfo(tgt)->nr_frags;
02054 fragfrom = &skb_shinfo(skb)->frags[from];
02055
02056
02057
02058
02059 if (!to ||
02060 !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
02061 merge = -1;
02062 } else {
02063 merge = to - 1;
02064
02065 todo -= fragfrom->size;
02066 if (todo < 0) {
02067 if (skb_prepare_for_shift(skb) ||
02068 skb_prepare_for_shift(tgt))
02069 return 0;
02070
02071
02072 fragfrom = &skb_shinfo(skb)->frags[from];
02073 fragto = &skb_shinfo(tgt)->frags[merge];
02074
02075 fragto->size += shiftlen;
02076 fragfrom->size -= shiftlen;
02077 fragfrom->page_offset += shiftlen;
02078
02079 goto onlymerged;
02080 }
02081
02082 from++;
02083 }
02084
02085
02086 if ((shiftlen == skb->len) &&
02087 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
02088 return 0;
02089
02090 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
02091 return 0;
02092
02093 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
02094 if (to == MAX_SKB_FRAGS)
02095 return 0;
02096
02097 fragfrom = &skb_shinfo(skb)->frags[from];
02098 fragto = &skb_shinfo(tgt)->frags[to];
02099
02100 if (todo >= fragfrom->size) {
02101 *fragto = *fragfrom;
02102 todo -= fragfrom->size;
02103 from++;
02104 to++;
02105
02106 } else {
02107 get_page(fragfrom->page);
02108 fragto->page = fragfrom->page;
02109 fragto->page_offset = fragfrom->page_offset;
02110 fragto->size = todo;
02111
02112 fragfrom->page_offset += todo;
02113 fragfrom->size -= todo;
02114 todo = 0;
02115
02116 to++;
02117 break;
02118 }
02119 }
02120
02121
02122 skb_shinfo(tgt)->nr_frags = to;
02123
02124 if (merge >= 0) {
02125 fragfrom = &skb_shinfo(skb)->frags[0];
02126 fragto = &skb_shinfo(tgt)->frags[merge];
02127
02128 fragto->size += fragfrom->size;
02129 put_page(fragfrom->page);
02130 }
02131
02132
02133 to = 0;
02134 while (from < skb_shinfo(skb)->nr_frags)
02135 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
02136 skb_shinfo(skb)->nr_frags = to;
02137
02138 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
02139
02140 onlymerged:
02141
02142
02143
02144 tgt->ip_summed = CHECKSUM_PARTIAL;
02145 skb->ip_summed = CHECKSUM_PARTIAL;
02146
02147
02148 skb->len -= shiftlen;
02149 skb->data_len -= shiftlen;
02150 skb->truesize -= shiftlen;
02151 tgt->len += shiftlen;
02152 tgt->data_len += shiftlen;
02153 tgt->truesize += shiftlen;
02154
02155 return shiftlen;
02156 }
02157
02158
02159
02160
02161
02162
02163
02164
02165
02166
02167
02168 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
02169 unsigned int to, struct skb_seq_state *st)
02170 {
02171 st->lower_offset = from;
02172 st->upper_offset = to;
02173 st->root_skb = st->cur_skb = skb;
02174 st->frag_idx = st->stepped_offset = 0;
02175 st->frag_data = NULL;
02176 }
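
/**
 * skb_seq_read - Sequentially read skb data
 * @consumed: number of bytes consumed by the caller so far
 * @data: destination pointer for data to be returned
 * @st: state variable
 *
 * Reads a block of skb data at @consumed relative to the lower offset
 * specified to skb_prepare_seq_read(). Assigns the head of the data
 * block to @data and returns the length of the block, or 0 if the end
 * of the skb data or the upper offset has been reached.
 *
 * The caller is not required to consume all of the data returned; the
 * next call to skb_seq_read() will return the remaining part of the
 * block.
 */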
02203 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
02204 struct skb_seq_state *st)
02205 {
02206 unsigned int block_limit, abs_offset = consumed + st->lower_offset;
02207 skb_frag_t *frag;
02208
02209 if (unlikely(abs_offset >= st->upper_offset))
02210 return 0;
02211
02212 next_skb:
02213 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
02214
02215 if (abs_offset < block_limit) {
02216 *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
02217 return block_limit - abs_offset;
02218 }
02219
02220 if (st->frag_idx == 0 && !st->frag_data)
02221 st->stepped_offset += skb_headlen(st->cur_skb);
02222
02223 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
02224 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
02225 block_limit = frag->size + st->stepped_offset;
02226
02227 if (abs_offset < block_limit) {
02228 if (!st->frag_data)
02229 st->frag_data = kmap_skb_frag(frag);
02230
02231 *data = (u8 *) st->frag_data + frag->page_offset +
02232 (abs_offset - st->stepped_offset);
02233
02234 return block_limit - abs_offset;
02235 }
02236
02237 if (st->frag_data) {
02238 kunmap_skb_frag(st->frag_data);
02239 st->frag_data = NULL;
02240 }
02241
02242 st->frag_idx++;
02243 st->stepped_offset += frag->size;
02244 }
02245
02246 if (st->frag_data) {
02247 kunmap_skb_frag(st->frag_data);
02248 st->frag_data = NULL;
02249 }
02250
02251 if (st->root_skb == st->cur_skb &&
02252 skb_shinfo(st->root_skb)->frag_list) {
02253 st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
02254 st->frag_idx = 0;
02255 goto next_skb;
02256 } else if (st->cur_skb->next) {
02257 st->cur_skb = st->cur_skb->next;
02258 st->frag_idx = 0;
02259 goto next_skb;
02260 }
02261
02262 return 0;
02263 }
02264
02265
02266
02267
02268
02269
02270
02271
02272 void skb_abort_seq_read(struct skb_seq_state *st)
02273 {
02274 if (st->frag_data)
02275 kunmap_skb_frag(st->frag_data);
02276 }
02277
02278 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
02279
02280 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
02281 struct ts_config *conf,
02282 struct ts_state *state)
02283 {
02284 return skb_seq_read(offset, text, TS_SKB_CB(state));
02285 }
02286
02287 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
02288 {
02289 skb_abort_seq_read(TS_SKB_CB(state));
02290 }
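
/**
 * skb_find_text - Find a text pattern in skb data
 * @skb: the buffer to look in
 * @from: search offset
 * @to: search limit
 * @config: textsearch configuration
 * @state: uninitialized textsearch state variable
 *
 * Finds a pattern in the skb data according to the specified
 * textsearch configuration. Use textsearch_next() to retrieve
 * subsequent occurrences of the pattern. Returns the offset
 * of the first occurrence or UINT_MAX if no match was found.
 */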
02305 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
02306 unsigned int to, struct ts_config *config,
02307 struct ts_state *state)
02308 {
02309 unsigned int ret;
02310
02311 config->get_next_block = skb_ts_get_next_block;
02312 config->finish = skb_ts_finish;
02313
02314 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
02315
02316 ret = textsearch_find(config, state);
02317 return (ret <= to - from ? ret : UINT_MAX);
02318 }
02319
02320
02321
02322
02323
02324
02325
02326
02327
02328
02329
02330
02331 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
02332 int (*getfrag)(void *from, char *to, int offset,
02333 int len, int odd, struct sk_buff *skb),
02334 void *from, int length)
02335 {
02336 int frg_cnt = 0;
02337 skb_frag_t *frag = NULL;
02338 struct page *page = NULL;
02339 int copy, left;
02340 int offset = 0;
02341 int ret;
02342
02343 do {
02344
02345 frg_cnt = skb_shinfo(skb)->nr_frags;
02346 if (frg_cnt >= MAX_SKB_FRAGS)
02347 return -EFAULT;
02348
02349
02350 page = alloc_pages(sk->sk_allocation, 0);
02351
02352
02353
02354
02355 if (page == NULL)
02356 return -ENOMEM;
02357
02358
02359 sk->sk_sndmsg_page = page;
02360 sk->sk_sndmsg_off = 0;
02361 skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
02362 skb->truesize += PAGE_SIZE;
02363 atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
02364
02365
02366 frg_cnt = skb_shinfo(skb)->nr_frags;
02367 frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
02368
02369
02370 left = PAGE_SIZE - frag->page_offset;
02371 copy = (length > left)? left : length;
02372
02373 ret = getfrag(from, (page_address(frag->page) +
02374 frag->page_offset + frag->size),
02375 offset, copy, 0, skb);
02376 if (ret < 0)
02377 return -EFAULT;
02378
02379
02380 sk->sk_sndmsg_off += copy;
02381 frag->size += copy;
02382 skb->len += copy;
02383 skb->data_len += copy;
02384 offset += copy;
02385 length -= copy;
02386
02387 } while (length > 0);
02388
02389 return 0;
02390 }
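
/**
 *	skb_pull_rcsum - pull skb and update receive checksum
 *	@skb: buffer to update
 *	@len: length of data pulled
 *
 *	This function performs an skb_pull on the packet and updates
 *	the CHECKSUM_COMPLETE checksum.  It should be used on
 *	receive path processing instead of skb_pull unless you know
 *	that the checksum difference is zero (e.g., a valid IP header)
 *	or you are setting ip_summed to CHECKSUM_NONE.
 */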
02403 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
02404 {
02405 BUG_ON(len > skb->len);
02406 skb->len -= len;
02407 BUG_ON(skb->len < skb->data_len);
02408 skb_postpull_rcsum(skb, skb->data, len);
02409 return skb->data += len;
02410 }
02411
02412 EXPORT_SYMBOL_GPL(skb_pull_rcsum);
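
/**
 *	skb_segment - Perform protocol segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function performs segmentation on the given skb.  It returns
 *	a pointer to the first in a list of new skbs for the segments.
 *	In case of error it returns ERR_PTR(err).
 */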
02423 struct sk_buff *skb_segment(struct sk_buff *skb, int features)
02424 {
02425 struct sk_buff *segs = NULL;
02426 struct sk_buff *tail = NULL;
02427 struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
02428 unsigned int mss = skb_shinfo(skb)->gso_size;
02429 unsigned int doffset = skb->data - skb_mac_header(skb);
02430 unsigned int offset = doffset;
02431 unsigned int headroom;
02432 unsigned int len;
02433 int sg = features & NETIF_F_SG;
02434 int nfrags = skb_shinfo(skb)->nr_frags;
02435 int err = -ENOMEM;
02436 int i = 0;
02437 int pos;
02438
02439 __skb_push(skb, doffset);
02440 headroom = skb_headroom(skb);
02441 pos = skb_headlen(skb);
02442
02443 do {
02444 struct sk_buff *nskb;
02445 skb_frag_t *frag;
02446 int hsize;
02447 int size;
02448
02449 len = skb->len - offset;
02450 if (len > mss)
02451 len = mss;
02452
02453 hsize = skb_headlen(skb) - offset;
02454 if (hsize < 0)
02455 hsize = 0;
02456 if (hsize > len || !sg)
02457 hsize = len;
02458
02459 if (!hsize && i >= nfrags) {
02460 BUG_ON(fskb->len != len);
02461
02462 pos += len;
02463 nskb = skb_clone(fskb, GFP_ATOMIC);
02464 fskb = fskb->next;
02465
02466 if (unlikely(!nskb))
02467 goto err;
02468
02469 hsize = skb_end_pointer(nskb) - nskb->head;
02470 if (skb_cow_head(nskb, doffset + headroom)) {
02471 kfree_skb(nskb);
02472 goto err;
02473 }
02474
02475 nskb->truesize += skb_end_pointer(nskb) - nskb->head -
02476 hsize;
02477 skb_release_head_state(nskb);
02478 __skb_push(nskb, doffset);
02479 } else {
02480 nskb = alloc_skb(hsize + doffset + headroom,
02481 GFP_ATOMIC);
02482
02483 if (unlikely(!nskb))
02484 goto err;
02485
02486 skb_reserve(nskb, headroom);
02487 __skb_put(nskb, doffset);
02488 }
02489
02490 if (segs)
02491 tail->next = nskb;
02492 else
02493 segs = nskb;
02494 tail = nskb;
02495
02496 __copy_skb_header(nskb, skb);
02497 nskb->mac_len = skb->mac_len;
02498
02499 skb_reset_mac_header(nskb);
02500 skb_set_network_header(nskb, skb->mac_len);
02501 nskb->transport_header = (nskb->network_header +
02502 skb_network_header_len(skb));
02503 skb_copy_from_linear_data(skb, nskb->data, doffset);
02504
02505 if (pos >= offset + len)
02506 continue;
02507
02508 if (!sg) {
02509 nskb->ip_summed = CHECKSUM_NONE;
02510 nskb->csum = skb_copy_and_csum_bits(skb, offset,
02511 skb_put(nskb, len),
02512 len, 0);
02513 continue;
02514 }
02515
02516 frag = skb_shinfo(nskb)->frags;
02517
02518 skb_copy_from_linear_data_offset(skb, offset,
02519 skb_put(nskb, hsize), hsize);
02520
02521 while (pos < offset + len && i < nfrags) {
02522 *frag = skb_shinfo(skb)->frags[i];
02523 get_page(frag->page);
02524 size = frag->size;
02525
02526 if (pos < offset) {
02527 frag->page_offset += offset - pos;
02528 frag->size -= offset - pos;
02529 }
02530
02531 skb_shinfo(nskb)->nr_frags++;
02532
02533 if (pos + size <= offset + len) {
02534 i++;
02535 pos += size;
02536 } else {
02537 frag->size -= pos + size - (offset + len);
02538 goto skip_fraglist;
02539 }
02540
02541 frag++;
02542 }
02543
02544 if (pos < offset + len) {
02545 struct sk_buff *fskb2 = fskb;
02546
02547 BUG_ON(pos + fskb->len != offset + len);
02548
02549 pos += fskb->len;
02550 fskb = fskb->next;
02551
02552 if (fskb2->next) {
02553 fskb2 = skb_clone(fskb2, GFP_ATOMIC);
02554 if (!fskb2)
02555 goto err;
02556 } else
02557 skb_get(fskb2);
02558
02559 BUG_ON(skb_shinfo(nskb)->frag_list);
02560 skb_shinfo(nskb)->frag_list = fskb2;
02561 }
02562
02563 skip_fraglist:
02564 nskb->data_len = len - hsize;
02565 nskb->len += nskb->data_len;
02566 nskb->truesize += nskb->data_len;
02567 } while ((offset += len) < skb->len);
02568
02569 return segs;
02570
02571 err:
02572 while ((skb = segs)) {
02573 segs = skb->next;
02574 kfree_skb(skb);
02575 }
02576 return ERR_PTR(err);
02577 }
02578
02579 EXPORT_SYMBOL_GPL(skb_segment);
02580
02581 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
02582 {
02583 struct sk_buff *p = *head;
02584 struct sk_buff *nskb;
02585 unsigned int headroom;
02586 unsigned int hlen = p->data - skb_mac_header(p);
02587 unsigned int len = skb->len;
02588
02589 if (hlen + p->len + len >= 65536)
02590 return -E2BIG;
02591
02592 if (skb_shinfo(p)->frag_list)
02593 goto merge;
02594 else if (!skb_headlen(p) && !skb_headlen(skb) &&
02595 skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags <
02596 MAX_SKB_FRAGS) {
02597 memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags,
02598 skb_shinfo(skb)->frags,
02599 skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
02600
02601 skb_shinfo(p)->nr_frags += skb_shinfo(skb)->nr_frags;
02602 skb_shinfo(skb)->nr_frags = 0;
02603
02604 skb->truesize -= skb->data_len;
02605 skb->len -= skb->data_len;
02606 skb->data_len = 0;
02607
02608 NAPI_GRO_CB(skb)->free = 1;
02609 goto done;
02610 }
02611
02612 headroom = skb_headroom(p);
02613 nskb = netdev_alloc_skb(p->dev, headroom);
02614 if (unlikely(!nskb))
02615 return -ENOMEM;
02616
02617 __copy_skb_header(nskb, p);
02618 nskb->mac_len = p->mac_len;
02619
02620 skb_reserve(nskb, headroom);
02621
02622 skb_set_mac_header(nskb, -hlen);
02623 skb_set_network_header(nskb, skb_network_offset(p));
02624 skb_set_transport_header(nskb, skb_transport_offset(p));
02625
02626 memcpy(skb_mac_header(nskb), skb_mac_header(p), hlen);
02627
02628 *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
02629 skb_shinfo(nskb)->frag_list = p;
02630 skb_shinfo(nskb)->gso_size = skb_shinfo(p)->gso_size;
02631 skb_header_release(p);
02632 nskb->prev = p;
02633
02634 nskb->data_len += p->len;
02635 nskb->truesize += p->len;
02636 nskb->len += p->len;
02637
02638 *head = nskb;
02639 nskb->next = p->next;
02640 p->next = NULL;
02641
02642 p = nskb;
02643
02644 merge:
02645 p->prev->next = skb;
02646 p->prev = skb;
02647 skb_header_release(skb);
02648
02649 done:
02650 NAPI_GRO_CB(p)->count++;
02651 p->data_len += len;
02652 p->truesize += len;
02653 p->len += len;
02654
02655 NAPI_GRO_CB(skb)->same_flow = 1;
02656 return 0;
02657 }
02658 EXPORT_SYMBOL_GPL(skb_gro_receive);
02659
02660 void __init skb_init(void)
02661 {
02662 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
02663 sizeof(struct sk_buff),
02664 0,
02665 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
02666 NULL);
02667 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
02668 (2*sizeof(struct sk_buff)) +
02669 sizeof(atomic_t),
02670 0,
02671 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
02672 NULL);
02673 }
02674
02675
02676
02677
02678
02679
02680
02681
02682
02683
02684
02685 static int
02686 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
02687 {
02688 int start = skb_headlen(skb);
02689 int i, copy = start - offset;
02690 int elt = 0;
02691
02692 if (copy > 0) {
02693 if (copy > len)
02694 copy = len;
02695 sg_set_buf(sg, skb->data + offset, copy);
02696 elt++;
02697 if ((len -= copy) == 0)
02698 return elt;
02699 offset += copy;
02700 }
02701
02702 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
02703 int end;
02704
02705 WARN_ON(start > offset + len);
02706
02707 end = start + skb_shinfo(skb)->frags[i].size;
02708 if ((copy = end - offset) > 0) {
02709 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
02710
02711 if (copy > len)
02712 copy = len;
02713 sg_set_page(&sg[elt], frag->page, copy,
02714 frag->page_offset+offset-start);
02715 elt++;
02716 if (!(len -= copy))
02717 return elt;
02718 offset += copy;
02719 }
02720 start = end;
02721 }
02722
02723 if (skb_shinfo(skb)->frag_list) {
02724 struct sk_buff *list = skb_shinfo(skb)->frag_list;
02725
02726 for (; list; list = list->next) {
02727 int end;
02728
02729 WARN_ON(start > offset + len);
02730
02731 end = start + list->len;
02732 if ((copy = end - offset) > 0) {
02733 if (copy > len)
02734 copy = len;
02735 elt += __skb_to_sgvec(list, sg+elt, offset - start,
02736 copy);
02737 if ((len -= copy) == 0)
02738 return elt;
02739 offset += copy;
02740 }
02741 start = end;
02742 }
02743 }
02744 BUG_ON(len);
02745 return elt;
02746 }
02747
02748 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
02749 {
02750 int nsg = __skb_to_sgvec(skb, sg, offset, len);
02751
02752 sg_mark_end(&sg[nsg - 1]);
02753
02754 return nsg;
02755 }
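
/**
 * skb_cow_data - Check that a socket buffer's data buffers are writable
 * @skb: The socket buffer to check.
 * @tailbits: Amount of trailing space to be added
 * @trailer: Returned pointer to the skb where the @tailbits space begins
 *
 * Make sure that the data buffers attached to a socket buffer are
 * writable. If they are not, private copies are made of the data buffers
 * and the socket buffer is set to use these instead.
 *
 * If @tailbits is given, make sure that there is space to write @tailbits
 * bytes of data beyond the current end of the socket buffer.  @trailer
 * will be set to point to the skb in which this space begins.
 *
 * The number of scatterlist elements required to completely map the
 * COW'd and extended socket buffer will be returned.
 */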
02774 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
02775 {
02776 int copyflag;
02777 int elt;
02778 struct sk_buff *skb1, **skb_p;
02779
02780
02781
02782
02783
02784 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
02785 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
02786 return -ENOMEM;
02787
02788
02789 if (!skb_shinfo(skb)->frag_list) {
02790
02791
02792
02793
02794
02795 if (skb_tailroom(skb) < tailbits &&
02796 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
02797 return -ENOMEM;
02798
02799
02800 *trailer = skb;
02801 return 1;
02802 }
02803
02804
02805
02806 elt = 1;
02807 skb_p = &skb_shinfo(skb)->frag_list;
02808 copyflag = 0;
02809
02810 while ((skb1 = *skb_p) != NULL) {
02811 int ntail = 0;
02812
02813
02814
02815
02816
02817 if (skb_shared(skb1))
02818 copyflag = 1;
02819
02820
02821
02822 if (skb1->next == NULL && tailbits) {
02823 if (skb_shinfo(skb1)->nr_frags ||
02824 skb_shinfo(skb1)->frag_list ||
02825 skb_tailroom(skb1) < tailbits)
02826 ntail = tailbits + 128;
02827 }
02828
02829 if (copyflag ||
02830 skb_cloned(skb1) ||
02831 ntail ||
02832 skb_shinfo(skb1)->nr_frags ||
02833 skb_shinfo(skb1)->frag_list) {
02834 struct sk_buff *skb2;
02835
02836
02837 if (ntail == 0)
02838 skb2 = skb_copy(skb1, GFP_ATOMIC);
02839 else
02840 skb2 = skb_copy_expand(skb1,
02841 skb_headroom(skb1),
02842 ntail,
02843 GFP_ATOMIC);
02844 if (unlikely(skb2 == NULL))
02845 return -ENOMEM;
02846
02847 if (skb1->sk)
02848 skb_set_owner_w(skb2, skb1->sk);
02849
02850
02851
02852
02853 skb2->next = skb1->next;
02854 *skb_p = skb2;
02855 kfree_skb(skb1);
02856 skb1 = skb2;
02857 }
02858 elt++;
02859 *trailer = skb1;
02860 skb_p = &skb1->next;
02861 }
02862
02863 return elt;
02864 }
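
/**
 * skb_partial_csum_set - set up and verify partial csum values for packet
 * @skb: the skb to set
 * @start: the number of bytes after skb->data to start checksumming.
 * @off: the offset from start to place the checksum.
 *
 * For untrusted partially-checksummed packets, we need to make sure the
 * values for skb->csum_start and skb->csum_offset are valid so we don't
 * oops.
 *
 * This function checks and sets those values and skb->ip_summed: if this
 * returns false you should drop the packet.
 */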
02878 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
02879 {
02880 if (unlikely(start > skb->len - 2) ||
02881 unlikely((int)start + off > skb->len - 2)) {
02882 if (net_ratelimit())
02883 printk(KERN_WARNING
02884 "bad partial csum: csum=%u/%u len=%u\n",
02885 start, off, skb->len);
02886 return false;
02887 }
02888 skb->ip_summed = CHECKSUM_PARTIAL;
02889 skb->csum_start = skb_headroom(skb) + start;
02890 skb->csum_offset = off;
02891 return true;
02892 }
02893
02894 void __skb_warn_lro_forwarding(const struct sk_buff *skb)
02895 {
02896 if (net_ratelimit())
02897 pr_warning("%s: received packets cannot be forwarded"
02898 " while LRO is enabled\n", skb->dev->name);
02899 }
02900
02901 EXPORT_SYMBOL(___pskb_trim);
02902 EXPORT_SYMBOL(__kfree_skb);
02903 EXPORT_SYMBOL(kfree_skb);
02904 EXPORT_SYMBOL(__pskb_pull_tail);
02905 EXPORT_SYMBOL(__alloc_skb);
02906 EXPORT_SYMBOL(__netdev_alloc_skb);
02907 EXPORT_SYMBOL(pskb_copy);
02908 EXPORT_SYMBOL(pskb_expand_head);
02909 EXPORT_SYMBOL(skb_checksum);
02910 EXPORT_SYMBOL(skb_clone);
02911 EXPORT_SYMBOL(skb_copy);
02912 EXPORT_SYMBOL(skb_copy_and_csum_bits);
02913 EXPORT_SYMBOL(skb_copy_and_csum_dev);
02914 EXPORT_SYMBOL(skb_copy_bits);
02915 EXPORT_SYMBOL(skb_copy_expand);
02916 EXPORT_SYMBOL(skb_over_panic);
02917 EXPORT_SYMBOL(skb_pad);
02918 EXPORT_SYMBOL(skb_realloc_headroom);
02919 EXPORT_SYMBOL(skb_under_panic);
02920 EXPORT_SYMBOL(skb_dequeue);
02921 EXPORT_SYMBOL(skb_dequeue_tail);
02922 EXPORT_SYMBOL(skb_insert);
02923 EXPORT_SYMBOL(skb_queue_purge);
02924 EXPORT_SYMBOL(skb_queue_head);
02925 EXPORT_SYMBOL(skb_queue_tail);
02926 EXPORT_SYMBOL(skb_unlink);
02927 EXPORT_SYMBOL(skb_append);
02928 EXPORT_SYMBOL(skb_split);
02929 EXPORT_SYMBOL(skb_prepare_seq_read);
02930 EXPORT_SYMBOL(skb_seq_read);
02931 EXPORT_SYMBOL(skb_abort_seq_read);
02932 EXPORT_SYMBOL(skb_find_text);
02933 EXPORT_SYMBOL(skb_append_datato_frags);
02934 EXPORT_SYMBOL(__skb_warn_lro_forwarding);
02935
02936 EXPORT_SYMBOL_GPL(skb_to_sgvec);
02937 EXPORT_SYMBOL_GPL(skb_cow_data);
02938 EXPORT_SYMBOL_GPL(skb_partial_csum_set);