Go to the source code of this file.
Defines | |
| #define | C(x) n->x = skb->x |
| #define | TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) |
Functions | |
| static void | sock_pipe_buf_release (struct pipe_inode_info *pipe, struct pipe_buffer *buf) |
| static void | sock_pipe_buf_get (struct pipe_inode_info *pipe, struct pipe_buffer *buf) |
| static int | sock_pipe_buf_steal (struct pipe_inode_info *pipe, struct pipe_buffer *buf) |
| void | skb_over_panic (struct sk_buff *skb, int sz, void *here) |
| skb_over_panic - private function : buffer : size : address | |
| void | skb_under_panic (struct sk_buff *skb, int sz, void *here) |
| skb_under_panic - private function : buffer : size : address | |
| struct sk_buff * | __alloc_skb (unsigned int size, gfp_t gfp_mask, int fclone, int node) |
| __alloc_skb - allocate a network buffer : size to allocate : allocation mask : allocate from fclone cache instead of head cache and allocate a cloned (child) skb : numa node to allocate memory on | |
| struct sk_buff * | __netdev_alloc_skb (struct net_device *dev, unsigned int length, gfp_t gfp_mask) |
| __netdev_alloc_skb - allocate an skbuff for rx on a specific device : network device to receive on : length to allocate : get_free_pages mask, passed to alloc_skb | |
| struct page * | __netdev_alloc_page (struct net_device *dev, gfp_t gfp_mask) |
| EXPORT_SYMBOL (__netdev_alloc_page) | |
| void | skb_add_rx_frag (struct sk_buff *skb, int i, struct page *page, int off, int size) |
| EXPORT_SYMBOL (skb_add_rx_frag) | |
| struct sk_buff * | dev_alloc_skb (unsigned int length) |
| dev_alloc_skb - allocate an skbuff for receiving : length to allocate | |
| EXPORT_SYMBOL (dev_alloc_skb) | |
| static void | skb_drop_list (struct sk_buff **listp) |
| static void | skb_drop_fraglist (struct sk_buff *skb) |
| static void | skb_clone_fraglist (struct sk_buff *skb) |
| static void | skb_release_data (struct sk_buff *skb) |
| static void | kfree_skbmem (struct sk_buff *skb) |
| static void | skb_release_head_state (struct sk_buff *skb) |
| static void | skb_release_all (struct sk_buff *skb) |
| void | __kfree_skb (struct sk_buff *skb) |
| __kfree_skb - private function : buffer | |
| void | kfree_skb (struct sk_buff *skb) |
| kfree_skb - free an sk_buff : buffer to free | |
| int | skb_recycle_check (struct sk_buff *skb, int skb_size) |
| skb_recycle_check - check if skb can be reused for receive : buffer : minimum receive buffer size | |
| EXPORT_SYMBOL (skb_recycle_check) | |
| static void | __copy_skb_header (struct sk_buff *new, const struct sk_buff *old) |
| static struct sk_buff * | __skb_clone (struct sk_buff *n, struct sk_buff *skb) |
| struct sk_buff * | skb_morph (struct sk_buff *dst, struct sk_buff *src) |
| skb_morph - morph one skb into another : the skb to receive the contents : the skb to supply the contents | |
| EXPORT_SYMBOL_GPL (skb_morph) | |
| struct sk_buff * | skb_clone (struct sk_buff *skb, gfp_t gfp_mask) |
| skb_clone - duplicate an sk_buff : buffer to clone : allocation priority | |
| static void | copy_skb_header (struct sk_buff *new, const struct sk_buff *old) |
| struct sk_buff * | skb_copy (const struct sk_buff *skb, gfp_t gfp_mask) |
| skb_copy - create private copy of an sk_buff : buffer to copy : allocation priority | |
| struct sk_buff * | pskb_copy (struct sk_buff *skb, gfp_t gfp_mask) |
| pskb_copy - create copy of an sk_buff with private head. | |
| int | pskb_expand_head (struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask) |
| pskb_expand_head - reallocate header of &sk_buff : buffer to reallocate : room to add at head : room to add at tail : allocation priority | |
| struct sk_buff * | skb_realloc_headroom (struct sk_buff *skb, unsigned int headroom) |
| struct sk_buff * | skb_copy_expand (const struct sk_buff *skb, int newheadroom, int newtailroom, gfp_t gfp_mask) |
| skb_copy_expand - copy and expand sk_buff : buffer to copy : new free bytes at head : new free bytes at tail : allocation priority | |
| int | skb_pad (struct sk_buff *skb, int pad) |
| skb_pad - zero pad the tail of an skb : buffer to pad : space to pad | |
| unsigned char * | skb_put (struct sk_buff *skb, unsigned int len) |
| skb_put - add data to a buffer : buffer to use : amount of data to add | |
| EXPORT_SYMBOL (skb_put) | |
| unsigned char * | skb_push (struct sk_buff *skb, unsigned int len) |
| skb_push - add data to the start of a buffer : buffer to use : amount of data to add | |
| EXPORT_SYMBOL (skb_push) | |
| unsigned char * | skb_pull (struct sk_buff *skb, unsigned int len) |
| skb_pull - remove data from the start of a buffer : buffer to use : amount of data to remove | |
| EXPORT_SYMBOL (skb_pull) | |
| void | skb_trim (struct sk_buff *skb, unsigned int len) |
| skb_trim - remove end from a buffer : buffer to alter : new length | |
| EXPORT_SYMBOL (skb_trim) | |
| int | ___pskb_trim (struct sk_buff *skb, unsigned int len) |
| unsigned char * | __pskb_pull_tail (struct sk_buff *skb, int delta) |
| __pskb_pull_tail - advance tail of skb header : buffer to reallocate : number of bytes to advance tail | |
| int | skb_copy_bits (const struct sk_buff *skb, int offset, void *to, int len) |
| static void | sock_spd_release (struct splice_pipe_desc *spd, unsigned int i) |
| static struct page * | linear_to_page (struct page *page, unsigned int len, unsigned int offset) |
| static int | spd_fill_page (struct splice_pipe_desc *spd, struct page *page, unsigned int len, unsigned int offset, struct sk_buff *skb, int linear) |
| static void | __segment_seek (struct page **page, unsigned int *poff, unsigned int *plen, unsigned int off) |
| static int | __splice_segment (struct page *page, unsigned int poff, unsigned int plen, unsigned int *off, unsigned int *len, struct sk_buff *skb, struct splice_pipe_desc *spd, int linear) |
| static int | __skb_splice_bits (struct sk_buff *skb, unsigned int *offset, unsigned int *len, struct splice_pipe_desc *spd) |
| int | skb_splice_bits (struct sk_buff *skb, unsigned int offset, struct pipe_inode_info *pipe, unsigned int tlen, unsigned int flags) |
| int | skb_store_bits (struct sk_buff *skb, int offset, const void *from, int len) |
| skb_store_bits - store bits from kernel buffer to skb : destination buffer : offset in destination : source buffer : number of bytes to copy | |
| EXPORT_SYMBOL (skb_store_bits) | |
| __wsum | skb_checksum (const struct sk_buff *skb, int offset, int len, __wsum csum) |
| __wsum | skb_copy_and_csum_bits (const struct sk_buff *skb, int offset, u8 *to, int len, __wsum csum) |
| void | skb_copy_and_csum_dev (const struct sk_buff *skb, u8 *to) |
| struct sk_buff * | skb_dequeue (struct sk_buff_head *list) |
| skb_dequeue - remove from the head of the queue : list to dequeue from | |
| struct sk_buff * | skb_dequeue_tail (struct sk_buff_head *list) |
| skb_dequeue_tail - remove from the tail of the queue : list to dequeue from | |
| void | skb_queue_purge (struct sk_buff_head *list) |
| skb_queue_purge - empty a list : list to empty | |
| void | skb_queue_head (struct sk_buff_head *list, struct sk_buff *newsk) |
| skb_queue_head - queue a buffer at the list head : list to use : buffer to queue | |
| void | skb_queue_tail (struct sk_buff_head *list, struct sk_buff *newsk) |
| skb_queue_tail - queue a buffer at the list tail : list to use : buffer to queue | |
| void | skb_unlink (struct sk_buff *skb, struct sk_buff_head *list) |
| skb_unlink - remove a buffer from a list : buffer to remove : list to use | |
| void | skb_append (struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) |
| skb_append - append a buffer : buffer to insert after : buffer to insert : list to use | |
| void | skb_insert (struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) |
| skb_insert - insert a buffer : buffer to insert before : buffer to insert : list to use | |
| static void | skb_split_inside_header (struct sk_buff *skb, struct sk_buff *skb1, const u32 len, const int pos) |
| static void | skb_split_no_header (struct sk_buff *skb, struct sk_buff *skb1, const u32 len, int pos) |
| void | skb_split (struct sk_buff *skb, struct sk_buff *skb1, const u32 len) |
| skb_split - Split fragmented skb to two parts at length len. | |
| static int | skb_prepare_for_shift (struct sk_buff *skb) |
| int | skb_shift (struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) |
| skb_shift - Shifts paged data partially from skb to another : buffer into which tail data gets added : buffer from which the paged data comes from : shift up to this many bytes | |
| void | skb_prepare_seq_read (struct sk_buff *skb, unsigned int from, unsigned int to, struct skb_seq_state *st) |
| skb_prepare_seq_read - Prepare a sequential read of skb data : the buffer to read : lower offset of data to be read : upper offset of data to be read : state variable | |
| unsigned int | skb_seq_read (unsigned int consumed, const u8 **data, struct skb_seq_state *st) |
| skb_seq_read - Sequentially read skb data : number of bytes consumed by the caller so far : destination pointer for data to be returned : state variable | |
| void | skb_abort_seq_read (struct skb_seq_state *st) |
| skb_abort_seq_read - Abort a sequential read of skb data : state variable | |
| static unsigned int | skb_ts_get_next_block (unsigned int offset, const u8 **text, struct ts_config *conf, struct ts_state *state) |
| static void | skb_ts_finish (struct ts_config *conf, struct ts_state *state) |
| unsigned int | skb_find_text (struct sk_buff *skb, unsigned int from, unsigned int to, struct ts_config *config, struct ts_state *state) |
| skb_find_text - Find a text pattern in skb data : the buffer to look in : search offset : search limit : textsearch configuration : uninitialized textsearch state variable | |
| int | skb_append_datato_frags (struct sock *sk, struct sk_buff *skb, int(*getfrag)(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length) |
| skb_append_datato_frags: - append the user data to a skb : sock structure : skb structure to be appended with user data. | |
| unsigned char * | skb_pull_rcsum (struct sk_buff *skb, unsigned int len) |
| skb_pull_rcsum - pull skb and update receive checksum : buffer to update : length of data pulled | |
| EXPORT_SYMBOL_GPL (skb_pull_rcsum) | |
| struct sk_buff * | skb_segment (struct sk_buff *skb, int features) |
| skb_segment - Perform protocol segmentation on skb. | |
| EXPORT_SYMBOL_GPL (skb_segment) | |
| int | skb_gro_receive (struct sk_buff **head, struct sk_buff *skb) |
| EXPORT_SYMBOL_GPL (skb_gro_receive) | |
| void __init | skb_init (void) |
| static int | __skb_to_sgvec (struct sk_buff *skb, struct scatterlist *sg, int offset, int len) |
| skb_to_sgvec - Fill a scatter-gather list from a socket buffer : Socket buffer containing the buffers to be mapped : The scatter-gather list to map into : The offset into the buffer's contents to start mapping : Length of buffer space to be mapped | |
| int | skb_to_sgvec (struct sk_buff *skb, struct scatterlist *sg, int offset, int len) |
| int | skb_cow_data (struct sk_buff *skb, int tailbits, struct sk_buff **trailer) |
| skb_cow_data - Check that a socket buffer's data buffers are writable : The socket buffer to check. | |
| bool | skb_partial_csum_set (struct sk_buff *skb, u16 start, u16 off) |
| skb_partial_csum_set - set up and verify partial csum values for packet : the skb to set : the number of bytes after skb->data to start checksumming. | |
| void | __skb_warn_lro_forwarding (const struct sk_buff *skb) |
| EXPORT_SYMBOL (___pskb_trim) | |
| EXPORT_SYMBOL (__kfree_skb) | |
| EXPORT_SYMBOL (kfree_skb) | |
| EXPORT_SYMBOL (__pskb_pull_tail) | |
| EXPORT_SYMBOL (__alloc_skb) | |
| EXPORT_SYMBOL (__netdev_alloc_skb) | |
| EXPORT_SYMBOL (pskb_copy) | |
| EXPORT_SYMBOL (pskb_expand_head) | |
| EXPORT_SYMBOL (skb_checksum) | |
| EXPORT_SYMBOL (skb_clone) | |
| EXPORT_SYMBOL (skb_copy) | |
| EXPORT_SYMBOL (skb_copy_and_csum_bits) | |
| EXPORT_SYMBOL (skb_copy_and_csum_dev) | |
| EXPORT_SYMBOL (skb_copy_bits) | |
| EXPORT_SYMBOL (skb_copy_expand) | |
| EXPORT_SYMBOL (skb_over_panic) | |
| EXPORT_SYMBOL (skb_pad) | |
| EXPORT_SYMBOL (skb_realloc_headroom) | |
| EXPORT_SYMBOL (skb_under_panic) | |
| EXPORT_SYMBOL (skb_dequeue) | |
| EXPORT_SYMBOL (skb_dequeue_tail) | |
| EXPORT_SYMBOL (skb_insert) | |
| EXPORT_SYMBOL (skb_queue_purge) | |
| EXPORT_SYMBOL (skb_queue_head) | |
| EXPORT_SYMBOL (skb_queue_tail) | |
| EXPORT_SYMBOL (skb_unlink) | |
| EXPORT_SYMBOL (skb_append) | |
| EXPORT_SYMBOL (skb_split) | |
| EXPORT_SYMBOL (skb_prepare_seq_read) | |
| EXPORT_SYMBOL (skb_seq_read) | |
| EXPORT_SYMBOL (skb_abort_seq_read) | |
| EXPORT_SYMBOL (skb_find_text) | |
| EXPORT_SYMBOL (skb_append_datato_frags) | |
| EXPORT_SYMBOL (__skb_warn_lro_forwarding) | |
| EXPORT_SYMBOL_GPL (skb_to_sgvec) | |
| EXPORT_SYMBOL_GPL (skb_cow_data) | |
| EXPORT_SYMBOL_GPL (skb_partial_csum_set) | |
Variables | |
| static struct kmem_cache *skbuff_head_cache | __read_mostly |
| static struct pipe_buf_operations | sock_pipe_buf_ops |
| #define C | ( | x | ) | n->x = skb->x |
| #define TS_SKB_CB | ( | state | ) | ((struct skb_seq_state *) &((state)->cb)) |
| int ___pskb_trim | ( | struct sk_buff * | skb, | |
| unsigned int | len | |||
| ) |
| struct sk_buff* __alloc_skb | ( | unsigned int | size, | |
| gfp_t | gfp_mask, | |||
| int | fclone, | |||
| int | node | |||
| ) | [read] |
__alloc_skb - allocate a network buffer : size to allocate : allocation mask : allocate from fclone cache instead of head cache and allocate a cloned (child) skb : numa node to allocate memory on
Allocate a new &sk_buff. The returned buffer has no headroom and a tail room of size bytes. The object has a reference count of one. The return is the buffer. On a failure the return is NULL.
Buffers may only be allocated from interrupts using a gfp_mask of GFP_ATOMIC.
| static void __copy_skb_header | ( | struct sk_buff * | new, | |
| const struct sk_buff * | old | |||
| ) | [static] |
| void __kfree_skb | ( | struct sk_buff * | skb | ) |
| struct page* __netdev_alloc_page | ( | struct net_device * | dev, | |
| gfp_t | gfp_mask | |||
| ) | [read] |
| struct sk_buff* __netdev_alloc_skb | ( | struct net_device * | dev, | |
| unsigned int | length, | |||
| gfp_t | gfp_mask | |||
| ) | [read] |
__netdev_alloc_skb - allocate an skbuff for rx on a specific device : network device to receive on : length to allocate : get_free_pages mask, passed to alloc_skb
Allocate a new &sk_buff and assign it a usage count of one. The buffer has unspecified headroom built in. Users should allocate the headroom they think they need without accounting for the built in space. The built in space is used for optimisations.
NULL is returned if there is no free memory.
| unsigned char* __pskb_pull_tail | ( | struct sk_buff * | skb, | |
| int | delta | |||
| ) |
__pskb_pull_tail - advance tail of skb header : buffer to reallocate : number of bytes to advance tail
The function makes sense only on a fragmented &sk_buff; it expands the header, moving its tail forward and copying necessary data from the fragmented part.
&sk_buff MUST have reference count of 1.
Returns NULL (and &sk_buff does not change) if pull failed or value of new tail of skb in the case of success.
All the pointers pointing into skb header may change and must be reloaded after call to this function.
| static void __segment_seek | ( | struct page ** | page, | |
| unsigned int * | poff, | |||
| unsigned int * | plen, | |||
| unsigned int | off | |||
| ) | [inline, static] |
| static struct sk_buff* __skb_clone | ( | struct sk_buff * | n, | |
| struct sk_buff * | skb | |||
| ) | [static, read] |
| static int __skb_splice_bits | ( | struct sk_buff * | skb, | |
| unsigned int * | offset, | |||
| unsigned int * | len, | |||
| struct splice_pipe_desc * | spd | |||
| ) | [static] |
| static int __skb_to_sgvec | ( | struct sk_buff * | skb, | |
| struct scatterlist * | sg, | |||
| int | offset, | |||
| int | len | |||
| ) | [static] |
skb_to_sgvec - Fill a scatter-gather list from a socket buffer : Socket buffer containing the buffers to be mapped : The scatter-gather list to map into : The offset into the buffer's contents to start mapping : Length of buffer space to be mapped
Fill the specified scatter-gather list with mappings/pointers into a region of the buffer space attached to a socket buffer.
| void __skb_warn_lro_forwarding | ( | const struct sk_buff * | skb | ) |
| static int __splice_segment | ( | struct page * | page, | |
| unsigned int | poff, | |||
| unsigned int | plen, | |||
| unsigned int * | off, | |||
| unsigned int * | len, | |||
| struct sk_buff * | skb, | |||
| struct splice_pipe_desc * | spd, | |||
| int | linear | |||
| ) | [inline, static] |
| static void copy_skb_header | ( | struct sk_buff * | new, | |
| const struct sk_buff * | old | |||
| ) | [static] |
| struct sk_buff* dev_alloc_skb | ( | unsigned int | length | ) | [read] |
dev_alloc_skb - allocate an skbuff for receiving : length to allocate
Allocate a new &sk_buff and assign it a usage count of one. The buffer has unspecified headroom built in. Users should allocate the headroom they think they need without accounting for the built in space. The built in space is used for optimisations.
NULL is returned if there is no free memory. Although this function allocates memory it can be called from an interrupt.
| EXPORT_SYMBOL | ( | __skb_warn_lro_forwarding | ) |
| EXPORT_SYMBOL | ( | skb_append_datato_frags | ) |
| EXPORT_SYMBOL | ( | skb_find_text | ) |
| EXPORT_SYMBOL | ( | skb_abort_seq_read | ) |
| EXPORT_SYMBOL | ( | skb_seq_read | ) |
| EXPORT_SYMBOL | ( | skb_prepare_seq_read | ) |
| EXPORT_SYMBOL | ( | skb_split | ) |
| EXPORT_SYMBOL | ( | skb_append | ) |
| EXPORT_SYMBOL | ( | skb_unlink | ) |
| EXPORT_SYMBOL | ( | skb_queue_tail | ) |
| EXPORT_SYMBOL | ( | skb_queue_head | ) |
| EXPORT_SYMBOL | ( | skb_queue_purge | ) |
| EXPORT_SYMBOL | ( | skb_insert | ) |
| EXPORT_SYMBOL | ( | skb_dequeue_tail | ) |
| EXPORT_SYMBOL | ( | skb_dequeue | ) |
| EXPORT_SYMBOL | ( | skb_under_panic | ) |
| EXPORT_SYMBOL | ( | skb_realloc_headroom | ) |
| EXPORT_SYMBOL | ( | skb_pad | ) |
| EXPORT_SYMBOL | ( | skb_over_panic | ) |
| EXPORT_SYMBOL | ( | skb_copy_expand | ) |
| EXPORT_SYMBOL | ( | skb_copy_bits | ) |
| EXPORT_SYMBOL | ( | skb_copy_and_csum_dev | ) |
| EXPORT_SYMBOL | ( | skb_copy_and_csum_bits | ) |
| EXPORT_SYMBOL | ( | skb_copy | ) |
| EXPORT_SYMBOL | ( | skb_clone | ) |
| EXPORT_SYMBOL | ( | skb_checksum | ) |
| EXPORT_SYMBOL | ( | pskb_expand_head | ) |
| EXPORT_SYMBOL | ( | pskb_copy | ) |
| EXPORT_SYMBOL | ( | __netdev_alloc_skb | ) |
| EXPORT_SYMBOL | ( | __alloc_skb | ) |
| EXPORT_SYMBOL | ( | __pskb_pull_tail | ) |
| EXPORT_SYMBOL | ( | kfree_skb | ) |
| EXPORT_SYMBOL | ( | __kfree_skb | ) |
| EXPORT_SYMBOL | ( | ___pskb_trim | ) |
| EXPORT_SYMBOL | ( | skb_store_bits | ) |
| EXPORT_SYMBOL | ( | skb_trim | ) |
| EXPORT_SYMBOL | ( | skb_pull | ) |
| EXPORT_SYMBOL | ( | skb_push | ) |
| EXPORT_SYMBOL | ( | skb_put | ) |
| EXPORT_SYMBOL | ( | skb_recycle_check | ) |
| EXPORT_SYMBOL | ( | dev_alloc_skb | ) |
| EXPORT_SYMBOL | ( | skb_add_rx_frag | ) |
| EXPORT_SYMBOL | ( | __netdev_alloc_page | ) |
| EXPORT_SYMBOL_GPL | ( | skb_partial_csum_set | ) |
| EXPORT_SYMBOL_GPL | ( | skb_cow_data | ) |
| EXPORT_SYMBOL_GPL | ( | skb_to_sgvec | ) |
| EXPORT_SYMBOL_GPL | ( | skb_gro_receive | ) |
| EXPORT_SYMBOL_GPL | ( | skb_segment | ) |
| EXPORT_SYMBOL_GPL | ( | skb_pull_rcsum | ) |
| EXPORT_SYMBOL_GPL | ( | skb_morph | ) |
| void kfree_skb | ( | struct sk_buff * | skb | ) |
| static struct page* linear_to_page | ( | struct page * | page, | |
| unsigned int | len, | |||
| unsigned int | offset | |||
| ) | [static, read] |
| struct sk_buff* pskb_copy | ( | struct sk_buff * | skb, | |
| gfp_t | gfp_mask | |||
| ) | [read] |
pskb_copy - create copy of an sk_buff with private head.
: buffer to copy : allocation priority
Make a copy of both an &sk_buff and part of its data, located in header. Fragmented data remain shared. This is used when the caller wishes to modify only header of &sk_buff and needs private copy of the header to alter. Returns NULL on failure or the pointer to the buffer on success. The returned buffer has a reference count of 1.
| int pskb_expand_head | ( | struct sk_buff * | skb, | |
| int | nhead, | |||
| int | ntail, | |||
| gfp_t | gfp_mask | |||
| ) |
pskb_expand_head - reallocate header of &sk_buff : buffer to reallocate : room to add at head : room to add at tail : allocation priority
Expands (or creates an identical copy, if &nhead and &ntail are zero) the header of skb. &sk_buff itself is not changed. &sk_buff MUST have a reference count of 1. Returns zero in the case of success, or an error code if expansion failed. In the latter case, &sk_buff is not changed.
All the pointers pointing into skb header may change and must be reloaded after call to this function.
| void skb_abort_seq_read | ( | struct skb_seq_state * | st | ) |
skb_abort_seq_read - Abort a sequential read of skb data : state variable
Must be called if skb_seq_read() was not called until it returned 0.
| void skb_add_rx_frag | ( | struct sk_buff * | skb, | |
| int | i, | |||
| struct page * | page, | |||
| int | off, | |||
| int | size | |||
| ) |
| void skb_append | ( | struct sk_buff * | old, | |
| struct sk_buff * | newsk, | |||
| struct sk_buff_head * | list | |||
| ) |
skb_append - append a buffer : buffer to insert after : buffer to insert : list to use
Place a packet after a given packet in a list. The list locks are taken and this function is atomic with respect to other list locked calls. A buffer cannot be placed on two lists at the same time.
| int skb_append_datato_frags | ( | struct sock * | sk, | |
| struct sk_buff * | skb, | |||
| int(*)(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) | getfrag, | |||
| void * | from, | |||
| int | length | |||
| ) |
skb_append_datato_frags: - append the user data to a skb : sock structure : skb structure to be appended with user data.
: call back function to be used for getting the user data : pointer to user message iov : length of the iov message
Description: This procedure appends the user data to the fragment part of the skb. If any page allocation fails, this procedure returns -ENOMEM.
| __wsum skb_checksum | ( | const struct sk_buff * | skb, | |
| int | offset, | |||
| int | len, | |||
| __wsum | csum | |||
| ) |
| struct sk_buff* skb_clone | ( | struct sk_buff * | skb, | |
| gfp_t | gfp_mask | |||
| ) | [read] |
skb_clone - duplicate an sk_buff : buffer to clone : allocation priority
Duplicate an &sk_buff. The new one is not owned by a socket. Both copies share the same packet data but not structure. The new buffer has a reference count of 1. If the allocation fails the function returns NULL otherwise the new buffer is returned.
If this function is called from an interrupt, gfp_mask must be GFP_ATOMIC.
| static void skb_clone_fraglist | ( | struct sk_buff * | skb | ) | [static] |
| struct sk_buff* skb_copy | ( | const struct sk_buff * | skb, | |
| gfp_t | gfp_mask | |||
| ) | [read] |
skb_copy - create private copy of an sk_buff : buffer to copy : allocation priority
Make a copy of both an &sk_buff and its data. This is used when the caller wishes to modify the data and needs a private copy of the data to alter. Returns NULL on failure or the pointer to the buffer on success. The returned buffer has a reference count of 1.
As by-product this function converts non-linear &sk_buff to linear one, so that &sk_buff becomes completely private and caller is allowed to modify all the data of returned buffer. This means that this function is not recommended for use in circumstances when only header is going to be modified. Use pskb_copy() instead.
| __wsum skb_copy_and_csum_bits | ( | const struct sk_buff * | skb, | |
| int | offset, | |||
| u8 * | to, | |||
| int | len, | |||
| __wsum | csum | |||
| ) |
| void skb_copy_and_csum_dev | ( | const struct sk_buff * | skb, | |
| u8 * | to | |||
| ) |
| int skb_copy_bits | ( | const struct sk_buff * | skb, | |
| int | offset, | |||
| void * | to, | |||
| int | len | |||
| ) |
| struct sk_buff* skb_copy_expand | ( | const struct sk_buff * | skb, | |
| int | newheadroom, | |||
| int | newtailroom, | |||
| gfp_t | gfp_mask | |||
| ) | [read] |
skb_copy_expand - copy and expand sk_buff : buffer to copy : new free bytes at head : new free bytes at tail : allocation priority
Make a copy of both an &sk_buff and its data and while doing so allocate additional space.
This is used when the caller wishes to modify the data and needs a private copy of the data to alter as well as more space for new fields. Returns NULL on failure or the pointer to the buffer on success. The returned buffer has a reference count of 1.
You must pass GFP_ATOMIC as the allocation priority if this function is called from an interrupt.
| int skb_cow_data | ( | struct sk_buff * | skb, | |
| int | tailbits, | |||
| struct sk_buff ** | trailer | |||
| ) |
skb_cow_data - Check that a socket buffer's data buffers are writable : The socket buffer to check.
: Amount of trailing space to be added : Returned pointer to the skb where the space begins
Make sure that the data buffers attached to a socket buffer are writable. If they are not, private copies are made of the data buffers and the socket buffer is set to use these instead.
If tailbits is given, make sure that there is space to write tailbits bytes of data beyond the current end of the socket buffer. trailer will be set to point to the skb in which this space begins.
The number of scatterlist elements required to completely map the COW'd and extended socket buffer will be returned.
| struct sk_buff* skb_dequeue | ( | struct sk_buff_head * | list | ) | [read] |
| struct sk_buff* skb_dequeue_tail | ( | struct sk_buff_head * | list | ) | [read] |
| static void skb_drop_fraglist | ( | struct sk_buff * | skb | ) | [inline, static] |
| static void skb_drop_list | ( | struct sk_buff ** | listp | ) | [static] |
| unsigned int skb_find_text | ( | struct sk_buff * | skb, | |
| unsigned int | from, | |||
| unsigned int | to, | |||
| struct ts_config * | config, | |||
| struct ts_state * | state | |||
| ) |
skb_find_text - Find a text pattern in skb data : the buffer to look in : search offset : search limit : textsearch configuration : uninitialized textsearch state variable
Finds a pattern in the skb data according to the specified textsearch configuration. Use textsearch_next() to retrieve subsequent occurrences of the pattern. Returns the offset to the first occurrence or UINT_MAX if no match was found.
| int skb_gro_receive | ( | struct sk_buff ** | head, | |
| struct sk_buff * | skb | |||
| ) |
| void skb_insert | ( | struct sk_buff * | old, | |
| struct sk_buff * | newsk, | |||
| struct sk_buff_head * | list | |||
| ) |
skb_insert - insert a buffer : buffer to insert before : buffer to insert : list to use
Place a packet before a given packet in a list. The list locks are taken and this function is atomic with respect to other list locked calls.
A buffer cannot be placed on two lists at the same time.
| struct sk_buff* skb_morph | ( | struct sk_buff * | dst, | |
| struct sk_buff * | src | |||
| ) | [read] |
| void skb_over_panic | ( | struct sk_buff * | skb, | |
| int | sz, | |||
| void * | here | |||
| ) |
| int skb_pad | ( | struct sk_buff * | skb, | |
| int | pad | |||
| ) |
skb_pad - zero pad the tail of an skb : buffer to pad : space to pad
Ensure that a buffer is followed by a padding area that is zero filled. Used by network drivers which may DMA or transfer data beyond the buffer end onto the wire.
May return error in out of memory cases. The skb is freed on error.
| bool skb_partial_csum_set | ( | struct sk_buff * | skb, | |
| u16 | start, | |||
| u16 | off | |||
| ) |
skb_partial_csum_set - set up and verify partial csum values for packet : the skb to set : the number of bytes after skb->data to start checksumming.
: the offset from start to place the checksum.
For untrusted partially-checksummed packets, we need to make sure the values for skb->csum_start and skb->csum_offset are valid so we don't oops.
This function checks and sets those values and skb->ip_summed: if this returns false you should drop the packet.
| static int skb_prepare_for_shift | ( | struct sk_buff * | skb | ) | [static] |
| void skb_prepare_seq_read | ( | struct sk_buff * | skb, | |
| unsigned int | from, | |||
| unsigned int | to, | |||
| struct skb_seq_state * | st | |||
| ) |
skb_prepare_seq_read - Prepare a sequential read of skb data : the buffer to read : lower offset of data to be read : upper offset of data to be read : state variable
Initializes the specified state variable. Must be called before invoking skb_seq_read() for the first time.
| unsigned char* skb_pull | ( | struct sk_buff * | skb, | |
| unsigned int | len | |||
| ) |
skb_pull - remove data from the start of a buffer : buffer to use : amount of data to remove
This function removes data from the start of a buffer, returning the memory to the headroom. A pointer to the next data in the buffer is returned. Once the data has been pulled future pushes will overwrite the old data.
| unsigned char* skb_pull_rcsum | ( | struct sk_buff * | skb, | |
| unsigned int | len | |||
| ) |
skb_pull_rcsum - pull skb and update receive checksum : buffer to update : length of data pulled
This function performs an skb_pull on the packet and updates the CHECKSUM_COMPLETE checksum. It should be used on receive path processing instead of skb_pull unless you know that the checksum difference is zero (e.g., a valid IP header) or you are setting ip_summed to CHECKSUM_NONE.
| unsigned char* skb_push | ( | struct sk_buff * | skb, | |
| unsigned int | len | |||
| ) |
| unsigned char* skb_put | ( | struct sk_buff * | skb, | |
| unsigned int | len | |||
| ) |
| void skb_queue_head | ( | struct sk_buff_head * | list, | |
| struct sk_buff * | newsk | |||
| ) |
| void skb_queue_purge | ( | struct sk_buff_head * | list | ) |
| void skb_queue_tail | ( | struct sk_buff_head * | list, | |
| struct sk_buff * | newsk | |||
| ) |
| struct sk_buff* skb_realloc_headroom | ( | struct sk_buff * | skb, | |
| unsigned int | headroom | |||
| ) | [read] |
| int skb_recycle_check | ( | struct sk_buff * | skb, | |
| int | skb_size | |||
| ) |
skb_recycle_check - check if skb can be reused for receive : buffer : minimum receive buffer size
Checks that the skb passed in is not shared or cloned, and that it is linear and its head portion at least as large as skb_size so that it can be recycled as a receive buffer. If these conditions are met, this function does any necessary reference count dropping and cleans up the skbuff as if it just came from __alloc_skb().
| static void skb_release_all | ( | struct sk_buff * | skb | ) | [static] |
| static void skb_release_data | ( | struct sk_buff * | skb | ) | [static] |
| static void skb_release_head_state | ( | struct sk_buff * | skb | ) | [static] |
| struct sk_buff* skb_segment | ( | struct sk_buff * | skb, | |
| int | features | |||
| ) | [read] |
| unsigned int skb_seq_read | ( | unsigned int | consumed, | |
| const u8 ** | data, | |||
| struct skb_seq_state * | st | |||
| ) |
skb_seq_read - Sequentially read skb data : number of bytes consumed by the caller so far : destination pointer for data to be returned : state variable
Reads a block of skb data at &consumed relative to the lower offset specified to skb_prepare_seq_read(). Assigns the head of the data block to &data and returns the length of the block or 0 if the end of the skb data or the upper offset has been reached.
The caller is not required to consume all of the data returned, i.e. &consumed is typically set to the number of bytes already consumed and the next call to skb_seq_read() will return the remaining part of the block.
Note 1: The size of each block of data returned can be arbitrary; this limitation is the cost for zerocopy sequential reads of potentially non-linear data.
Note 2: Fragment lists within fragments are not implemented at the moment, state->root_skb could be replaced with a stack for this purpose.
| int skb_shift | ( | struct sk_buff * | tgt, | |
| struct sk_buff * | skb, | |||
| int | shiftlen | |||
| ) |
skb_shift - Shifts paged data partially from skb to another : buffer into which tail data gets added : buffer from which the paged data comes from : shift up to this many bytes
Attempts to shift up to shiftlen worth of bytes, which may be less than the length of the skb, from skb to tgt. Returns the number of bytes shifted. It's up to the caller to free skb if everything was shifted.
If tgt runs out of frags, the whole operation is aborted.
Skb cannot include anything else but paged data while tgt is allowed to have non-paged data as well.
TODO: full sized shift could be optimized but that would need specialized skb free'er to handle frags without up-to-date nr_frags.
| int skb_splice_bits | ( | struct sk_buff * | skb, | |
| unsigned int | offset, | |||
| struct pipe_inode_info * | pipe, | |||
| unsigned int | tlen, | |||
| unsigned int | flags | |||
| ) |
| void skb_split | ( | struct sk_buff * | skb, | |
| struct sk_buff * | skb1, | |||
| const u32 | len | |||
| ) |
| static void skb_split_inside_header | ( | struct sk_buff * | skb, | |
| struct sk_buff * | skb1, | |||
| const u32 | len, | |||
| const int | pos | |||
| ) | [inline, static] |
| static void skb_split_no_header | ( | struct sk_buff * | skb, | |
| struct sk_buff * | skb1, | |||
| const u32 | len, | |||
| int | pos | |||
| ) | [inline, static] |
| int skb_store_bits | ( | struct sk_buff * | skb, | |
| int | offset, | |||
| const void * | from, | |||
| int | len | |||
| ) |
skb_store_bits - store bits from kernel buffer to skb : destination buffer : offset in destination : source buffer : number of bytes to copy
Copy the specified number of bytes from the source buffer to the destination skb. This function handles all the messy bits of traversing fragment lists and such.
| int skb_to_sgvec | ( | struct sk_buff * | skb, | |
| struct scatterlist * | sg, | |||
| int | offset, | |||
| int | len | |||
| ) |
| void skb_trim | ( | struct sk_buff * | skb, | |
| unsigned int | len | |||
| ) |
| static void skb_ts_finish | ( | struct ts_config * | conf, | |
| struct ts_state * | state | |||
| ) | [static] |
| static unsigned int skb_ts_get_next_block | ( | unsigned int | offset, | |
| const u8 ** | text, | |||
| struct ts_config * | conf, | |||
| struct ts_state * | state | |||
| ) | [static] |
| void skb_under_panic | ( | struct sk_buff * | skb, | |
| int | sz, | |||
| void * | here | |||
| ) |
skb_under_panic - private function : buffer : size : address
Out of line support code for skb_push(). Not user callable.
| void skb_unlink | ( | struct sk_buff * | skb, | |
| struct sk_buff_head * | list | |||
| ) |
| static void sock_pipe_buf_get | ( | struct pipe_inode_info * | pipe, | |
| struct pipe_buffer * | buf | |||
| ) | [static] |
| static void sock_pipe_buf_release | ( | struct pipe_inode_info * | pipe, | |
| struct pipe_buffer * | buf | |||
| ) | [static] |
| static int sock_pipe_buf_steal | ( | struct pipe_inode_info * | pipe, | |
| struct pipe_buffer * | buf | |||
| ) | [static] |
| static void sock_spd_release | ( | struct splice_pipe_desc * | spd, | |
| unsigned int | i | |||
| ) | [static] |
| static int spd_fill_page | ( | struct splice_pipe_desc * | spd, | |
| struct page * | page, | |||
| unsigned int | len, | |||
| unsigned int | offset, | |||
| struct sk_buff * | skb, | |||
| int | linear | |||
| ) | [inline, static] |
struct kmem_cache* skbuff_fclone_cache __read_mostly [static, read] |
struct pipe_buf_operations sock_pipe_buf_ops [static] |
Initial value:
{
.can_merge = 0,
.map = generic_pipe_buf_map,
.unmap = generic_pipe_buf_unmap,
.confirm = generic_pipe_buf_confirm,
.release = sock_pipe_buf_release,
.steal = sock_pipe_buf_steal,
.get = sock_pipe_buf_get,
}
1.5.6