ReactOS 0.4.16-dev-320-g3bd9ddc
tcpcore.h File Reference
#include "tcpdef.h"
#include "linux.h"
Include dependency graph for tcpcore.h:

Go to the source code of this file.

Classes

struct  sk_buff_head
 
struct  skb_frag_struct
 
struct  skb_shared_info
 
struct  sk_buff
 
struct  sockaddr
 
struct  msghdr
 
struct  proto
 
struct  tcp_sack_block
 
struct  tcp_opt
 
struct  socket_lock_t
 
struct  sock
 
struct  dst_entry
 
struct  dst_ops
 
struct  tcp_ehash_bucket
 
struct  tcp_bind_bucket
 
struct  tcp_bind_hashbucket
 
struct  tcp_hashinfo
 
struct  tcp_tw_bucket
 
struct  or_calltable
 
struct  tcp_v4_open_req
 
struct  open_request
 
struct  tcp_func
 
struct  tcp_skb_cb
 
struct  tcp_listen_opt
 

Macros

#define HAVE_ALLOC_SKB   /* For the drivers to know */
 
#define HAVE_ALIGNABLE_SKB   /* Ditto 8) */
 
#define SLAB_SKB   /* Slabified skbuffs */
 
#define CHECKSUM_NONE   0
 
#define CHECKSUM_HW   1
 
#define CHECKSUM_UNNECESSARY   2
 
#define SKB_DATA_ALIGN(X)   (((X) + (SMP_CACHE_BYTES-1)) & ~(SMP_CACHE_BYTES-1))
 
#define SKB_MAX_ORDER(X, ORDER)   (((PAGE_SIZE<<(ORDER)) - (X) - sizeof(struct skb_shared_info))&~(SMP_CACHE_BYTES-1))
 
#define SKB_MAX_HEAD(X)   (SKB_MAX_ORDER((X),0))
 
#define SKB_MAX_ALLOC   (SKB_MAX_ORDER(0,2))
 
#define NET_CALLER(arg)   __builtin_return_address(0)
 
#define MAX_SKB_FRAGS   6
 
#define SK_WMEM_MAX   65535
 
#define SK_RMEM_MAX   65535
 
#define dev_kfree_skb(a)   kfree_skb(a)
 
#define skb_shinfo(SKB)   ((struct skb_shared_info *)((SKB)->end))
 
#define SKB_PAGE_ASSERT(skb)   do { if (skb_shinfo(skb)->nr_frags) out_of_line_bug(); } while (0)
 
#define SKB_FRAG_ASSERT(skb)   do { if (skb_shinfo(skb)->frag_list) out_of_line_bug(); } while (0)
 
#define SKB_LINEAR_ASSERT(skb)   do { if (skb_is_nonlinear(skb)) out_of_line_bug(); } while (0)
 
#define skb_queue_walk(queue, skb)
 
#define RT_CACHE_DEBUG   0
 
#define DST_GC_MIN   (1*HZ)
 
#define DST_GC_INC   (5*HZ)
 
#define DST_GC_MAX   (120*HZ)
 
#define DST_HOST   1
 
#define TCP_DEBUG   1
 
#define FASTRETRANS_DEBUG   1
 
#define TCP_LHTABLE_SIZE   32 /* Yes, really, this is all you need. */
 
#define tcp_ehash   (tcp_hashinfo.__tcp_ehash)
 
#define tcp_bhash   (tcp_hashinfo.__tcp_bhash)
 
#define tcp_ehash_size   (tcp_hashinfo.__tcp_ehash_size)
 
#define tcp_bhash_size   (tcp_hashinfo.__tcp_bhash_size)
 
#define tcp_listening_hash   (tcp_hashinfo.__tcp_listening_hash)
 
#define tcp_lhash_lock   (tcp_hashinfo.__tcp_lhash_lock)
 
#define tcp_lhash_users   (tcp_hashinfo.__tcp_lhash_users)
 
#define tcp_lhash_wait   (tcp_hashinfo.__tcp_lhash_wait)
 
#define tcp_portalloc_lock   (tcp_hashinfo.__tcp_portalloc_lock)
 
#define TCP_COMBINED_PORTS(__sport, __dport)    (((__u32)(__dport)<<16) | (__u32)(__sport))
 
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr)
 
#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)
 
#define TCP_IPV6_MATCH(__sk, __saddr, __daddr, __ports, __dif)
 
#define MAX_TCP_HEADER   (128 + MAX_HEADER)
 
#define MAX_TCP_WINDOW   32767U
 
#define TCP_MIN_MSS   88U
 
#define TCP_MIN_RCVMSS   536U
 
#define TCP_FASTRETRANS_THRESH   3
 
#define TCP_MAX_REORDERING   127
 
#define TCP_MAX_QUICKACKS   16U
 
#define TCP_URG_VALID   0x0100
 
#define TCP_URG_NOTYET   0x0200
 
#define TCP_URG_READ   0x0400
 
#define TCP_RETR1
 
#define TCP_RETR2
 
#define TCP_SYN_RETRIES
 
#define TCP_SYNACK_RETRIES
 
#define TCP_ORPHAN_RETRIES
 
#define TCP_TIMEWAIT_LEN   (60*1000)
 
#define TCP_FIN_TIMEOUT   TCP_TIMEWAIT_LEN
 
#define TCP_DELACK_MAX   ((unsigned)(HZ/5)) /* maximal time to delay before sending an ACK */
 
#define TCP_DELACK_MIN   4U
 
#define TCP_ATO_MIN   4U
 
#define TCP_RTO_MAX   ((unsigned)(120*HZ))
 
#define TCP_RTO_MIN   ((unsigned)(HZ/5))
 
#define TCP_TIMEOUT_INIT   ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value */
 
#define TCP_RESOURCE_PROBE_INTERVAL
 
#define TCP_KEEPALIVE_TIME   (120*60*HZ) /* two hours */
 
#define TCP_KEEPALIVE_PROBES   9 /* Max of 9 keepalive probes */
 
#define TCP_KEEPALIVE_INTVL   (75*HZ)
 
#define MAX_TCP_KEEPIDLE   32767
 
#define MAX_TCP_KEEPINTVL   32767
 
#define MAX_TCP_KEEPCNT   127
 
#define MAX_TCP_SYNCNT   127
 
#define TCP_TWKILL_SLOTS   8 /* Please keep this a power of 2. */
 
#define TCP_TWKILL_PERIOD   (TCP_TIMEWAIT_LEN/TCP_TWKILL_SLOTS)
 
#define TCP_SYNQ_INTERVAL   (HZ/5) /* Period of SYNACK timer */
 
#define TCP_SYNQ_HSIZE   512 /* Size of SYNACK hash table */
 
#define TCP_PAWS_24DAYS   (60 * 60 * 24 * 24)
 
#define TCP_PAWS_MSL
 
#define TCP_PAWS_WINDOW
 
#define TCP_TW_RECYCLE_SLOTS_LOG   5
 
#define TCP_TW_RECYCLE_SLOTS   (1<<TCP_TW_RECYCLE_SLOTS_LOG)
 
#define TCP_TW_RECYCLE_TICK   (0)
 
#define TCPOPT_NOP   1 /* Padding */
 
#define TCPOPT_EOL   0 /* End of options */
 
#define TCPOPT_MSS   2 /* Segment size negotiating */
 
#define TCPOPT_WINDOW   3 /* Window scaling */
 
#define TCPOPT_SACK_PERM   4 /* SACK Permitted */
 
#define TCPOPT_SACK   5 /* SACK Block */
 
#define TCPOPT_TIMESTAMP   8 /* Better RTT estimations/PAWS */
 
#define TCPOLEN_MSS   4
 
#define TCPOLEN_WINDOW   3
 
#define TCPOLEN_SACK_PERM   2
 
#define TCPOLEN_TIMESTAMP   10
 
#define TCPOLEN_TSTAMP_ALIGNED   12
 
#define TCPOLEN_WSCALE_ALIGNED   4
 
#define TCPOLEN_SACKPERM_ALIGNED   4
 
#define TCPOLEN_SACK_BASE   2
 
#define TCPOLEN_SACK_BASE_ALIGNED   4
 
#define TCPOLEN_SACK_PERBLOCK   8
 
#define TCP_TIME_RETRANS   1 /* Retransmit timer */
 
#define TCP_TIME_DACK   2 /* Delayed ack timer */
 
#define TCP_TIME_PROBE0   3 /* Zero window probe timer */
 
#define TCP_TIME_KEEPOPEN   4 /* Keepalive timer */
 
#define tcp_openreq_alloc()   kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)
 
#define tcp_openreq_fastfree(req)   kmem_cache_free(tcp_openreq_cachep, req)
 
#define TCP_INET_FAMILY(fam)   1
 
#define tcp_time_stamp   ((__u32)(jiffies))
 
#define TCPCB_FLAG_FIN   0x01
 
#define TCPCB_FLAG_SYN   0x02
 
#define TCPCB_FLAG_RST   0x04
 
#define TCPCB_FLAG_PSH   0x08
 
#define TCPCB_FLAG_ACK   0x10
 
#define TCPCB_FLAG_URG   0x20
 
#define TCPCB_FLAG_ECE   0x40
 
#define TCPCB_FLAG_CWR   0x80
 
#define TCPCB_SACKED_ACKED   0x01 /* SKB ACK'd by a SACK block */
 
#define TCPCB_SACKED_RETRANS   0x02 /* SKB retransmitted */
 
#define TCPCB_LOST   0x04 /* SKB is lost */
 
#define TCPCB_TAGBITS   0x07 /* All tag bits */
 
#define TCPCB_EVER_RETRANS   0x80 /* Ever retransmitted frame */
 
#define TCPCB_RETRANS   (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
 
#define TCPCB_URG   0x20 /* Urgent pointer advanced here */
 
#define TCPCB_AT_TAIL   (TCPCB_URG)
 
#define TCP_SKB_CB(__skb)   ((struct tcp_skb_cb *)&((__skb)->cb[0]))
 
#define for_retrans_queue(skb, sk, tp)
 
#define TCP_MEM_QUANTUM   ((int)PAGE_SIZE)
 
#define TCP_CHECK_TIMER(sk)   do { } while (0)
 

Typedefs

typedef struct skb_frag_struct skb_frag_t
 
typedef struct sockaddr _sockaddr
 
typedef int(* sk_read_actor_t) (read_descriptor_t *, struct sk_buff *, unsigned int, size_t)
 

Enumerations

enum  tcp_ack_state_t { TCP_ACK_SCHED = 1 , TCP_ACK_TIMER = 2 , TCP_ACK_PUSHED = 4 }
 
enum  tcp_tw_status { TCP_TW_SUCCESS = 0 , TCP_TW_RST = 1 , TCP_TW_ACK = 2 , TCP_TW_SYN = 3 }
 

Functions

void __kfree_skb (struct sk_buff *skb)
 
struct sk_buff * alloc_skb (unsigned int size, int priority)
 
void kfree_skbmem (struct sk_buff *skb)
 
struct sk_buff * skb_clone (struct sk_buff *skb, int priority)
 
struct sk_buff * skb_copy (const struct sk_buff *skb, int priority)
 
struct sk_buff * pskb_copy (struct sk_buff *skb, int gfp_mask)
 
int pskb_expand_head (struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
 
struct sk_buff * skb_realloc_headroom (struct sk_buff *skb, unsigned int headroom)
 
struct sk_buff * skb_copy_expand (const struct sk_buff *skb, int newheadroom, int newtailroom, int priority)
 
void skb_over_panic (struct sk_buff *skb, int len, void *here)
 
void skb_under_panic (struct sk_buff *skb, int len, void *here)
 
static __inline int skb_queue_empty (struct sk_buff_head *list)
 
static __inline struct sk_buff * skb_get (struct sk_buff *skb)
 
static __inline void kfree_skb (struct sk_buff *skb)
 
static __inline void kfree_skb_fast (struct sk_buff *skb)
 
static __inline int skb_cloned (struct sk_buff *skb)
 
static __inline int skb_shared (struct sk_buff *skb)
 
static __inline struct sk_buff * skb_share_check (struct sk_buff *skb, int pri)
 
static __inline struct sk_buff * skb_unshare (struct sk_buff *skb, int pri)
 
static __inline struct sk_buff * skb_peek (struct sk_buff_head *list_)
 
static __inline struct sk_buff * skb_peek_tail (struct sk_buff_head *list_)
 
static __inline __u32 skb_queue_len (struct sk_buff_head *list_)
 
static __inline void skb_queue_head_init (struct sk_buff_head *list)
 
static __inline void __skb_queue_head (struct sk_buff_head *list, struct sk_buff *newsk)
 
static __inline void skb_queue_head (struct sk_buff_head *list, struct sk_buff *newsk)
 
static __inline void __skb_queue_tail (struct sk_buff_head *list, struct sk_buff *newsk)
 
static __inline void skb_queue_tail (struct sk_buff_head *list, struct sk_buff *newsk)
 
static __inline struct sk_buff * __skb_dequeue (struct sk_buff_head *list)
 
static __inline struct sk_buff * skb_dequeue (struct sk_buff_head *list)
 
static __inline void __skb_insert (struct sk_buff *newsk, struct sk_buff *prev, struct sk_buff *next, struct sk_buff_head *list)
 
static __inline void skb_insert (struct sk_buff *old, struct sk_buff *newsk)
 
static __inline void __skb_append (struct sk_buff *old, struct sk_buff *newsk)
 
static __inline void skb_append (struct sk_buff *old, struct sk_buff *newsk)
 
static __inline void __skb_unlink (struct sk_buff *skb, struct sk_buff_head *list)
 
static __inline void skb_unlink (struct sk_buff *skb)
 
static __inline struct sk_buff * __skb_dequeue_tail (struct sk_buff_head *list)
 
static __inline struct sk_buff * skb_dequeue_tail (struct sk_buff_head *list)
 
static __inline int skb_is_nonlinear (const struct sk_buff *skb)
 
static __inline int skb_headlen (const struct sk_buff *skb)
 
static __inline unsigned char * __skb_put (struct sk_buff *skb, unsigned int len)
 
static __inline unsigned char * skb_put (struct sk_buff *skb, unsigned int len)
 
static __inline unsigned char * __skb_push (struct sk_buff *skb, unsigned int len)
 
static __inline unsigned char * skb_push (struct sk_buff *skb, unsigned int len)
 
static __inline char * __skb_pull (struct sk_buff *skb, unsigned int len)
 
static __inline unsigned char * skb_pull (struct sk_buff *skb, unsigned int len)
 
unsigned char * __pskb_pull_tail (struct sk_buff *skb, int delta)
 
static __inline char * __pskb_pull (struct sk_buff *skb, unsigned int len)
 
static __inline unsigned char * pskb_pull (struct sk_buff *skb, unsigned int len)
 
static __inline int pskb_may_pull (struct sk_buff *skb, unsigned int len)
 
static __inline int skb_headroom (const struct sk_buff *skb)
 
static __inline int skb_tailroom (const struct sk_buff *skb)
 
static __inline void skb_reserve (struct sk_buff *skb, unsigned int len)
 
int ___pskb_trim (struct sk_buff *skb, unsigned int len, int realloc)
 
static __inline void __skb_trim (struct sk_buff *skb, unsigned int len)
 
static __inline void skb_trim (struct sk_buff *skb, unsigned int len)
 
static __inline int __pskb_trim (struct sk_buff *skb, unsigned int len)
 
static __inline int pskb_trim (struct sk_buff *skb, unsigned int len)
 
static __inline void skb_orphan (struct sk_buff *skb)
 
static __inline void skb_queue_purge (struct sk_buff_head *list)
 
static __inline void __skb_queue_purge (struct sk_buff_head *list)
 
static __inline struct sk_buff * __dev_alloc_skb (unsigned int length, int gfp_mask)
 
static __inline struct sk_buff * dev_alloc_skb (unsigned int length)
 
static __inline int skb_cow (struct sk_buff *skb, unsigned int headroom)
 
int skb_linearize (struct sk_buff *skb, int gfp)
 
static __inline void * kmap_skb_frag (const skb_frag_t *frag)
 
static __inline void kunmap_skb_frag (void *vaddr)
 
struct sk_buff * skb_recv_datagram (struct sock *sk, unsigned flags, int noblock, int *err)
 
unsigned int datagram_poll (struct file *file, struct socket *sock, struct poll_table_struct *wait)
 
int skb_copy_datagram (const struct sk_buff *from, int offset, char *to, int size)
 
int skb_copy_datagram_iovec (const struct sk_buff *from, int offset, struct iovec *to, int size)
 
int skb_copy_and_csum_datagram (const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int *csump)
 
int skb_copy_and_csum_datagram_iovec (const struct sk_buff *skb, int hlen, struct iovec *iov)
 
void skb_free_datagram (struct sock *sk, struct sk_buff *skb)
 
unsigned int skb_checksum (const struct sk_buff *skb, int offset, int len, unsigned int csum)
 
int skb_copy_bits (const struct sk_buff *skb, int offset, void *to, int len)
 
unsigned int skb_copy_and_csum_bits (const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum)
 
void skb_copy_and_csum_dev (const struct sk_buff *skb, u8 *to)
 
void skb_init (void)
 
void skb_add_mtu (int mtu)
 
struct tcp_ehash_bucket __attribute__ ((__aligned__(8)))
 
struct tcp_bind_bucket * tcp_bucket_create (struct tcp_bind_hashbucket *head, unsigned short snum)
 
void tcp_bucket_unlock (struct sock *sk)
 
struct sock * tcp_v4_lookup_listener (u32 addr, unsigned short hnum, int dif)
 
static __inline int tcp_bhashfn (__u16 lport)
 
static __inline void tcp_tw_put (struct tcp_tw_bucket *tw)
 
void tcp_time_wait (struct sock *sk, int state, int timeo)
 
void tcp_timewait_kill (struct tcp_tw_bucket *tw)
 
void tcp_tw_schedule (struct tcp_tw_bucket *tw, int timeo)
 
void tcp_tw_deschedule (struct tcp_tw_bucket *tw)
 
static __inline int tcp_lhashfn (unsigned short num)
 
static __inline int tcp_sk_listen_hashfn (struct sock *sk)
 
static __inline void tcp_openreq_free (struct open_request *req)
 
__inline int before (__u32 seq1, __u32 seq2)
 
__inline int after (__u32 seq1, __u32 seq2)
 
__inline int between (__u32 seq1, __u32 seq2, __u32 seq3)
 
void tcp_put_port (struct sock *sk)
 
void __tcp_put_port (struct sock *sk)
 
void tcp_inherit_port (struct sock *sk, struct sock *child)
 
void tcp_v4_err (struct sk_buff *skb, u32)
 
void tcp_shutdown (struct sock *sk, int how)
 
int tcp_v4_rcv (struct sk_buff *skb)
 
int tcp_v4_remember_stamp (struct sock *sk)
 
int tcp_v4_tw_remember_stamp (struct tcp_tw_bucket *tw)
 
int tcp_sendmsg (struct sock *sk, struct msghdr *msg, int size)
 
ssize_t tcp_sendpage (struct socket *sock, struct page *page, int offset, size_t size, int flags)
 
int tcp_ioctl (struct sock *sk, int cmd, unsigned long arg)
 
int tcp_rcv_state_process (struct sock *sk, struct sk_buff *skb, struct tcphdr *th, unsigned len)
 
int tcp_rcv_established (struct sock *sk, struct sk_buff *skb, struct tcphdr *th, unsigned len)
 
static __inline void tcp_schedule_ack (struct tcp_opt *tp)
 
static __inline int tcp_ack_scheduled (struct tcp_opt *tp)
 
static __inline void tcp_dec_quickack_mode (struct tcp_opt *tp)
 
void tcp_enter_quickack_mode (struct tcp_opt *tp)
 
static __inline void tcp_delack_init (struct tcp_opt *tp)
 
static __inline void tcp_clear_options (struct tcp_opt *tp)
 
enum tcp_tw_status tcp_timewait_state_process (struct tcp_tw_bucket *tw, struct sk_buff *skb, struct tcphdr *th, unsigned len)
 
struct sock * tcp_check_req (struct sock *sk, struct sk_buff *skb, struct open_request *req, struct open_request **prev)
 
int tcp_child_process (struct sock *parent, struct sock *child, struct sk_buff *skb)
 
void tcp_enter_loss (struct sock *sk, int how)
 
void tcp_clear_retrans (struct tcp_opt *tp)
 
void tcp_update_metrics (struct sock *sk)
 
void tcp_close (struct sock *sk, long timeout)
 
struct sock * tcp_accept (struct sock *sk, int flags, int *err)
 
unsigned int tcp_poll (struct file *file, struct socket *sock, struct poll_table_struct *wait)
 
void tcp_write_space (struct sock *sk)
 
int tcp_getsockopt (struct sock *sk, int level, int optname, char *optval, int *optlen)
 
int tcp_setsockopt (struct sock *sk, int level, int optname, char *optval, int optlen)
 
void tcp_set_keepalive (struct sock *sk, int val)
 
int tcp_recvmsg (struct sock *sk, struct msghdr *msg, int len, int nonblock, int flags, int *addr_len)
 
int tcp_listen_start (struct sock *sk)
 
void tcp_parse_options (struct sk_buff *skb, struct tcp_opt *tp, int estab)
 
int tcp_v4_rebuild_header (struct sock *sk)
 
int tcp_v4_build_header (struct sock *sk, struct sk_buff *skb)
 
void tcp_v4_send_check (struct sock *sk, struct tcphdr *th, int len, struct sk_buff *skb)
 
int tcp_v4_conn_request (struct sock *sk, struct sk_buff *skb)
 
struct sock * tcp_create_openreq_child (struct sock *sk, struct open_request *req, struct sk_buff *skb)
 
struct sock * tcp_v4_syn_recv_sock (struct sock *sk, struct sk_buff *skb, struct open_request *req, struct dst_entry *dst)
 
int tcp_v4_do_rcv (struct sock *sk, struct sk_buff *skb)
 
int tcp_v4_connect (struct sock *sk, struct sockaddr *uaddr, int addr_len)
 
int tcp_connect (struct sock *sk)
 
struct sk_buff * tcp_make_synack (struct sock *sk, struct dst_entry *dst, struct open_request *req)
 
int tcp_disconnect (struct sock *sk, int flags)
 
void tcp_unhash (struct sock *sk)
 
int tcp_v4_hash_connecting (struct sock *sk)
 
struct sock * cookie_v4_check (struct sock *sk, struct sk_buff *skb, struct ip_options *opt)
 
__u32 cookie_v4_init_sequence (struct sock *sk, struct sk_buff *skb, __u16 *mss)
 
int tcp_write_xmit (struct sock *, int nonagle)
 
int tcp_retransmit_skb (struct sock *, struct sk_buff *)
 
void tcp_xmit_retransmit_queue (struct sock *)
 
void tcp_simple_retransmit (struct sock *)
 
void tcp_send_probe0 (struct sock *)
 
void tcp_send_partial (struct sock *)
 
int tcp_write_wakeup (struct sock *)
 
void tcp_send_fin (struct sock *sk)
 
void tcp_send_active_reset (struct sock *sk, int priority)
 
int tcp_send_synack (struct sock *)
 
int tcp_transmit_skb (struct sock *, struct sk_buff *)
 
void tcp_send_skb (struct sock *, struct sk_buff *, int force_queue, unsigned mss_now)
 
void tcp_push_one (struct sock *, unsigned mss_now)
 
void tcp_send_ack (struct sock *sk)
 
void tcp_send_delayed_ack (struct sock *sk)
 
void tcp_init_xmit_timers (struct sock *)
 
void tcp_clear_xmit_timers (struct sock *)
 
void tcp_delete_keepalive_timer (struct sock *)
 
void tcp_reset_keepalive_timer (struct sock *, unsigned long)
 
int tcp_sync_mss (struct sock *sk, u32 pmtu)
 
int tcp_read_sock (struct sock *sk, read_descriptor_t *desc, sk_read_actor_t recv_actor)
 
static __inline void tcp_clear_xmit_timer (struct sock *sk, int what)
 
static __inline void tcp_reset_xmit_timer (struct sock *sk, int what, unsigned long when)
 
static __inline unsigned int tcp_current_mss (struct sock *sk)
 
static __inline void tcp_initialize_rcv_mss (struct sock *sk)
 
static __inline void __tcp_fast_path_on (struct tcp_opt *tp, u32 snd_wnd)
 
static __inline void tcp_fast_path_on (struct tcp_opt *tp)
 
static __inline void tcp_fast_path_check (struct sock *sk, struct tcp_opt *tp)
 
static __inline u32 tcp_receive_window (struct tcp_opt *tp)
 
u32 __tcp_select_window (struct sock *sk)
 
static __inline int tcp_min_write_space (struct sock *sk)
 
static __inline int tcp_wspace (struct sock *sk)
 
static __inline unsigned int tcp_packets_in_flight (struct tcp_opt *tp)
 
static __inline __u32 tcp_recalc_ssthresh (struct tcp_opt *tp)
 
static __inline __u32 tcp_current_ssthresh (struct tcp_opt *tp)
 
static __inline void tcp_sync_left_out (struct tcp_opt *tp)
 
void tcp_cwnd_application_limited (struct sock *sk)
 
static __inline void tcp_cwnd_validate (struct sock *sk, struct tcp_opt *tp)
 
static __inline void __tcp_enter_cwr (struct tcp_opt *tp)
 
static __inline void tcp_enter_cwr (struct tcp_opt *tp)
 
__u32 tcp_init_cwnd (struct tcp_opt *tp)
 
static __inline __u32 tcp_max_burst (struct tcp_opt *tp)
 
static __inline__ int tcp_minshall_check (struct tcp_opt *tp)
 
static __inline void tcp_minshall_update (struct tcp_opt *tp, int mss, struct sk_buff *skb)
 
static __inline int tcp_nagle_check (struct tcp_opt *tp, struct sk_buff *skb, unsigned mss_now, int nonagle)
 
static __inline int tcp_snd_test (struct tcp_opt *tp, struct sk_buff *skb, unsigned cur_mss, int nonagle)
 
static __inline void tcp_check_probe_timer (struct sock *sk, struct tcp_opt *tp)
 
static __inline int tcp_skb_is_last (struct sock *sk, struct sk_buff *skb)
 
static __inline void __tcp_push_pending_frames (struct sock *sk, struct tcp_opt *tp, unsigned cur_mss, int nonagle)
 
static __inline void tcp_push_pending_frames (struct sock *sk, struct tcp_opt *tp)
 
static __inline int tcp_may_send_now (struct sock *sk, struct tcp_opt *tp)
 
static __inline void tcp_init_wl (struct tcp_opt *tp, u32 ack, u32 seq)
 
static __inline void tcp_update_wl (struct tcp_opt *tp, u32 ack, u32 seq)
 
void tcp_destroy_sock (struct sock *sk)
 
static __inline u16 tcp_v4_check (struct tcphdr *th, int len, unsigned long saddr, unsigned long daddr, unsigned long base)
 
static __inline int __tcp_checksum_complete (struct sk_buff *skb)
 
static __inline int tcp_checksum_complete (struct sk_buff *skb)
 
static __inline void tcp_prequeue_init (struct tcp_opt *tp)
 
static __inline int tcp_prequeue (struct sock *sk, struct sk_buff *skb)
 
static __inline void tcp_set_state (struct sock *sk, int state)
 
static __inline void tcp_done (struct sock *sk)
 
static __inline void tcp_sack_reset (struct tcp_opt *tp)
 
static __inline void tcp_build_and_update_options (__u32 *ptr, struct tcp_opt *tp, __u32 tstamp)
 
static __inline void tcp_syn_build_options (__u32 *ptr, int mss, int ts, int sack, int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent)
 
static __inline void tcp_select_initial_window (int __space, __u32 mss, __u32 *rcv_wnd, __u32 *window_clamp, int wscale_ok, __u8 *rcv_wscale)
 
static __inline int tcp_win_from_space (int space)
 
static __inline int tcp_space (struct sock *sk)
 
static __inline int tcp_full_space (struct sock *sk)
 
static __inline void tcp_acceptq_removed (struct sock *sk)
 
static __inline void tcp_acceptq_added (struct sock *sk)
 
static __inline int tcp_acceptq_is_full (struct sock *sk)
 
static __inline void tcp_acceptq_queue (struct sock *sk, struct open_request *req, struct sock *child)
 
static __inline void tcp_synq_removed (struct sock *sk, struct open_request *req)
 
static __inline void tcp_synq_added (struct sock *sk)
 
static __inline int tcp_synq_len (struct sock *sk)
 
static __inline int tcp_synq_young (struct sock *sk)
 
static __inline int tcp_synq_is_full (struct sock *sk)
 
static __inline void tcp_synq_unlink (struct tcp_opt *tp, struct open_request *req, struct open_request **prev)
 
static __inline void tcp_synq_drop (struct sock *sk, struct open_request *req, struct open_request **prev)
 
static __inline void tcp_openreq_init (struct open_request *req, struct tcp_opt *tp, struct sk_buff *skb)
 
static __inline void tcp_free_skb (struct sock *sk, struct sk_buff *skb)
 
static __inline void tcp_charge_skb (struct sock *sk, struct sk_buff *skb)
 
void __tcp_mem_reclaim (struct sock *sk)
 
int tcp_mem_schedule (struct sock *sk, int size, int kind)
 
static __inline void tcp_mem_reclaim (struct sock *sk)
 
static __inline void tcp_enter_memory_pressure (void)
 
static __inline void tcp_moderate_sndbuf (struct sock *sk)
 
static __inline struct sk_buff * tcp_alloc_pskb (struct sock *sk, int size, int mem, int gfp)
 
static __inline struct sk_buff * tcp_alloc_skb (struct sock *sk, int size, int gfp)
 
static __inline struct page * tcp_alloc_page (struct sock *sk)
 
static __inline void tcp_writequeue_purge (struct sock *sk)
 
void tcp_rfree (struct sk_buff *skb)
 
static __inline void tcp_set_owner_r (struct sk_buff *skb, struct sock *sk)
 
void tcp_listen_wlock (void)
 
static __inline void tcp_listen_lock (void)
 
static __inline void tcp_listen_unlock (void)
 
static __inline int keepalive_intvl_when (struct tcp_opt *tp)
 
static __inline int keepalive_time_when (struct tcp_opt *tp)
 
static __inline int tcp_fin_time (struct tcp_opt *tp)
 
static __inline int tcp_paws_check (struct tcp_opt *tp, int rst)
 

Variables

rwlock_t lock
 
struct sock * chain
 
struct tcp_bind_bucket __attribute__
 
struct tcp_hashinfo tcp_hashinfo
 
kmem_cache_t * tcp_bucket_cachep
 
int tcp_port_rover
 
kmem_cache_t * tcp_timewait_cachep
 
atomic_t tcp_orphan_count
 
int tcp_tw_count
 
atomic_t tcp_memory_allocated
 
atomic_t tcp_sockets_allocated
 
int tcp_memory_pressure
 
kmem_cache_t * tcp_openreq_cachep
 
struct proto tcp_prot
 
const char timer_bug_msg []
 

Macro Definition Documentation

◆ CHECKSUM_HW

#define CHECKSUM_HW   1

Definition at line 44 of file tcpcore.h.

◆ CHECKSUM_NONE

#define CHECKSUM_NONE   0

Definition at line 43 of file tcpcore.h.

◆ CHECKSUM_UNNECESSARY

#define CHECKSUM_UNNECESSARY   2

Definition at line 45 of file tcpcore.h.

◆ dev_kfree_skb

#define dev_kfree_skb (   a)    kfree_skb(a)

Definition at line 251 of file tcpcore.h.

◆ DST_GC_INC

#define DST_GC_INC   (5*HZ)

Definition at line 1667 of file tcpcore.h.

◆ DST_GC_MAX

#define DST_GC_MAX   (120*HZ)

Definition at line 1668 of file tcpcore.h.

◆ DST_GC_MIN

#define DST_GC_MIN   (1*HZ)

Definition at line 1666 of file tcpcore.h.

◆ DST_HOST

#define DST_HOST   1

Definition at line 1680 of file tcpcore.h.

◆ FASTRETRANS_DEBUG

#define FASTRETRANS_DEBUG   1

Definition at line 1816 of file tcpcore.h.

◆ for_retrans_queue

#define for_retrans_queue (   skb,
  sk,
  tp 
)
Value:
for (skb = (sk)->write_queue.next; \
(skb != (tp)->send_head) && \
(skb != (struct sk_buff *)&(sk)->write_queue); \
skb=skb->next)

Definition at line 2845 of file tcpcore.h.

◆ HAVE_ALIGNABLE_SKB

#define HAVE_ALIGNABLE_SKB   /* Ditto 8) */

Definition at line 40 of file tcpcore.h.

◆ HAVE_ALLOC_SKB

#define HAVE_ALLOC_SKB   /* For the drivers to know */

Definition at line 39 of file tcpcore.h.

◆ MAX_SKB_FRAGS

#define MAX_SKB_FRAGS   6

Definition at line 118 of file tcpcore.h.

◆ MAX_TCP_HEADER

#define MAX_TCP_HEADER   (128 + MAX_HEADER)

Definition at line 2069 of file tcpcore.h.

◆ MAX_TCP_KEEPCNT

#define MAX_TCP_KEEPCNT   127

Definition at line 2140 of file tcpcore.h.

◆ MAX_TCP_KEEPIDLE

#define MAX_TCP_KEEPIDLE   32767

Definition at line 2138 of file tcpcore.h.

◆ MAX_TCP_KEEPINTVL

#define MAX_TCP_KEEPINTVL   32767

Definition at line 2139 of file tcpcore.h.

◆ MAX_TCP_SYNCNT

#define MAX_TCP_SYNCNT   127

Definition at line 2141 of file tcpcore.h.

◆ MAX_TCP_WINDOW

#define MAX_TCP_WINDOW   32767U

Definition at line 2075 of file tcpcore.h.

◆ NET_CALLER

#define NET_CALLER (   arg)    __builtin_return_address(0)

Definition at line 93 of file tcpcore.h.

◆ RT_CACHE_DEBUG

#define RT_CACHE_DEBUG   0

Definition at line 1664 of file tcpcore.h.

◆ SK_RMEM_MAX

#define SK_RMEM_MAX   65535

Definition at line 231 of file tcpcore.h.

◆ SK_WMEM_MAX

#define SK_WMEM_MAX   65535

Definition at line 230 of file tcpcore.h.

◆ SKB_DATA_ALIGN

#define SKB_DATA_ALIGN (   X)    (((X) + (SMP_CACHE_BYTES-1)) & ~(SMP_CACHE_BYTES-1))

Definition at line 47 of file tcpcore.h.

◆ SKB_FRAG_ASSERT

#define SKB_FRAG_ASSERT (   skb)    do { if (skb_shinfo(skb)->frag_list) out_of_line_bug(); } while (0)

Definition at line 768 of file tcpcore.h.

◆ SKB_LINEAR_ASSERT

#define SKB_LINEAR_ASSERT (   skb)    do { if (skb_is_nonlinear(skb)) out_of_line_bug(); } while (0)

Definition at line 769 of file tcpcore.h.

◆ SKB_MAX_ALLOC

#define SKB_MAX_ALLOC   (SKB_MAX_ORDER(0,2))

Definition at line 50 of file tcpcore.h.

◆ SKB_MAX_HEAD

#define SKB_MAX_HEAD (   X)    (SKB_MAX_ORDER((X),0))

Definition at line 49 of file tcpcore.h.

◆ SKB_MAX_ORDER

#define SKB_MAX_ORDER (   X,
  ORDER 
)    (((PAGE_SIZE<<(ORDER)) - (X) - sizeof(struct skb_shared_info))&~(SMP_CACHE_BYTES-1))

Definition at line 48 of file tcpcore.h.

◆ SKB_PAGE_ASSERT

#define SKB_PAGE_ASSERT (   skb)    do { if (skb_shinfo(skb)->nr_frags) out_of_line_bug(); } while (0)

Definition at line 767 of file tcpcore.h.

◆ skb_queue_walk

#define skb_queue_walk (   queue,
  skb 
)
Value:
for (skb = (queue)->next; \
(skb != (struct sk_buff *)(queue)); \
skb=skb->next)

Definition at line 1142 of file tcpcore.h.

◆ skb_shinfo

#define skb_shinfo (   SKB)    ((struct skb_shared_info *)((SKB)->end))

Definition at line 256 of file tcpcore.h.

◆ SLAB_SKB

#define SLAB_SKB   /* Slabified skbuffs */

Definition at line 41 of file tcpcore.h.

◆ TCP_ATO_MIN

#define TCP_ATO_MIN   4U

Definition at line 2126 of file tcpcore.h.

◆ tcp_bhash

#define tcp_bhash   (tcp_hashinfo.__tcp_bhash)

Definition at line 1926 of file tcpcore.h.

◆ tcp_bhash_size

#define tcp_bhash_size   (tcp_hashinfo.__tcp_bhash_size)

Definition at line 1928 of file tcpcore.h.

◆ TCP_CHECK_TIMER

#define TCP_CHECK_TIMER (   sk)    do { } while (0)

Definition at line 3825 of file tcpcore.h.

◆ TCP_COMBINED_PORTS

#define TCP_COMBINED_PORTS (   __sport,
  __dport 
)     (((__u32)(__dport)<<16) | (__u32)(__sport))

Definition at line 2018 of file tcpcore.h.

◆ TCP_DEBUG

#define TCP_DEBUG   1

Definition at line 1815 of file tcpcore.h.

◆ TCP_DELACK_MAX

#define TCP_DELACK_MAX   ((unsigned)(HZ/5)) /* maximal time to delay before sending an ACK */

Definition at line 2120 of file tcpcore.h.

◆ TCP_DELACK_MIN

#define TCP_DELACK_MIN   4U

Definition at line 2125 of file tcpcore.h.

◆ tcp_ehash

#define tcp_ehash   (tcp_hashinfo.__tcp_ehash)

Definition at line 1925 of file tcpcore.h.

◆ tcp_ehash_size

#define tcp_ehash_size   (tcp_hashinfo.__tcp_ehash_size)

Definition at line 1927 of file tcpcore.h.

◆ TCP_FASTRETRANS_THRESH

#define TCP_FASTRETRANS_THRESH   3

Definition at line 2084 of file tcpcore.h.

◆ TCP_FIN_TIMEOUT

#define TCP_FIN_TIMEOUT   TCP_TIMEWAIT_LEN

Definition at line 2113 of file tcpcore.h.

◆ TCP_INET_FAMILY

#define TCP_INET_FAMILY (   fam)    1

Definition at line 2329 of file tcpcore.h.

◆ TCP_IPV4_MATCH

#define TCP_IPV4_MATCH (   __sk,
  __cookie,
  __saddr,
  __daddr,
  __ports,
  __dif 
)
Value:
(((__sk)->daddr == (__saddr)) && \
((__sk)->rcv_saddr == (__daddr)) && \
((*((__u32 *)&((__sk)->dport)))== (__ports)) && \
(!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))

Definition at line 2036 of file tcpcore.h.

◆ TCP_IPV6_MATCH

#define TCP_IPV6_MATCH (   __sk,
  __saddr,
  __daddr,
  __ports,
  __dif 
)
Value:
(((*((__u32 *)&((__sk)->dport)))== (__ports)) && \
((__sk)->family == AF_INET6) && \
!ipv6_addr_cmp(&(__sk)->net_pinfo.af_inet6.daddr, (__saddr)) && \
!ipv6_addr_cmp(&(__sk)->net_pinfo.af_inet6.rcv_saddr, (__daddr)) && \
(!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))

Definition at line 2043 of file tcpcore.h.

◆ TCP_KEEPALIVE_INTVL

#define TCP_KEEPALIVE_INTVL   (75*HZ)

Definition at line 2136 of file tcpcore.h.

◆ TCP_KEEPALIVE_PROBES

#define TCP_KEEPALIVE_PROBES   9 /* Max of 9 keepalive probes */

Definition at line 2135 of file tcpcore.h.

◆ TCP_KEEPALIVE_TIME

#define TCP_KEEPALIVE_TIME   (120*60*HZ) /* two hours */

Definition at line 2134 of file tcpcore.h.

◆ tcp_lhash_lock

#define tcp_lhash_lock   (tcp_hashinfo.__tcp_lhash_lock)

Definition at line 1930 of file tcpcore.h.

◆ tcp_lhash_users

#define tcp_lhash_users   (tcp_hashinfo.__tcp_lhash_users)

Definition at line 1931 of file tcpcore.h.

◆ tcp_lhash_wait

#define tcp_lhash_wait   (tcp_hashinfo.__tcp_lhash_wait)

Definition at line 1932 of file tcpcore.h.

◆ TCP_LHTABLE_SIZE

#define TCP_LHTABLE_SIZE   32 /* Yes, really, this is all you need. */

Definition at line 1842 of file tcpcore.h.

◆ tcp_listening_hash

#define tcp_listening_hash   (tcp_hashinfo.__tcp_listening_hash)

Definition at line 1929 of file tcpcore.h.

◆ TCP_MAX_QUICKACKS

#define TCP_MAX_QUICKACKS   16U

Definition at line 2090 of file tcpcore.h.

◆ TCP_MAX_REORDERING

#define TCP_MAX_REORDERING   127

Definition at line 2087 of file tcpcore.h.

◆ TCP_MEM_QUANTUM

#define TCP_MEM_QUANTUM   ((int)PAGE_SIZE)

Definition at line 3617 of file tcpcore.h.

◆ TCP_MIN_MSS

#define TCP_MIN_MSS   88U

Definition at line 2078 of file tcpcore.h.

◆ TCP_MIN_RCVMSS

#define TCP_MIN_RCVMSS   536U

Definition at line 2081 of file tcpcore.h.

◆ tcp_openreq_alloc

#define tcp_openreq_alloc ( )    kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)

Definition at line 2317 of file tcpcore.h.

◆ tcp_openreq_fastfree

#define tcp_openreq_fastfree (   req)    kmem_cache_free(tcp_openreq_cachep, req)

Definition at line 2318 of file tcpcore.h.

◆ TCP_ORPHAN_RETRIES

#define TCP_ORPHAN_RETRIES
Value:
7 /* number of times to retry on an orphaned
* socket. 7 is ~50sec-16min.
*/

Definition at line 2106 of file tcpcore.h.

◆ TCP_PAWS_24DAYS

#define TCP_PAWS_24DAYS   (60 * 60 * 24 * 24)

Definition at line 2150 of file tcpcore.h.

◆ TCP_PAWS_MSL

#define TCP_PAWS_MSL
Value:
60 /* Per-host timestamps are invalidated
* after this time. It should be equal
* (or greater than) TCP_TIMEWAIT_LEN
* to provide reliability equal to one
* provided by timewait state.
*/

Definition at line 2151 of file tcpcore.h.

◆ TCP_PAWS_WINDOW

#define TCP_PAWS_WINDOW
Value:
1 /* Replay window for per-host
* timestamps. It must be less than
* minimal timewait lifetime.
*/

Definition at line 2152 of file tcpcore.h.

◆ tcp_portalloc_lock

#define tcp_portalloc_lock   (tcp_hashinfo.__tcp_portalloc_lock)

Definition at line 1933 of file tcpcore.h.

◆ TCP_RESOURCE_PROBE_INTERVAL

#define TCP_RESOURCE_PROBE_INTERVAL
Value:
((unsigned)(HZ/2U)) /* Maximal interval between probes
* for local resources.
*/
static unsigned(__cdecl *hash_bstr)(bstr_t s)
#define HZ
Definition: pchw.c:36

Definition at line 2132 of file tcpcore.h.

◆ TCP_RETR1

#define TCP_RETR1
Value:
3 /*
* This is how many retries it does before it
* tries to figure out if the gateway is
* down. Minimal RFC value is 3; it corresponds
* to ~3sec-8min depending on RTO.
*/

Definition at line 2097 of file tcpcore.h.

◆ TCP_RETR2

#define TCP_RETR2
Value:
15 /*
* This should take at least
* 90 minutes to time out.
* RFC1122 says that the limit is 100 sec.
* 15 is ~13-30min depending on RTO.
*/

Definition at line 2099 of file tcpcore.h.

◆ TCP_RTO_MAX

#define TCP_RTO_MAX   ((unsigned)(120*HZ))

Definition at line 2128 of file tcpcore.h.

◆ TCP_RTO_MIN

#define TCP_RTO_MIN   ((unsigned)(HZ/5))

Definition at line 2129 of file tcpcore.h.

◆ TCP_SKB_CB

#define TCP_SKB_CB (   __skb)    ((struct tcp_skb_cb *)&((__skb)->cb[0]))

Definition at line 2843 of file tcpcore.h.

◆ TCP_SYN_RETRIES

#define TCP_SYN_RETRIES
Value:
5 /* number of times to retry active opening a
* connection: ~180sec is RFC minimum */

Definition at line 2101 of file tcpcore.h.

◆ TCP_SYNACK_RETRIES

#define TCP_SYNACK_RETRIES
Value:
5 /* number of times to retry passive opening a
* connection: ~180sec is RFC minimum */

Definition at line 2103 of file tcpcore.h.

◆ TCP_SYNQ_HSIZE

#define TCP_SYNQ_HSIZE   512 /* Size of SYNACK hash table */

Definition at line 2148 of file tcpcore.h.

◆ TCP_SYNQ_INTERVAL

#define TCP_SYNQ_INTERVAL   (HZ/5) /* Period of SYNACK timer */

Definition at line 2147 of file tcpcore.h.

◆ TCP_TIME_DACK

#define TCP_TIME_DACK   2 /* Delayed ack timer */

Definition at line 2215 of file tcpcore.h.

◆ TCP_TIME_KEEPOPEN

#define TCP_TIME_KEEPOPEN   4 /* Keepalive timer */

Definition at line 2217 of file tcpcore.h.

◆ TCP_TIME_PROBE0

#define TCP_TIME_PROBE0   3 /* Zero window probe timer */

Definition at line 2216 of file tcpcore.h.

◆ TCP_TIME_RETRANS

#define TCP_TIME_RETRANS   1 /* Retransmit timer */

Definition at line 2214 of file tcpcore.h.

◆ tcp_time_stamp

#define tcp_time_stamp   ((__u32)(jiffies))

Definition at line 2791 of file tcpcore.h.

◆ TCP_TIMEOUT_INIT

#define TCP_TIMEOUT_INIT   ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value */

Definition at line 2130 of file tcpcore.h.

◆ TCP_TIMEWAIT_LEN

#define TCP_TIMEWAIT_LEN   (60*1000)

Definition at line 2109 of file tcpcore.h.

◆ TCP_TW_RECYCLE_SLOTS

#define TCP_TW_RECYCLE_SLOTS   (1<<TCP_TW_RECYCLE_SLOTS_LOG)

Definition at line 2155 of file tcpcore.h.

◆ TCP_TW_RECYCLE_SLOTS_LOG

#define TCP_TW_RECYCLE_SLOTS_LOG   5

Definition at line 2154 of file tcpcore.h.

◆ TCP_TW_RECYCLE_TICK

#define TCP_TW_RECYCLE_TICK   (0)

Definition at line 2182 of file tcpcore.h.

◆ TCP_TWKILL_PERIOD

#define TCP_TWKILL_PERIOD   (TCP_TIMEWAIT_LEN/TCP_TWKILL_SLOTS)

Definition at line 2145 of file tcpcore.h.

◆ TCP_TWKILL_SLOTS

#define TCP_TWKILL_SLOTS   8 /* Please keep this a power of 2. */

Definition at line 2144 of file tcpcore.h.

◆ TCP_URG_NOTYET

#define TCP_URG_NOTYET   0x0200

Definition at line 2094 of file tcpcore.h.

◆ TCP_URG_READ

#define TCP_URG_READ   0x0400

Definition at line 2095 of file tcpcore.h.

◆ TCP_URG_VALID

#define TCP_URG_VALID   0x0100

Definition at line 2093 of file tcpcore.h.

◆ TCP_V4_ADDR_COOKIE

#define TCP_V4_ADDR_COOKIE (   __name,
  __saddr,
  __daddr 
)

Definition at line 2035 of file tcpcore.h.

◆ TCPCB_AT_TAIL

#define TCPCB_AT_TAIL   (TCPCB_URG)

Definition at line 2837 of file tcpcore.h.

◆ TCPCB_EVER_RETRANS

#define TCPCB_EVER_RETRANS   0x80 /* Ever retransmitted frame */

Definition at line 2832 of file tcpcore.h.

◆ TCPCB_FLAG_ACK

#define TCPCB_FLAG_ACK   0x10

Definition at line 2821 of file tcpcore.h.

◆ TCPCB_FLAG_CWR

#define TCPCB_FLAG_CWR   0x80

Definition at line 2824 of file tcpcore.h.

◆ TCPCB_FLAG_ECE

#define TCPCB_FLAG_ECE   0x40

Definition at line 2823 of file tcpcore.h.

◆ TCPCB_FLAG_FIN

#define TCPCB_FLAG_FIN   0x01

Definition at line 2817 of file tcpcore.h.

◆ TCPCB_FLAG_PSH

#define TCPCB_FLAG_PSH   0x08

Definition at line 2820 of file tcpcore.h.

◆ TCPCB_FLAG_RST

#define TCPCB_FLAG_RST   0x04

Definition at line 2819 of file tcpcore.h.

◆ TCPCB_FLAG_SYN

#define TCPCB_FLAG_SYN   0x02

Definition at line 2818 of file tcpcore.h.

◆ TCPCB_FLAG_URG

#define TCPCB_FLAG_URG   0x20

Definition at line 2822 of file tcpcore.h.

◆ TCPCB_LOST

#define TCPCB_LOST   0x04 /* SKB is lost */

Definition at line 2829 of file tcpcore.h.

◆ TCPCB_RETRANS

Definition at line 2833 of file tcpcore.h.

◆ TCPCB_SACKED_ACKED

#define TCPCB_SACKED_ACKED   0x01 /* SKB ACK'd by a SACK block */

Definition at line 2827 of file tcpcore.h.

◆ TCPCB_SACKED_RETRANS

#define TCPCB_SACKED_RETRANS   0x02 /* SKB retransmitted */

Definition at line 2828 of file tcpcore.h.

◆ TCPCB_TAGBITS

#define TCPCB_TAGBITS   0x07 /* All tag bits */

Definition at line 2830 of file tcpcore.h.

◆ TCPCB_URG

#define TCPCB_URG   0x20 /* Urgent pointer advanced here */

Definition at line 2835 of file tcpcore.h.

◆ TCPOLEN_MSS

#define TCPOLEN_MSS   4

Definition at line 2201 of file tcpcore.h.

◆ TCPOLEN_SACK_BASE

#define TCPOLEN_SACK_BASE   2

Definition at line 2210 of file tcpcore.h.

◆ TCPOLEN_SACK_BASE_ALIGNED

#define TCPOLEN_SACK_BASE_ALIGNED   4

Definition at line 2211 of file tcpcore.h.

◆ TCPOLEN_SACK_PERBLOCK

#define TCPOLEN_SACK_PERBLOCK   8

Definition at line 2212 of file tcpcore.h.

◆ TCPOLEN_SACK_PERM

#define TCPOLEN_SACK_PERM   2

Definition at line 2203 of file tcpcore.h.

◆ TCPOLEN_SACKPERM_ALIGNED

#define TCPOLEN_SACKPERM_ALIGNED   4

Definition at line 2209 of file tcpcore.h.

◆ TCPOLEN_TIMESTAMP

#define TCPOLEN_TIMESTAMP   10

Definition at line 2204 of file tcpcore.h.

◆ TCPOLEN_TSTAMP_ALIGNED

#define TCPOLEN_TSTAMP_ALIGNED   12

Definition at line 2207 of file tcpcore.h.

◆ TCPOLEN_WINDOW

#define TCPOLEN_WINDOW   3

Definition at line 2202 of file tcpcore.h.

◆ TCPOLEN_WSCALE_ALIGNED

#define TCPOLEN_WSCALE_ALIGNED   4

Definition at line 2208 of file tcpcore.h.

◆ TCPOPT_EOL

#define TCPOPT_EOL   0 /* End of options */

Definition at line 2190 of file tcpcore.h.

◆ TCPOPT_MSS

#define TCPOPT_MSS   2 /* Segment size negotiating */

Definition at line 2191 of file tcpcore.h.

◆ TCPOPT_NOP

#define TCPOPT_NOP   1 /* Padding */

Definition at line 2189 of file tcpcore.h.

◆ TCPOPT_SACK

#define TCPOPT_SACK   5 /* SACK Block */

Definition at line 2194 of file tcpcore.h.

◆ TCPOPT_SACK_PERM

#define TCPOPT_SACK_PERM   4 /* SACK Permitted */

Definition at line 2193 of file tcpcore.h.

◆ TCPOPT_TIMESTAMP

#define TCPOPT_TIMESTAMP   8 /* Better RTT estimations/PAWS */

Definition at line 2195 of file tcpcore.h.

◆ TCPOPT_WINDOW

#define TCPOPT_WINDOW   3 /* Window scaling */

Definition at line 2192 of file tcpcore.h.

Typedef Documentation

◆ _sockaddr

◆ sk_read_actor_t

typedef int(* sk_read_actor_t) (read_descriptor_t *, struct sk_buff *, unsigned int, size_t)

Definition at line 2615 of file tcpcore.h.

◆ skb_frag_t

Definition at line 120 of file tcpcore.h.

Enumeration Type Documentation

◆ tcp_ack_state_t

Enumerator
TCP_ACK_SCHED 
TCP_ACK_TIMER 
TCP_ACK_PUSHED 

Definition at line 2449 of file tcpcore.h.

2473{
2474 TCP_ACK_SCHED = 1,
2475 TCP_ACK_TIMER = 2,
2477};
2478
2479static __inline void tcp_schedule_ack(struct tcp_opt *tp)
2480{
2481 tp->ack.pending |= TCP_ACK_SCHED;
2482}
2483
2484static __inline int tcp_ack_scheduled(struct tcp_opt *tp)
2485{
2486 return tp->ack.pending&TCP_ACK_SCHED;
2487}
2488
2489static __inline void tcp_dec_quickack_mode(struct tcp_opt *tp)
2490{
2491 if (tp->ack.quick && --tp->ack.quick == 0) {
2492 /* Leaving quickack mode we deflate ATO. */
2493 tp->ack.ato = TCP_ATO_MIN;
2494 }
2495}
2496
2497extern void tcp_enter_quickack_mode(struct tcp_opt *tp);
2498
2499static __inline void tcp_delack_init(struct tcp_opt *tp)
2500{
2501 memset(&tp->ack, 0, sizeof(tp->ack));
2502}
2503
2504static __inline void tcp_clear_options(struct tcp_opt *tp)
2505{
2506 tp->tstamp_ok = tp->sack_ok = tp->wscale_ok = tp->snd_wscale = 0;
2507}
2508
2509enum tcp_tw_status
2510{
2511 TCP_TW_SUCCESS = 0,
2512 TCP_TW_RST = 1,
2513 TCP_TW_ACK = 2,
2514 TCP_TW_SYN = 3
2515};
2516
2517
2519 struct sk_buff *skb,
2520 struct tcphdr *th,
2521 unsigned len);
2522
2523extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
2524 struct open_request *req,
2525 struct open_request **prev);
2526extern int tcp_child_process(struct sock *parent,
2527 struct sock *child,
2528 struct sk_buff *skb);
2529extern void tcp_enter_loss(struct sock *sk, int how);
2530extern void tcp_clear_retrans(struct tcp_opt *tp);
2531extern void tcp_update_metrics(struct sock *sk);
2532
2533extern void tcp_close(struct sock *sk,
2534 long timeout);
2535extern struct sock * tcp_accept(struct sock *sk, int flags, int *err);
2536extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);
2537extern void tcp_write_space(struct sock *sk);
2538
2539extern int tcp_getsockopt(struct sock *sk, int level,
2540 int optname, char *optval,
2541 int *optlen);
2542extern int tcp_setsockopt(struct sock *sk, int level,
2543 int optname, char *optval,
2544 int optlen);
2545extern void tcp_set_keepalive(struct sock *sk, int val);
2546extern int tcp_recvmsg(struct sock *sk,
2547 struct msghdr *msg,
2548 int len, int nonblock,
2549 int flags, int *addr_len);
2550
2551extern int tcp_listen_start(struct sock *sk);
2552
2553extern void tcp_parse_options(struct sk_buff *skb,
2554 struct tcp_opt *tp,
2555 int estab);
2556
2557/*
2558 * TCP v4 functions exported for the inet6 API
2559 */
2560
2561extern int tcp_v4_rebuild_header(struct sock *sk);
2562
2563extern int tcp_v4_build_header(struct sock *sk,
2564 struct sk_buff *skb);
2565
2566extern void tcp_v4_send_check(struct sock *sk,
2567 struct tcphdr *th, int len,
2568 struct sk_buff *skb);
2569
2570extern int tcp_v4_conn_request(struct sock *sk,
2571 struct sk_buff *skb);
2572
2573extern struct sock * tcp_create_openreq_child(struct sock *sk,
2574 struct open_request *req,
2575 struct sk_buff *skb);
2576
2577extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk,
2578 struct sk_buff *skb,
2579 struct open_request *req,
2580 struct dst_entry *dst);
2581
2582extern int tcp_v4_do_rcv(struct sock *sk,
2583 struct sk_buff *skb);
2584
2585extern int tcp_v4_connect(struct sock *sk,
2586 struct sockaddr *uaddr,
2587 int addr_len);
2588
2589extern int tcp_connect(struct sock *sk);
2590
2591extern struct sk_buff * tcp_make_synack(struct sock *sk,
2592 struct dst_entry *dst,
2593 struct open_request *req);
2594
2595extern int tcp_disconnect(struct sock *sk, int flags);
2596
2597extern void tcp_unhash(struct sock *sk);
2598
2599extern int tcp_v4_hash_connecting(struct sock *sk);
2600
2601
2602/* From syncookies.c */
2603extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
2604 struct ip_options *opt);
2605extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
2606 __u16 *mss);
2607
2608/* tcp_output.c */
2609
2610extern int tcp_write_xmit(struct sock *, int nonagle);
2611extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
2612extern void tcp_xmit_retransmit_queue(struct sock *);
2613extern void tcp_simple_retransmit(struct sock *);
2614
2615extern void tcp_send_probe0(struct sock *);
2616extern void tcp_send_partial(struct sock *);
2617extern int tcp_write_wakeup(struct sock *);
2618extern void tcp_send_fin(struct sock *sk);
2619extern void tcp_send_active_reset(struct sock *sk, int priority);
2620extern int tcp_send_synack(struct sock *);
2621extern int tcp_transmit_skb(struct sock *, struct sk_buff *);
2622extern void tcp_send_skb(struct sock *, struct sk_buff *, int force_queue, unsigned mss_now);
2623extern void tcp_push_one(struct sock *, unsigned mss_now);
2624extern void tcp_send_ack(struct sock *sk);
2625extern void tcp_send_delayed_ack(struct sock *sk);
2626
2627/* tcp_timer.c */
2628extern void tcp_init_xmit_timers(struct sock *);
2629extern void tcp_clear_xmit_timers(struct sock *);
2630
2631extern void tcp_delete_keepalive_timer (struct sock *);
2632extern void tcp_reset_keepalive_timer (struct sock *, unsigned long);
2633extern int tcp_sync_mss(struct sock *sk, u32 pmtu);
2634
2635extern const char timer_bug_msg[];
2636
2637/* Read 'sendfile()'-style from a TCP socket */
2638typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
2639 unsigned int, size_t);
2640extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
2641 sk_read_actor_t recv_actor);
2642
2643static __inline void tcp_clear_xmit_timer(struct sock *sk, int what)
2644{
2645#if 0
2646 struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
2647
2648 switch (what) {
2649 case TCP_TIME_RETRANS:
2650 case TCP_TIME_PROBE0:
2651 tp->pending = 0;
2652
2653#ifdef TCP_CLEAR_TIMERS
2654 if (timer_pending(&tp->retransmit_timer) &&
2655 del_timer(&tp->retransmit_timer))
2656 __sock_put(sk);
2657#endif
2658 break;
2659 case TCP_TIME_DACK:
2660 tp->ack.blocked = 0;
2661 tp->ack.pending = 0;
2662
2663#ifdef TCP_CLEAR_TIMERS
2664 if (timer_pending(&tp->delack_timer) &&
2665 del_timer(&tp->delack_timer))
2666 __sock_put(sk);
2667#endif
2668 break;
2669 default:
2671 return;
2672 };
2673#endif
2674}
2675
2676/*
2677 * Reset the retransmission timer
2678 */
2679static __inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
2680{
2681#if 0
2682 struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
2683
2684 if (when > TCP_RTO_MAX) {
2685#ifdef TCP_DEBUG
2686 printk(KERN_DEBUG "reset_xmit_timer sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, current_text_addr());
2687#endif
2688 when = TCP_RTO_MAX;
2689 }
2690
2691 switch (what) {
2692 case TCP_TIME_RETRANS:
2693 case TCP_TIME_PROBE0:
2694 tp->pending = what;
2695 tp->timeout = jiffies+when;
2696 if (!mod_timer(&tp->retransmit_timer, tp->timeout))
2697 sock_hold(sk);
2698 break;
2699
2700 case TCP_TIME_DACK:
2701 tp->ack.pending |= TCP_ACK_TIMER;
2702 tp->ack.timeout = jiffies+when;
2703 if (!mod_timer(&tp->delack_timer, tp->ack.timeout))
2704 sock_hold(sk);
2705 break;
2706
2707 default:
2708 printk(KERN_DEBUG "bug: unknown timer value\n");
2709 };
2710#endif
2711}
2712
2713/* Compute the current effective MSS, taking SACKs and IP options,
2714 * and even PMTU discovery events into account.
2715 */
2716
2717static __inline unsigned int tcp_current_mss(struct sock *sk)
2718{
2719#if 0
2720 struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
2721 struct dst_entry *dst = __sk_dst_get(sk);
2722 int mss_now = tp->mss_cache;
2723
2724 if (dst && dst->pmtu != tp->pmtu_cookie)
2725 mss_now = tcp_sync_mss(sk, dst->pmtu);
2726
2727 if (tp->eff_sacks)
2728 mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
2729 (tp->eff_sacks * TCPOLEN_SACK_PERBLOCK));
2730 return mss_now;
2731#else
2732 return 0;
2733#endif
2734}
2735
2736/* Initialize RCV_MSS value.
2737 * RCV_MSS is our guess about MSS used by the peer.
2738 * We haven't any direct information about the MSS.
2739 * It's better to underestimate the RCV_MSS rather than overestimate.
2740 * Overestimations make us ACKing less frequently than needed.
2741 * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
2742 */
2743
2744static __inline void tcp_initialize_rcv_mss(struct sock *sk)
2745{
2746#if 0
2747 struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
2748 unsigned int hint = min(tp->advmss, tp->mss_cache);
2749
2750 hint = min(hint, tp->rcv_wnd/2);
2753
2754 tp->ack.rcv_mss = hint;
2755#endif
2756}
2757
2758static __inline void __tcp_fast_path_on(struct tcp_opt *tp, u32 snd_wnd)
2759{
2760#if 0
2761 tp->pred_flags = htonl((tp->tcp_header_len << 26) |
2763 snd_wnd);
2764#endif
2765}
2766
2767static __inline void tcp_fast_path_on(struct tcp_opt *tp)
2768{
2769#if 0
2770 __tcp_fast_path_on(tp, tp->snd_wnd>>tp->snd_wscale);
2771#endif
2772}
2773
2774static __inline void tcp_fast_path_check(struct sock *sk, struct tcp_opt *tp)
2775{
2776#if 0
2777 if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
2778 tp->rcv_wnd &&
2779 atomic_read(&sk->rmem_alloc) < sk->rcvbuf &&
2780 !tp->urg_data)
2782#endif
2783}
2784
2785/* Compute the actual receive window we are currently advertising.
2786 * Rcv_nxt can be after the window if our peer pushes more data
2787 * than the offered window.
2788 */
2789static __inline u32 tcp_receive_window(struct tcp_opt *tp)
2790{
2791#if 0
2792 s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
2793
2794 if (win < 0)
2795 win = 0;
2796 return (u32) win;
2797#else
2798 return 0;
2799#endif
2800}
2801
2802/* Choose a new window, without checks for shrinking, and without
2803 * scaling applied to the result. The caller does these things
2804 * if necessary. This is a "raw" window selection.
2805 */
2806extern u32 __tcp_select_window(struct sock *sk);
2807
2808/* TCP timestamps are only 32-bits, this causes a slight
2809 * complication on 64-bit systems since we store a snapshot
2810 * of jiffies in the buffer control blocks below. We decidedly
2811 * only use of the low 32-bits of jiffies and hide the ugly
2812 * casts with the following macro.
2813 */
2814#define tcp_time_stamp ((__u32)(jiffies))
2815
2816/* This is what the send packet queueing engine uses to pass
2817 * TCP per-packet control information to the transmission
2818 * code. We also store the host-order sequence numbers in
2819 * here too. This is 36 bytes on 32-bit architectures,
2820 * 40 bytes on 64-bit machines, if this grows please adjust
2821 * skbuff.h:skbuff->cb[xxx] size appropriately.
2822 */
2823struct tcp_skb_cb {
2824 union {
2825#if 0
2826 struct inet_skb_parm h4;
2827#endif
2828#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
2829 struct inet6_skb_parm h6;
2830#endif
2831 } header; /* For incoming frames */
2832 __u32 seq; /* Starting sequence number */
2833 __u32 end_seq; /* SEQ + FIN + SYN + datalen */
2834 __u32 when; /* used to compute rtt's */
2835 __u8 flags; /* TCP header flags. */
2836
2837 /* NOTE: These must match up to the flags byte in a
2838 * real TCP header.
2839 */
2840#define TCPCB_FLAG_FIN 0x01
2841#define TCPCB_FLAG_SYN 0x02
2842#define TCPCB_FLAG_RST 0x04
2843#define TCPCB_FLAG_PSH 0x08
2844#define TCPCB_FLAG_ACK 0x10
2845#define TCPCB_FLAG_URG 0x20
2846#define TCPCB_FLAG_ECE 0x40
2847#define TCPCB_FLAG_CWR 0x80
2848
2849 __u8 sacked; /* State flags for SACK/FACK. */
2850#define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */
2851#define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */
2852#define TCPCB_LOST 0x04 /* SKB is lost */
2853#define TCPCB_TAGBITS 0x07 /* All tag bits */
2854
2855#define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
2856#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
2857
2858#define TCPCB_URG 0x20 /* Urgent pointer advanced here */
2859
2860#define TCPCB_AT_TAIL (TCPCB_URG)
2861
2862 __u16 urg_ptr; /* Valid w/URG flags is set. */
2863 __u32 ack_seq; /* Sequence number ACK'd */
2864};
2865
2866#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
2867
2868#define for_retrans_queue(skb, sk, tp) \
2869 for (skb = (sk)->write_queue.next; \
2870 (skb != (tp)->send_head) && \
2871 (skb != (struct sk_buff *)&(sk)->write_queue); \
2872 skb=skb->next)
2873
2874
2875//#include <net/tcp_ecn.h>
2876
2877
2878/*
2879 * Compute minimal free write space needed to queue new packets.
2880 */
2881static __inline int tcp_min_write_space(struct sock *sk)
2882{
2883#if 0
2884 return sk->wmem_queued/2;
2885#else
2886return 0;
2887#endif
2888}
2889
2890static __inline int tcp_wspace(struct sock *sk)
2891{
2892#if 0
2893 return sk->sndbuf - sk->wmem_queued;
2894#else
2895return 0;
2896#endif
2897}
2898
2899
2900/* This determines how many packets are "in the network" to the best
2901 * of our knowledge. In many cases it is conservative, but where
2902 * detailed information is available from the receiver (via SACK
2903 * blocks etc.) we can make more aggressive calculations.
2904 *
2905 * Use this for decisions involving congestion control, use just
2906 * tp->packets_out to determine if the send queue is empty or not.
2907 *
2908 * Read this equation as:
2909 *
2910 * "Packets sent once on transmission queue" MINUS
2911 * "Packets left network, but not honestly ACKed yet" PLUS
2912 * "Packets fast retransmitted"
2913 */
2914static __inline unsigned int tcp_packets_in_flight(struct tcp_opt *tp)
2915{
2916#if 0
2917 return tp->packets_out - tp->left_out + tp->retrans_out;
2918#else
2919 return 0;
2920#endif
2921}
2922
2923/* Recalculate snd_ssthresh, we want to set it to:
2924 *
2925 * one half the current congestion window, but no
2926 * less than two segments
2927 */
2928static __inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
2929{
2930#if 0
2931 return max(tp->snd_cwnd >> 1U, 2U);
2932#else
2933 return 0;
2934#endif
2935}
2936
2937/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
2938 * The exception is rate halving phase, when cwnd is decreasing towards
2939 * ssthresh.
2940 */
2941static __inline __u32 tcp_current_ssthresh(struct tcp_opt *tp)
2942{
2943#if 0
2944 if ((1<<tp->ca_state)&(TCPF_CA_CWR|TCPF_CA_Recovery))
2945 return tp->snd_ssthresh;
2946 else
2947 return max(tp->snd_ssthresh,
2948 ((tp->snd_cwnd >> 1) +
2949 (tp->snd_cwnd >> 2)));
2950#else
2951 return 0;
2952#endif
2953}
2954
2955static __inline void tcp_sync_left_out(struct tcp_opt *tp)
2956{
2957#if 0
2958 if (tp->sack_ok && tp->sacked_out >= tp->packets_out - tp->lost_out)
2959 tp->sacked_out = tp->packets_out - tp->lost_out;
2960 tp->left_out = tp->sacked_out + tp->lost_out;
2961#endif
2962}
2963
2964extern void tcp_cwnd_application_limited(struct sock *sk);
2965
2966/* Congestion window validation. (RFC2861) */
2967
2968static __inline void tcp_cwnd_validate(struct sock *sk, struct tcp_opt *tp)
2969{
2970#if 0
2971 if (tp->packets_out >= tp->snd_cwnd) {
2972 /* Network is fed fully. */
2973 tp->snd_cwnd_used = 0;
2974 tp->snd_cwnd_stamp = tcp_time_stamp;
2975 } else {
2976 /* Network starves. */
2977 if (tp->packets_out > tp->snd_cwnd_used)
2978 tp->snd_cwnd_used = tp->packets_out;
2979
2980 if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto)
2982 }
2983#endif
2984}
2985
2986/* Set slow start threshold and cwnd not falling to slow start */
2987static __inline void __tcp_enter_cwr(struct tcp_opt *tp)
2988{
2989#if 0
2990 tp->undo_marker = 0;
2991 tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
2992 tp->snd_cwnd = min(tp->snd_cwnd,
2994 tp->snd_cwnd_cnt = 0;
2995 tp->high_seq = tp->snd_nxt;
2996 tp->snd_cwnd_stamp = tcp_time_stamp;
2997 TCP_ECN_queue_cwr(tp);
2998#endif
2999}
3000
3001static __inline void tcp_enter_cwr(struct tcp_opt *tp)
3002{
3003#if 0
3004 tp->prior_ssthresh = 0;
3005 if (tp->ca_state < TCP_CA_CWR) {
3007 tp->ca_state = TCP_CA_CWR;
3008 }
3009#endif
3010}
3011
3012extern __u32 tcp_init_cwnd(struct tcp_opt *tp);
3013
3014/* Slow start with delack produces 3 packets of burst, so that
3015 * it is safe "de facto".
3016 */
3017static __inline __u32 tcp_max_burst(struct tcp_opt *tp)
3018{
3019 return 3;
3020}
3021
3022static __inline__ int tcp_minshall_check(struct tcp_opt *tp)
3023{
3024#if 0
3025 return after(tp->snd_sml,tp->snd_una) &&
3026 !after(tp->snd_sml, tp->snd_nxt);
3027#else
3028 return 0;
3029#endif
3030}
3031
3032static __inline void tcp_minshall_update(struct tcp_opt *tp, int mss, struct sk_buff *skb)
3033{
3034#if 0
3035 if (skb->len < mss)
3036 tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
3037#endif
3038}
3039
3040/* Return 0, if packet can be sent now without violation Nagle's rules:
3041 1. It is full sized.
3042 2. Or it contains FIN.
3043 3. Or TCP_NODELAY was set.
3044 4. Or TCP_CORK is not set, and all sent packets are ACKed.
3045 With Minshall's modification: all sent small packets are ACKed.
3046 */
3047
3048static __inline int
3049tcp_nagle_check(struct tcp_opt *tp, struct sk_buff *skb, unsigned mss_now, int nonagle)
3050{
3051#if 0
3052 return (skb->len < mss_now &&
3053 !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
3054 (nonagle == 2 ||
3055 (!nonagle &&
3056 tp->packets_out &&
3058#else
3059 return 0;
3060#endif
3061}
3062
3063/* This checks if the data bearing packet SKB (usually tp->send_head)
3064 * should be put on the wire right now.
3065 */
3066static __inline int tcp_snd_test(struct tcp_opt *tp, struct sk_buff *skb,
3067 unsigned cur_mss, int nonagle)
3068{
3069#if 0
3070 /* RFC 1122 - section 4.2.3.4
3071 *
3072 * We must queue if
3073 *
3074 * a) The right edge of this frame exceeds the window
3075 * b) There are packets in flight and we have a small segment
3076 * [SWS avoidance and Nagle algorithm]
3077 * (part of SWS is done on packetization)
3078 * Minshall version sounds: there are no _small_
3079 * segments in flight. (tcp_nagle_check)
3080 * c) We have too many packets 'in flight'
3081 *
3082 * Don't use the nagle rule for urgent data (or
3083 * for the final FIN -DaveM).
3084 *
3085 * Also, Nagle rule does not apply to frames, which
3086 * sit in the middle of queue (they have no chances
3087 * to get new data) and if room at tail of skb is
3088 * not enough to save something seriously (<32 for now).
3089 */
3090
3091 /* Don't be strict about the congestion window for the
3092 * final FIN frame. -DaveM
3093 */
3094 return ((nonagle==1 || tp->urg_mode
3095 || !tcp_nagle_check(tp, skb, cur_mss, nonagle)) &&
3096 ((tcp_packets_in_flight(tp) < tp->snd_cwnd) ||
3097 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) &&
3098 !after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd));
3099#else
3100 return 0;
3101#endif
3102}
3103
3104static __inline void tcp_check_probe_timer(struct sock *sk, struct tcp_opt *tp)
3105{
3106#if 0
3107 if (!tp->packets_out && !tp->pending)
3109#endif
3110}
3111
3112static __inline int tcp_skb_is_last(struct sock *sk, struct sk_buff *skb)
3113{
3114#if 0
3115 return (skb->next == (struct sk_buff*)&sk->write_queue);
3116#else
3117 return 0;
3118#endif
3119}
3120
3121/* Push out any pending frames which were held back due to
3122 * TCP_CORK or attempt at coalescing tiny packets.
3123 * The socket must be locked by the caller.
3124 */
3125static __inline void __tcp_push_pending_frames(struct sock *sk,
3126 struct tcp_opt *tp,
3127 unsigned cur_mss,
3128 int nonagle)
3129{
3130#if 0
3131 struct sk_buff *skb = tp->send_head;
3132
3133 if (skb) {
3134 if (!tcp_skb_is_last(sk, skb))
3135 nonagle = 1;
3136 if (!tcp_snd_test(tp, skb, cur_mss, nonagle) ||
3137 tcp_write_xmit(sk, nonagle))
3139 }
3141#endif
3142}
3143
3144static __inline void tcp_push_pending_frames(struct sock *sk,
3145 struct tcp_opt *tp)
3146{
3147#if 0
3149#endif
3150}
3151
3152static __inline int tcp_may_send_now(struct sock *sk, struct tcp_opt *tp)
3153{
3154#if 0
3155 struct sk_buff *skb = tp->send_head;
3156
3157 return (skb &&
3159 tcp_skb_is_last(sk, skb) ? 1 : tp->nonagle));
3160#else
3161 return 0;
3162#endif
3163}
3164
3165static __inline void tcp_init_wl(struct tcp_opt *tp, u32 ack, u32 seq)
3166{
3167#if 0
3168 tp->snd_wl1 = seq;
3169#endif
3170}
3171
3172static __inline void tcp_update_wl(struct tcp_opt *tp, u32 ack, u32 seq)
3173{
3174#if 0
3175 tp->snd_wl1 = seq;
3176#endif
3177}
3178
3179extern void tcp_destroy_sock(struct sock *sk);
3180
3181
3182/*
3183 * Calculate(/check) TCP checksum
3184 */
3185static __inline u16 tcp_v4_check(struct tcphdr *th, int len,
3186 unsigned long saddr, unsigned long daddr,
3187 unsigned long base)
3188{
3189#if 0
3190 return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
3191#else
3192 return 0;
3193#endif
3194}
3195
3196static __inline int __tcp_checksum_complete(struct sk_buff *skb)
3197{
3198#if 0
3199 return (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
3200#else
3201 return 0;
3202#endif
3203}
3204
3205static __inline int tcp_checksum_complete(struct sk_buff *skb)
3206{
3207#if 0
3208 return skb->ip_summed != CHECKSUM_UNNECESSARY &&
3210#else
3211 return 0;
3212#endif
3213}
3214
3215/* Prequeue for VJ style copy to user, combined with checksumming. */
3216
3217static __inline void tcp_prequeue_init(struct tcp_opt *tp)
3218{
3219#if 0
3220 tp->ucopy.task = NULL;
3221 tp->ucopy.len = 0;
3222 tp->ucopy.memory = 0;
3223 skb_queue_head_init(&tp->ucopy.prequeue);
3224#endif
3225}
3226
3227/* Packet is added to VJ-style prequeue for processing in process
3228 * context, if a reader task is waiting. Apparently, this exciting
3229 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
3230 * failed somewhere. Latency? Burstiness? Well, at least now we will
3231 * see, why it failed. 8)8) --ANK
3232 *
3233 * NOTE: is this not too big to inline?
3234 */
3235static __inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
3236{
3237#if 0
3238 struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
3239
3240 if (tp->ucopy.task) {
3241 __skb_queue_tail(&tp->ucopy.prequeue, skb);
3242 tp->ucopy.memory += skb->truesize;
3243 if (tp->ucopy.memory > sk->rcvbuf) {
3244 struct sk_buff *skb1;
3245
3246 if (sk->lock.users)
3247 out_of_line_bug();
3248
3249 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
3250 sk->backlog_rcv(sk, skb1);
3251 NET_INC_STATS_BH(TCPPrequeueDropped);
3252 }
3253
3254 tp->ucopy.memory = 0;
3255 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
3256 wake_up_interruptible(sk->sleep);
3257 if (!tcp_ack_scheduled(tp))
3259 }
3260 return 1;
3261 }
3262 return 0;
3263#else
3264 return 0;
3265#endif
3266}
3267
3268
3269#undef STATE_TRACE
3270
3271#ifdef STATE_TRACE
3272static char *statename[]={
3273 "Unused","Established","Syn Sent","Syn Recv",
3274 "Fin Wait 1","Fin Wait 2","Time Wait", "Close",
3275 "Close Wait","Last ACK","Listen","Closing"
3276};
3277#endif
3278
/*
 * Transition socket `sk` to TCP state `state`, maintaining the
 * TcpCurrEstab SNMP counter and unhashing the socket on TCP_CLOSE.
 * The body is compiled out (#if 0); the active stub does nothing.
 */
static __inline void tcp_set_state(struct sock *sk, int state)
{
#if 0
	int oldstate = sk->state;

	switch (state) {
	case TCP_ESTABLISHED:
		if (oldstate != TCP_ESTABLISHED)
			TCP_INC_STATS(TcpCurrEstab);
		break;

	case TCP_CLOSE:
		sk->prot->unhash(sk);
		if (sk->prev && !(sk->userlocks&SOCK_BINDPORT_LOCK))
			/* NOTE(review): the statement body of this `if` is
			 * missing from this copy (original line 3293); upstream
			 * Linux 2.4 releases the bound port here
			 * (tcp_put_port(sk)) — verify before enabling. */
		/* fall through */
	default:
		if (oldstate==TCP_ESTABLISHED)
			tcp_statistics[smp_processor_id()*2+!in_softirq()].TcpCurrEstab--;
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->state = state;

#ifdef STATE_TRACE
	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]);
#endif
#endif
}
3310
/*
 * Final teardown of a TCP socket: mark it fully shut down and notify
 * (or destroy) it.  Body compiled out (#if 0); the stub does nothing.
 */
static __inline void tcp_done(struct sock *sk)
{
#if 0
	/* NOTE(review): original lines 3314-3315 are missing from this copy;
	 * upstream Linux 2.4 clears the transmit timers here
	 * (tcp_clear_xmit_timers(sk)) — verify against upstream. */

	sk->shutdown = SHUTDOWN_MASK;

	if (!sk->dead)
		sk->state_change(sk);
	else
		/* NOTE(review): the `else` body is missing from this copy
		 * (original line 3322); upstream destroys the dead socket here
		 * (tcp_destroy_sock(sk)) — verify against upstream. */
#endif
}
3325
/* Forget all pending SACK state on tp: no selective-ACK blocks, no
 * D-SACK, effective SACK count of zero.
 * Body compiled out (#if 0); the stub is a no-op. */
static __inline void tcp_sack_reset(struct tcp_opt *tp)
{
#if 0
	tp->num_sacks = 0;
	tp->eff_sacks = 0;
	tp->dsack = 0;
#endif
}
3334
/*
 * Write the TCP options (timestamp and/or SACK blocks) for an
 * established-connection segment into the option area at `ptr`, and
 * retire a D-SACK block after it has been sent once.
 * Body compiled out (#if 0); the stub does nothing.
 */
static __inline void tcp_build_and_update_options(__u32 *ptr, struct tcp_opt *tp, __u32 tstamp)
{
#if 0
	if (tp->tstamp_ok) {
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_TIMESTAMP << 8) |
			/* NOTE(review): the closing `TCPOLEN_TIMESTAMP);` term of
			 * this expression is missing from this copy (original
			 * line 3342) — verify against upstream. */
		*ptr++ = htonl(tstamp);
		*ptr++ = htonl(tp->ts_recent);
	}
	if (tp->eff_sacks) {
		struct tcp_sack_block *sp = tp->dsack ? tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_SACK << 8) |
			/* NOTE(review): the `(TCPOLEN_SACK_BASE +` continuation
			 * line is missing from this copy (original line 3353) —
			 * verify against upstream. */
					  (tp->eff_sacks * TCPOLEN_SACK_PERBLOCK)));
		for(this_sack = 0; this_sack < tp->eff_sacks; this_sack++) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}
		/* A D-SACK block is reported only once; drop it after emitting. */
		if (tp->dsack) {
			tp->dsack = 0;
			tp->eff_sacks--;
		}
	}
#endif
}
3366
3367/* Construct a tcp options header for a SYN or SYN_ACK packet.
3368 * If this is ever changed, make sure to change the definition of
3369 * MAX_SYN_SIZE to match the new maximum number of options that you
3370 * can generate.
3371 */
/*
 * Emit the MSS / timestamp / SACK-permitted / window-scale options for
 * a SYN or SYN-ACK into the option area at `ptr`.
 * Body compiled out (#if 0); the stub does nothing.
 */
static __inline void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
					   int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent)
{
#if 0
	/* We always get an MSS option.
	 * The option bytes which will be seen in normal data
	 * packets should timestamps be used, must be in the MSS
	 * advertised. But we subtract them from tp->mss_cache so
	 * that calculations in tcp_sendmsg are simpler etc.
	 * So account for this fact here if necessary. If we
	 * don't do this correctly, as a receiver we won't
	 * recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK
	 * rules correctly.
	 * SACKs don't matter, we never delay an ACK when we
	 * have any of those going out.
	 */
	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
	if (ts) {
		if(sack)
			/* NOTE(review): original lines 3392-3393 are missing from
			 * this copy; upstream emits the combined SACK-permitted +
			 * timestamp option word here — verify against upstream. */
		else
			*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			/* NOTE(review): continuation line missing (original line
			 * 3396); upstream: (TCPOPT_TIMESTAMP << 8) |
			 * TCPOLEN_TIMESTAMP) — verify. */
		*ptr++ = htonl(tstamp);		/* TSVAL */
		*ptr++ = htonl(ts_recent);	/* TSECR */
	} else if(sack)
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
		/* NOTE(review): continuation line missing (original line 3401);
		 * upstream: (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM) —
		 * verify. */
	if (offer_wscale)
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
#endif
}
3406
3407/* Determine a window scaling and initial window to offer.
3408 * Based on the assumption that the given amount of space
3409 * will be offered. Store the results in the tp structure.
3410 * NOTE: for smooth operation initial space offering should
3411 * be a multiple of mss if possible. We assume here that mss >= 1.
3412 * This MUST be enforced by all callers.
3413 */
/*
 * Choose the initial receive window, window clamp and window-scale
 * factor to advertise, given `__space` bytes of receive buffer.
 * Results are stored through the rcv_wnd / window_clamp / rcv_wscale
 * out-parameters.  Body compiled out (#if 0); the stub does nothing.
 */
static __inline void tcp_select_initial_window(int __space, __u32 mss,
					       __u32 *rcv_wnd,
					       __u32 *window_clamp,
					       int wscale_ok,
					       __u8 *rcv_wscale)
{
#if 0
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. We try to be nice.
	 * If we are not window scaling, then this truncates
	 * our initial window offering to 32k. There should also
	 * be a sysctl option to stop being nice.
	 */
	(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* See RFC1323 for an explanation of the limit to 14 */
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
		if (*rcv_wscale && sysctl_tcp_app_win && space>=mss &&
		    space - max((space>>sysctl_tcp_app_win), mss>>*rcv_wscale) < 65536/2)
			(*rcv_wscale)--;
	}

	/* Set initial window to value enough for senders,
	 * following RFC 2414 (the comment previously said "RFC1414",
	 * which is the Ident MIB; the 2/3/4-segment ladder below is the
	 * RFC 2414 larger-initial-window schedule).  Senders not
	 * following this RFC will be satisfied with 2.
	 */
	if (mss > (1<<*rcv_wscale)) {
		int init_cwnd = 4;
		if (mss > 1460*3)
			init_cwnd = 2;
		else if (mss > 1460)
			init_cwnd = 3;
		if (*rcv_wnd > init_cwnd*mss)
			*rcv_wnd = init_cwnd*mss;
	}
	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
#endif
}
3468
/* Portion of `space` usable as receive window, after reserving the
 * application buffer share controlled by sysctl_tcp_adv_win_scale
 * (negative values select a pure right-shift instead).
 * Body compiled out (#if 0); the stub always returns 0. */
static __inline int tcp_win_from_space(int space)
{
#if 0
	if (sysctl_tcp_adv_win_scale <= 0)
		return space >> (-sysctl_tcp_adv_win_scale);
	return space - (space >> sysctl_tcp_adv_win_scale);
#else
	return 0;
#endif
}
3479
3480/* Note: caller must be prepared to deal with negative returns */
/* Receive window derivable from the buffer space not yet consumed by
 * queued receive data.  May be negative upstream; the stub returns 0. */
static __inline int tcp_space(struct sock *sk)
{
#if 0
	int unused = sk->rcvbuf - atomic_read(&sk->rmem_alloc);

	return tcp_win_from_space(unused);
#else
	return 0;
#endif
}
3489
/* Receive window derivable from the entire receive buffer, ignoring
 * what is currently allocated.  Stub (#if 0) returns 0. */
static __inline int tcp_full_space(struct sock *sk)
{
#if 0
	return tcp_win_from_space(sk->rcvbuf);
#else
	return 0;
#endif
}
3498
/* One pending connection was taken off sk's accept queue. */
static __inline void tcp_acceptq_removed(struct sock *sk)
{
#if 0
	--sk->ack_backlog;
#endif
}
3505
/* One pending connection was placed on sk's accept queue. */
static __inline void tcp_acceptq_added(struct sock *sk)
{
#if 0
	++sk->ack_backlog;
#endif
}
3512
/* Nonzero when sk's accept backlog exceeds its configured maximum.
 * Stub (#if 0) always reports "not full". */
static __inline int tcp_acceptq_is_full(struct sock *sk)
{
#if 0
	return (sk->max_ack_backlog < sk->ack_backlog);
#else
	return 0;
#endif
}
3521
/*
 * Append completed-connection request `req` (whose established child
 * socket is `child`) to the tail of sk's accept queue.
 * Body compiled out (#if 0); the stub does nothing.
 */
static __inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
				       struct sock *child)
{
#if 0
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	req->sk = child;
	/* NOTE(review): original line 3529 is missing from this copy;
	 * upstream bumps the accept backlog here (tcp_acceptq_added(sk)) —
	 * verify against upstream. */

	if (!tp->accept_queue_tail) {
		tp->accept_queue = req;
	} else {
		tp->accept_queue_tail->dl_next = req;
	}
	tp->accept_queue_tail = req;
	req->dl_next = NULL;
#endif
}
3540
3541struct tcp_listen_opt
3542{
3543 u8 max_qlen_log; /* log_2 of maximal queued SYNs */
3544 int qlen;
3545 int qlen_young;
3546 int clock_hand;
3548};
3549
/*
 * Account for removal of `req` from sk's SYN queue: decrement the total
 * count and, if the request had never been retransmitted, the "young"
 * count as well.  Body compiled out (#if 0); the stub does nothing.
 */
static __inline void
tcp_synq_removed(struct sock *sk, struct open_request *req)
{
#if 0
	struct tcp_listen_opt *lopt = sk->tp_pinfo.af_tcp.listen_opt;

	if (--lopt->qlen == 0)
		/* NOTE(review): the statement body of this `if` is missing
		 * from this copy (original line 3557); upstream Linux 2.4
		 * stops the SYN-ACK retransmit timer here
		 * (tcp_delete_keepalive_timer(sk)) — verify. */
	if (req->retrans == 0)
		lopt->qlen_young--;
#endif
}
3562
/*
 * Account for a new request on sk's SYN queue; a fresh request always
 * starts "young".  Body compiled out (#if 0); the stub does nothing.
 */
static __inline void tcp_synq_added(struct sock *sk)
{
#if 0
	struct tcp_listen_opt *lopt = sk->tp_pinfo.af_tcp.listen_opt;

	if (lopt->qlen++ == 0)
		/* NOTE(review): the statement body of this `if` is missing
		 * from this copy (original line 3569); upstream Linux 2.4
		 * starts the SYN-ACK timer here
		 * (tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT)) — verify. */
	lopt->qlen_young++;
#endif
}
3573
/* Total number of pending SYNs on sk's listen queue (stub: 0). */
static __inline int tcp_synq_len(struct sock *sk)
{
#if 0
	struct tcp_listen_opt *lopt = sk->tp_pinfo.af_tcp.listen_opt;

	return lopt->qlen;
#else
	return 0;
#endif
}
3582
/* Number of never-retransmitted ("young") pending SYNs (stub: 0). */
static __inline int tcp_synq_young(struct sock *sk)
{
#if 0
	struct tcp_listen_opt *lopt = sk->tp_pinfo.af_tcp.listen_opt;

	return lopt->qlen_young;
#else
	return 0;
#endif
}
3591
/* Nonzero once the SYN queue length reaches 2^max_qlen_log — the
 * right shift keeps exactly the overflow bits.  Stub (#if 0): 0. */
static __inline int tcp_synq_is_full(struct sock *sk)
{
#if 0
	struct tcp_listen_opt *lopt = sk->tp_pinfo.af_tcp.listen_opt;

	return tcp_synq_len(sk) >> lopt->max_qlen_log;
#else
	return 0;
#endif
}
3600
/*
 * Unlink `req` from its SYN-queue hash chain, with `prev` pointing at
 * the chain slot that currently references it.  The unlink is done
 * under tp->syn_wait_lock; queue-length accounting is the caller's job
 * (see tcp_synq_removed / tcp_synq_drop).
 * Body compiled out (#if 0); the stub does nothing.
 */
static __inline void tcp_synq_unlink(struct tcp_opt *tp, struct open_request *req,
				     struct open_request **prev)
{
#if 0
	write_lock(&tp->syn_wait_lock);
	*prev = req->dl_next;
	write_unlock(&tp->syn_wait_lock);
#endif
}
3610
/*
 * Completely discard pending request `req` from sk's SYN queue:
 * unlink it from the hash chain, fix up the queue accounting, then
 * free the request.  Body compiled out (#if 0); the stub does nothing.
 */
static __inline void tcp_synq_drop(struct sock *sk, struct open_request *req,
				   struct open_request **prev)
{
#if 0
	tcp_synq_unlink(&sk->tp_pinfo.af_tcp, req, prev);
	tcp_synq_removed(sk, req);
	tcp_openreq_free(req);
#endif
}
3620
/*
 * Initialize a fresh open_request from the option state parsed into
 * `tp` and the incoming SYN segment `skb`.
 * Body compiled out (#if 0); the stub does nothing.
 */
static __inline void tcp_openreq_init(struct open_request *req,
				      struct tcp_opt *tp,
				      struct sk_buff *skb)
{
#if 0
	/* Identity of the connection attempt. */
	req->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->rmt_port = skb->h.th->source;
	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */

	/* Negotiated/parsed options from the SYN. */
	req->mss = tp->mss_clamp;
	req->tstamp_ok = tp->tstamp_ok;
	req->ts_recent = tp->saw_tstamp ? tp->rcv_tsval : 0;
	req->sack_ok = tp->sack_ok;
	req->wscale_ok = tp->wscale_ok;
	req->snd_wscale = tp->snd_wscale;

	/* Flags that start cleared for every new request. */
	req->acked = 0;
	req->ecn_ok = 0;
#endif
}
3639
3640#define TCP_MEM_QUANTUM ((int)PAGE_SIZE)
3641
/* Release a write-queue skb and return its memory to the socket's
 * send-side accounting.  Stub (#if 0) does nothing. */
static __inline void tcp_free_skb(struct sock *sk, struct sk_buff *skb)
{
#if 0
	const unsigned int bytes = skb->truesize;

	sk->tp_pinfo.af_tcp.queue_shrunk = 1;
	sk->wmem_queued -= bytes;
	sk->forward_alloc += bytes;
	__kfree_skb(skb);
#endif
}
3651
/* Charge a newly queued write skb against the socket's send-side
 * memory accounting.  Stub (#if 0) does nothing. */
static __inline void tcp_charge_skb(struct sock *sk, struct sk_buff *skb)
{
#if 0
	const unsigned int bytes = skb->truesize;

	sk->wmem_queued += bytes;
	sk->forward_alloc -= bytes;
#endif
}
3659
3660extern void __tcp_mem_reclaim(struct sock *sk);
3661extern int tcp_mem_schedule(struct sock *sk, int size, int kind);
3662
/*
 * Return surplus forward-allocated memory to the global TCP pool once
 * at least one TCP_MEM_QUANTUM has accumulated.
 * Body compiled out (#if 0); the stub does nothing.
 */
static __inline void tcp_mem_reclaim(struct sock *sk)
{
#if 0
	if (sk->forward_alloc >= TCP_MEM_QUANTUM)
		/* NOTE(review): the statement body of this `if` is missing
		 * from this copy (original line 3667); it is almost certainly
		 * the __tcp_mem_reclaim(sk) call declared extern just above —
		 * verify before enabling. */
#endif
}
3670
/*
 * Latch the global TCP memory-pressure state, counting the transition
 * in SNMP stats.  Body compiled out (#if 0); the stub does nothing.
 */
static __inline void tcp_enter_memory_pressure(void)
{
#if 0
	if (!tcp_memory_pressure) {
		NET_INC_STATS(TCPMemoryPressures);
		/* NOTE(review): original line 3676 is missing from this copy;
		 * upstream sets tcp_memory_pressure = 1 here — verify. */
	}
#endif
}
3680
/*
 * Shrink sk->sndbuf toward half the currently queued send data, but
 * never below SOCK_MIN_SNDBUF — unless the user pinned the buffer size
 * via SO_SNDBUF (SOCK_SNDBUF_LOCK).  Stub (#if 0) does nothing.
 */
static __inline void tcp_moderate_sndbuf(struct sock *sk)
{
#if 0
	if (!(sk->userlocks & SOCK_SNDBUF_LOCK)) {
		int moderated = sk->wmem_queued / 2;

		if (moderated > sk->sndbuf)	/* min(sndbuf, queued/2) */
			moderated = sk->sndbuf;
		if (moderated < SOCK_MIN_SNDBUF)	/* enforce the floor */
			moderated = SOCK_MIN_SNDBUF;
		sk->sndbuf = moderated;
	}
#endif
}
3690
/*
 * Allocate a send skb of `size` payload bytes plus TCP header room,
 * charging `mem` additional bytes against the socket's send-memory
 * accounting.  Returns the skb, or NULL if allocation or memory
 * scheduling fails.  Body compiled out (#if 0); stub returns NULL.
 */
static __inline struct sk_buff *tcp_alloc_pskb(struct sock *sk, int size, int mem, int gfp)
{
#if 0
	struct sk_buff *skb = alloc_skb(size+MAX_TCP_HEADER, gfp);

	if (skb) {
		skb->truesize += mem;
		if (sk->forward_alloc >= (int)skb->truesize ||
		    tcp_mem_schedule(sk, skb->truesize, 0)) {
			/* NOTE(review): original line 3700 is missing from this
			 * copy; upstream reserves header room here
			 * (skb_reserve(skb, MAX_TCP_HEADER)) — verify. */
			return skb;
		}
		__kfree_skb(skb);
	} else {
		/* NOTE(review): original lines 3705-3706 are missing from
		 * this copy; upstream enters memory pressure and moderates
		 * the send buffer here — verify against upstream. */
	}
	return NULL;
#else
	return NULL;
#endif
}
3713
/*
 * Allocate a send skb with no extra pre-charged memory; thin wrapper
 * over tcp_alloc_pskb(sk, size, 0, gfp).
 * Body compiled out (#if 0); the stub always returns NULL.
 */
static __inline struct sk_buff *tcp_alloc_skb(struct sock *sk, int size, int gfp)
{
#if 0
	return tcp_alloc_pskb(sk, size, 0, gfp);
#else
	return NULL;
#endif
}
3722
/*
 * Allocate one page for TCP send data, provided the socket may charge
 * a page against its forward allocation; returns NULL otherwise or on
 * allocation failure.  Body compiled out (#if 0); stub returns NULL.
 */
static __inline struct page * tcp_alloc_page(struct sock *sk)
{
#if 0
	if (sk->forward_alloc >= (int)PAGE_SIZE ||
	    tcp_mem_schedule(sk, PAGE_SIZE, 0)) {
		struct page *page = alloc_pages(sk->allocation, 0);
		if (page)
			return page;
	}
	/* NOTE(review): original lines 3732-3733 are missing from this
	 * copy; upstream enters memory pressure and moderates the send
	 * buffer before returning NULL — verify against upstream. */
	return NULL;
#else
	return NULL;
#endif
}
3739
/*
 * Free every skb still sitting on sk's write queue, returning the
 * accounted memory.  Body compiled out (#if 0); the stub does nothing.
 */
static __inline void tcp_writequeue_purge(struct sock *sk)
{
#if 0
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->write_queue)) != NULL)
		tcp_free_skb(sk, skb);
	/* NOTE(review): original line 3747 is missing from this copy;
	 * upstream calls tcp_mem_reclaim(sk) after the purge — verify. */
#endif
}
3750
3751extern void tcp_rfree(struct sk_buff *skb);
3752
/*
 * Attach a received skb to sk for memory accounting: charge rmem_alloc,
 * debit forward_alloc, and install tcp_rfree() so the charge is undone
 * when the skb is freed.  Stub (#if 0) does nothing.
 */
static __inline void tcp_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
#if 0
	const unsigned int charged = skb->truesize;

	skb->sk = sk;
	skb->destructor = tcp_rfree;
	atomic_add(charged, &sk->rmem_alloc);
	sk->forward_alloc -= charged;
#endif
}
3762
3763extern void tcp_listen_wlock(void);
3764
3765/* - We may sleep inside this lock.
3766 * - If sleeping is not required (or called from BH),
3767 * use plain read_(un)lock(&tcp_lhash_lock).
3768 */
3769
/*
 * Register this thread as a user of the listening-socket hash (see the
 * locking comment above).  Body compiled out (#if 0); stub does nothing.
 */
static __inline void tcp_listen_lock(void)
{
#if 0
	/* read_lock synchronizes to candidates to writers */
	read_lock(&tcp_lhash_lock);
	/* NOTE(review): original line 3775 is missing from this copy;
	 * upstream increments tcp_lhash_users here, before dropping the
	 * lock — verify against upstream. */
	read_unlock(&tcp_lhash_lock);
#endif
}
3779
/*
 * Release the listening-hash usage taken by tcp_listen_lock().
 * Body compiled out (#if 0), and original lines 3783-3784 are missing
 * from this copy entirely; upstream decrements tcp_lhash_users and
 * wakes tcp_lhash_wait when the count hits zero — verify before
 * enabling.
 */
static __inline void tcp_listen_unlock(void)
{
#if 0
#endif
}
3787
/* Keepalive probe interval for this connection: the per-socket value
 * when set, else the system-wide sysctl default.  Stub (#if 0): 0. */
static __inline int keepalive_intvl_when(struct tcp_opt *tp)
{
#if 0
	if (tp->keepalive_intvl)
		return tp->keepalive_intvl;
	return sysctl_tcp_keepalive_intvl;
#else
	return 0;
#endif
}
3796
/* Idle time before keepalive probing starts: the per-socket value when
 * set, else the system-wide sysctl default.  Stub (#if 0): 0. */
static __inline int keepalive_time_when(struct tcp_opt *tp)
{
#if 0
	if (tp->keepalive_time)
		return tp->keepalive_time;
	return sysctl_tcp_keepalive_time;
#else
	return 0;
#endif
}
3805
/*
 * FIN_WAIT_2 timeout for this connection: linger2 when set (else the
 * sysctl default), but never shorter than 3.5 * RTO.
 * Stub (#if 0) returns 0.
 */
static __inline int tcp_fin_time(struct tcp_opt *tp)
{
#if 0
	int timeout = tp->linger2;
	int rto_floor;

	if (timeout == 0)
		timeout = sysctl_tcp_fin_timeout;

	/* (rto << 2) - (rto >> 1) == 3.5 * rto */
	rto_floor = (tp->rto << 2) - (tp->rto >> 1);
	if (timeout < rto_floor)
		timeout = rto_floor;

	return timeout;
#else
	return 0;
#endif
}
3819
/*
 * PAWS (RFC 1323) validation.  Returns nonzero when the segment's
 * timestamp went backwards recently enough to count as a genuine
 * violation (segment should be dropped); returns zero when the
 * timestamp is acceptable or our stored value is too stale to trust.
 * Stub (#if 0) always returns 0.
 */
static __inline int tcp_paws_check(struct tcp_opt *tp, int rst)
{
#if 0
	/* Acceptable when the timestamp did not go backwards, or when the
	 * remembered timestamp is older than the 24-day wrap horizon. */
	if ((s32)(tp->rcv_tsval - tp->ts_recent) >= 0 ||
	    xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_24DAYS)
		return 0;

	/* RST segments are not recommended to carry timestamp,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, it is mistake. It is necessary to understand the reasons
	   of this constraint to relax it: if peer reboots, clock may go
	   out-of-sync and half-open connections will not be reset.
	   Actually, the problem would be not existing if all
	   the implementations followed draft about maintaining clock
	   via reboots. Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;

	return 1;
#else
	return 0;
#endif
}
3847
3848#define TCP_CHECK_TIMER(sk) do { } while (0)
3849
3850#endif /* __TCPCORE_H */
3851
3852
3853//
3854#endif
static int nonblock(int fd, int isnonblock)
Definition: adnsresfilter.c:86
static int state
Definition: maze.c:121
#define atomic_read(v)
Definition: atomic.h:23
static void atomic_inc(atomic_t volatile *v)
Definition: atomic.h:95
static void atomic_add(int volatile i, atomic_t volatile *v)
Definition: atomic.h:43
static int atomic_dec_and_test(atomic_t volatile *v)
Definition: atomic.h:121
#define msg(x)
Definition: auth_time.c:54
void tcp_disconnect(void)
Definition: tcp.c:832
#define TCP_CLOSE(_sck)
Definition: tcp.c:35
u16 __u16
Definition: btrfs.h:18
u8 __u8
Definition: btrfs.h:17
ULONG32 u32
Definition: btrfs.h:14
UCHAR u8
Definition: btrfs.h:12
USHORT u16
Definition: btrfs.h:13
#define NULL
Definition: types.h:112
unsigned int(__cdecl typeof(jpeg_read_scanlines))(struct jpeg_decompress_struct *
Definition: typeof.h:31
r parent
Definition: btrfs.c:3010
#define IPPROTO_TCP
Definition: ip.h:196
__kernel_size_t size_t
Definition: linux.h:237
signed int s32
Definition: linux.h:57
#define PAGE_SIZE
Definition: env_spec_w32.h:49
GLint level
Definition: gl.h:1546
GLsizeiptr size
Definition: glext.h:5919
GLenum GLenum dst
Definition: glext.h:6340
GLbitfield flags
Definition: glext.h:7161
GLuint GLfloat * val
Definition: glext.h:7180
GLenum GLsizei len
Definition: glext.h:6722
static real win[4][36]
#define KERN_DEBUG
Definition: module.h:229
#define __constant_htonl(x)
Definition: module.h:92
#define printk
Definition: module.h:231
int wake_up(wait_queue_head_t *queue)
Definition: linux.c:279
#define ntohl(x)
Definition: module.h:205
#define jiffies
Definition: module.h:1085
#define htonl(x)
Definition: module.h:214
static PVOID ptr
Definition: dispmode.c:27
static const WCHAR desc[]
Definition: protectdata.c:36
static const WCHAR sp[]
Definition: suminfo.c:287
static HWND child
Definition: cursoricon.c:298
static int priority
Definition: timer.c:163
#define min(a, b)
Definition: monoChain.cc:55
#define err(...)
#define memset(x, y, z)
Definition: compat.h:39
SOCKET WSAAPI socket(IN INT af, IN INT type, IN INT protocol)
Definition: socklife.c:143
Definition: tcpcore.h:1673
Definition: fci.c:127
Definition: mem.c:349
__u16 snd_wscale
Definition: tcpcore.h:2292
__u32 rcv_isn
Definition: tcpcore.h:2286
__u16 ecn_ok
Definition: tcpcore.h:2297
__u16 sack_ok
Definition: tcpcore.h:2295
__u16 rmt_port
Definition: tcpcore.h:2288
struct sock * sk
Definition: tcpcore.h:2305
__u32 rcv_wnd
Definition: tcpcore.h:2301
struct open_request * dl_next
Definition: tcpcore.h:2285
__u16 wscale_ok
Definition: tcpcore.h:2296
__u16 acked
Definition: tcpcore.h:2298
__u16 tstamp_ok
Definition: tcpcore.h:2294
__u32 ts_recent
Definition: tcpcore.h:2302
__u8 retrans
Definition: tcpcore.h:2290
__u16 mss
Definition: tcpcore.h:2289
Definition: module.h:576
struct sk_buff * next
Definition: tcpcore.h:141
unsigned int csum
Definition: tcpcore.h:190
struct sock * sk
Definition: tcpcore.h:145
struct tcphdr * th
Definition: tcpcore.h:152
union sk_buff::@1016 h
unsigned char ip_summed
Definition: tcpcore.h:194
unsigned int len
Definition: tcpcore.h:188
unsigned int truesize
Definition: tcpcore.h:199
void(* destructor)(struct sk_buff *)
Definition: tcpcore.h:206
Definition: tcpcore.h:1455
struct sock * prev
Definition: tcpcore.h:1494
unsigned char userlocks
Definition: tcpcore.h:1511
unsigned int allocation
Definition: tcpcore.h:1492
struct tcp_opt af_tcp
Definition: tcpcore.h:1543
int forward_alloc
Definition: tcpcore.h:1490
unsigned short ack_backlog
Definition: tcpcore.h:1560
int wmem_queued
Definition: tcpcore.h:1489
unsigned short max_ack_backlog
Definition: tcpcore.h:1561
struct sk_buff_head write_queue
Definition: tcpcore.h:1487
int sndbuf
Definition: tcpcore.h:1493
union sock::@1023 tp_pinfo
atomic_t rmem_alloc
Definition: tcpcore.h:1484
int rcvbuf
Definition: tcpcore.h:1479
struct open_request * syn_table[TCP_SYNQ_HSIZE]
Definition: tcpcore.h:3524
__u32 snd_wnd
Definition: tcpcore.h:1312
__u32 end_seq
Definition: tcpcore.h:1263
__u32 start_seq
Definition: tcpcore.h:1262
__u32 when
Definition: tcpcore.h:2811
__u32 seq
Definition: tcpcore.h:2809
__u8 sacked
Definition: tcpcore.h:2826
__u16 urg_ptr
Definition: tcpcore.h:2839
__u8 flags
Definition: tcpcore.h:2812
__u32 end_seq
Definition: tcpcore.h:2810
__u32 ack_seq
Definition: tcpcore.h:2840
union tcp_skb_cb::@1026 header
Definition: tcpdef.h:22
Definition: dhcpd.h:245
#define max(a, b)
Definition: svc.c:63
static __inline void skb_queue_head_init(struct sk_buff_head *list)
Definition: tcpcore.h:450
static __inline int tcp_fin_time(struct tcp_opt *tp)
Definition: tcpcore.h:3783
void tcp_send_delayed_ack(struct sock *sk)
#define TCP_TIMEOUT_INIT
Definition: tcpcore.h:2130
static __inline void tcp_check_probe_timer(struct sock *sk, struct tcp_opt *tp)
Definition: tcpcore.h:3081
#define TCPOPT_MSS
Definition: tcpcore.h:2191
#define TCPOPT_SACK
Definition: tcpcore.h:2194
__u32 tcp_init_cwnd(struct tcp_opt *tp)
struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, struct open_request *req, struct dst_entry *dst)
#define TCP_ATO_MIN
Definition: tcpcore.h:2126
#define TCPOLEN_SACK_BASE
Definition: tcpcore.h:2210
void tcp_cwnd_application_limited(struct sock *sk)
static __inline void tcp_openreq_free(struct open_request *req)
Definition: tcpcore.h:2320
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
void tcp_update_metrics(struct sock *sk)
int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
#define TCP_SYNQ_HSIZE
Definition: tcpcore.h:2148
void tcp_parse_options(struct sk_buff *skb, struct tcp_opt *tp, int estab)
static __inline int tcp_ack_scheduled(struct tcp_opt *tp)
Definition: tcpcore.h:2461
struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, struct open_request *req)
int tcp_v4_build_header(struct sock *sk, struct sk_buff *skb)
int tcp_v4_hash_connecting(struct sock *sk)
static __inline void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd, __u32 *window_clamp, int wscale_ok, __u8 *rcv_wscale)
Definition: tcpcore.h:3391
static __inline void tcp_schedule_ack(struct tcp_opt *tp)
Definition: tcpcore.h:2456
static __inline int tcp_space(struct sock *sk)
Definition: tcpcore.h:3458
static __inline __u32 skb_queue_len(struct sk_buff_head *list_)
Definition: tcpcore.h:445
static __inline void tcp_prequeue_init(struct tcp_opt *tp)
Definition: tcpcore.h:3194
#define TCPOLEN_SACK_PERBLOCK
Definition: tcpcore.h:2212
#define TCP_SKB_CB(__skb)
Definition: tcpcore.h:2843
int tcp_retransmit_skb(struct sock *, struct sk_buff *)
#define TCPOPT_SACK_PERM
Definition: tcpcore.h:2193
void tcp_init_xmit_timers(struct sock *)
int(* sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, unsigned int, size_t)
Definition: tcpcore.h:2615
#define tcp_time_stamp
Definition: tcpcore.h:2791
int tcp_write_xmit(struct sock *, int nonagle)
int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
#define TCP_PAWS_MSL
Definition: tcpcore.h:2151
#define TCP_TIME_PROBE0
Definition: tcpcore.h:2216
static __inline int tcp_acceptq_is_full(struct sock *sk)
Definition: tcpcore.h:3490
void tcp_rfree(struct sk_buff *skb)
static __inline struct page * tcp_alloc_page(struct sock *sk)
Definition: tcpcore.h:3700
void tcp_unhash(struct sock *sk)
#define TCP_RTO_MIN
Definition: tcpcore.h:2129
static __inline__ int tcp_minshall_check(struct tcp_opt *tp)
Definition: tcpcore.h:2999
#define TCPOPT_TIMESTAMP
Definition: tcpcore.h:2195
#define TCP_RTO_MAX
Definition: tcpcore.h:2128
#define tcp_lhash_wait
Definition: tcpcore.h:1932
void tcp_xmit_retransmit_queue(struct sock *)
static __inline u16 tcp_v4_check(struct tcphdr *th, int len, unsigned long saddr, unsigned long daddr, unsigned long base)
Definition: tcpcore.h:3162
const char timer_bug_msg[]
static __inline int tcp_win_from_space(int space)
Definition: tcpcore.h:3446
static __inline void tcp_sack_reset(struct tcp_opt *tp)
Definition: tcpcore.h:3303
void tcp_send_probe0(struct sock *)
void tcp_send_skb(struct sock *, struct sk_buff *, int force_queue, unsigned mss_now)
#define TCPOLEN_TIMESTAMP
Definition: tcpcore.h:2204
int tcp_transmit_skb(struct sock *, struct sk_buff *)
#define tcp_lhash_lock
Definition: tcpcore.h:1930
int tcp_send_synack(struct sock *)
#define TCPOPT_WINDOW
Definition: tcpcore.h:2192
static __inline u32 tcp_receive_window(struct tcp_opt *tp)
Definition: tcpcore.h:2766
void tcp_close(struct sock *sk, long timeout)
#define TCP_TIME_RETRANS
Definition: tcpcore.h:2214
static __inline __u32 tcp_max_burst(struct tcp_opt *tp)
Definition: tcpcore.h:2994
int tcp_connect(struct sock *sk)
static __inline int __tcp_checksum_complete(struct sk_buff *skb)
Definition: tcpcore.h:3173
unsigned int skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum)
tcp_tw_status
Definition: tcpcore.h:2487
@ TCP_TW_RST
Definition: tcpcore.h:2489
@ TCP_TW_SUCCESS
Definition: tcpcore.h:2488
@ TCP_TW_SYN
Definition: tcpcore.h:2491
@ TCP_TW_ACK
Definition: tcpcore.h:2490
int tcp_child_process(struct sock *parent, struct sock *child, struct sk_buff *skb)
static __inline void tcp_acceptq_added(struct sock *sk)
Definition: tcpcore.h:3483
static __inline void tcp_clear_xmit_timer(struct sock *sk, int what)
Definition: tcpcore.h:2620
static __inline struct sk_buff * __skb_dequeue(struct sk_buff_head *list)
Definition: tcpcore.h:568
#define TCP_TIME_DACK
Definition: tcpcore.h:2215
struct sock * cookie_v4_check(struct sock *sk, struct sk_buff *skb, struct ip_options *opt)
static __inline void tcp_set_state(struct sock *sk, int state)
Definition: tcpcore.h:3256
void tcp_enter_loss(struct sock *sk, int how)
static __inline void tcp_delack_init(struct tcp_opt *tp)
Definition: tcpcore.h:2476
#define TCP_MIN_RCVMSS
Definition: tcpcore.h:2081
static __inline struct sk_buff * tcp_alloc_skb(struct sock *sk, int size, int gfp)
Definition: tcpcore.h:3691
void tcp_simple_retransmit(struct sock *)
static __inline void tcp_init_wl(struct tcp_opt *tp, u32 ack, u32 seq)
Definition: tcpcore.h:3142
#define MAX_TCP_WINDOW
Definition: tcpcore.h:2075
#define tcp_lhash_users
Definition: tcpcore.h:1931
static __inline int keepalive_time_when(struct tcp_opt *tp)
Definition: tcpcore.h:3774
static __inline void tcp_listen_lock(void)
Definition: tcpcore.h:3747
static __inline int tcp_nagle_check(struct tcp_opt *tp, struct sk_buff *skb, unsigned mss_now, int nonagle)
Definition: tcpcore.h:3026
static __inline int tcp_synq_is_full(struct sock *sk)
Definition: tcpcore.h:3569
struct sock * tcp_accept(struct sock *sk, int flags, int *err)
struct sk_buff * alloc_skb(unsigned int size, int priority)
static __inline struct sk_buff * tcp_alloc_pskb(struct sock *sk, int size, int mem, int gfp)
Definition: tcpcore.h:3668
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, int len, int nonblock, int flags, int *addr_len)
int tcp_listen_start(struct sock *sk)
static __inline void tcp_enter_cwr(struct tcp_opt *tp)
Definition: tcpcore.h:2978
__inline int after(__u32 seq1, __u32 seq2)
Definition: tcpcore.h:2395
#define TCPOLEN_WINDOW
Definition: tcpcore.h:2202
int tcp_write_wakeup(struct sock *)
#define TCP_PAWS_24DAYS
Definition: tcpcore.h:2150
#define TCPOLEN_SACK_PERM
Definition: tcpcore.h:2203
static __inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
Definition: tcpcore.h:2656
static __inline void tcp_sync_left_out(struct tcp_opt *tp)
Definition: tcpcore.h:2932
static __inline void tcp_done(struct sock *sk)
Definition: tcpcore.h:3288
static __inline int tcp_paws_check(struct tcp_opt *tp, int rst)
Definition: tcpcore.h:3797
struct sock * tcp_check_req(struct sock *sk, struct sk_buff *skb, struct open_request *req, struct open_request **prev)
#define TCPOLEN_SACK_BASE_ALIGNED
Definition: tcpcore.h:2211
static __inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
Definition: tcpcore.h:2905
static __inline int tcp_full_space(struct sock *sk)
Definition: tcpcore.h:3467
static __inline void tcp_synq_added(struct sock *sk)
Definition: tcpcore.h:3540
#define TCPCB_FLAG_FIN
Definition: tcpcore.h:2817
static __inline void tcp_acceptq_removed(struct sock *sk)
Definition: tcpcore.h:3476
void __tcp_mem_reclaim(struct sock *sk)
void tcp_put_port(struct sock *sk)
static __inline void tcp_minshall_update(struct tcp_opt *tp, int mss, struct sk_buff *skb)
Definition: tcpcore.h:3009
static __inline void tcp_synq_removed(struct sock *sk, struct open_request *req)
Definition: tcpcore.h:3528
static __inline void tcp_free_skb(struct sock *sk, struct sk_buff *skb)
Definition: tcpcore.h:3619
static __inline void tcp_cwnd_validate(struct sock *sk, struct tcp_opt *tp)
Definition: tcpcore.h:2945
static __inline void skb_reserve(struct sk_buff *skb, unsigned int len)
Definition: tcpcore.h:927
static __inline void tcp_clear_options(struct tcp_opt *tp)
Definition: tcpcore.h:2481
void tcp_send_fin(struct sock *sk)
static __inline int tcp_wspace(struct sock *sk)
Definition: tcpcore.h:2867
#define TCP_MEM_QUANTUM
Definition: tcpcore.h:3617
static __inline int tcp_may_send_now(struct sock *sk, struct tcp_opt *tp)
Definition: tcpcore.h:3129
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
@ TCP_ACK_SCHED
Definition: tcpcore.h:2451
@ TCP_ACK_TIMER
Definition: tcpcore.h:2452
@ TCP_ACK_PUSHED
Definition: tcpcore.h:2453
static __inline void tcp_set_owner_r(struct sk_buff *skb, struct sock *sk)
Definition: tcpcore.h:3730
static __inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
Definition: tcpcore.h:3212
static __inline void tcp_synq_unlink(struct tcp_opt *tp, struct open_request *req, struct open_request **prev)
Definition: tcpcore.h:3578
static __inline int tcp_skb_is_last(struct sock *sk, struct sk_buff *skb)
Definition: tcpcore.h:3089
int tcp_v4_rebuild_header(struct sock *sk)
static __inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req, struct sock *child)
Definition: tcpcore.h:3499
void tcp_listen_wlock(void)
static __inline void __tcp_push_pending_frames(struct sock *sk, struct tcp_opt *tp, unsigned cur_mss, int nonagle)
Definition: tcpcore.h:3102
static __inline void tcp_fast_path_check(struct sock *sk, struct tcp_opt *tp)
Definition: tcpcore.h:2751
static __inline void tcp_push_pending_frames(struct sock *sk, struct tcp_opt *tp)
Definition: tcpcore.h:3121
static __inline void tcp_openreq_init(struct open_request *req, struct tcp_opt *tp, struct sk_buff *skb)
Definition: tcpcore.h:3598
static __inline void __tcp_enter_cwr(struct tcp_opt *tp)
Definition: tcpcore.h:2964
static __inline unsigned int tcp_packets_in_flight(struct tcp_opt *tp)
Definition: tcpcore.h:2891
void tcp_write_space(struct sock *sk)
void tcp_clear_retrans(struct tcp_opt *tp)
static __inline int tcp_synq_len(struct sock *sk)
Definition: tcpcore.h:3551
static __inline int tcp_synq_young(struct sock *sk)
Definition: tcpcore.h:3560
static __inline void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack, int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent)
Definition: tcpcore.h:3349
static __inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
Definition: tcpcore.h:524
static __inline __u32 tcp_current_ssthresh(struct tcp_opt *tp)
Definition: tcpcore.h:2918
static __inline void tcp_update_wl(struct tcp_opt *tp, u32 ack, u32 seq)
Definition: tcpcore.h:3149
int tcp_sync_mss(struct sock *sk, u32 pmtu)
static __inline void __tcp_fast_path_on(struct tcp_opt *tp, u32 snd_wnd)
Definition: tcpcore.h:2735
static __inline void tcp_build_and_update_options(__u32 *ptr, struct tcp_opt *tp, __u32 tstamp)
Definition: tcpcore.h:3312
void tcp_clear_xmit_timers(struct sock *)
u32 __tcp_select_window(struct sock *sk)
static __inline void tcp_fast_path_on(struct tcp_opt *tp)
Definition: tcpcore.h:2744
#define TCPOLEN_MSS
Definition: tcpcore.h:2201
void tcp_destroy_sock(struct sock *sk)
#define TCPOPT_NOP
Definition: tcpcore.h:2189
static __inline int tcp_checksum_complete(struct sk_buff *skb)
Definition: tcpcore.h:3182
static __inline void tcp_writequeue_purge(struct sock *sk)
Definition: tcpcore.h:3717
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, sk_read_actor_t recv_actor)
static __inline void tcp_charge_skb(struct sock *sk, struct sk_buff *skb)
Definition: tcpcore.h:3629
void tcp_delete_keepalive_timer(struct sock *)
enum tcp_tw_status tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb, struct tcphdr *th, unsigned len)
int tcp_memory_pressure
__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mss)
#define MAX_TCP_HEADER
Definition: tcpcore.h:2069
static __inline void tcp_mem_reclaim(struct sock *sk)
Definition: tcpcore.h:3640
static __inline int keepalive_intvl_when(struct tcp_opt *tp)
Definition: tcpcore.h:3765
static __inline void tcp_initialize_rcv_mss(struct sock *sk)
Definition: tcpcore.h:2721
static __inline void tcp_moderate_sndbuf(struct sock *sk)
Definition: tcpcore.h:3658
static __inline int tcp_min_write_space(struct sock *sk)
Definition: tcpcore.h:2858
void tcp_send_active_reset(struct sock *sk, int priority)
void tcp_send_partial(struct sock *)
static __inline void tcp_listen_unlock(void)
Definition: tcpcore.h:3757
struct sock * tcp_create_openreq_child(struct sock *sk, struct open_request *req, struct sk_buff *skb)
#define CHECKSUM_UNNECESSARY
Definition: tcpcore.h:45
#define TCP_MIN_MSS
Definition: tcpcore.h:2078
static __inline void tcp_dec_quickack_mode(struct tcp_opt *tp)
Definition: tcpcore.h:2466
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
void tcp_push_one(struct sock *, unsigned mss_now)
static __inline unsigned int tcp_current_mss(struct sock *sk)
Definition: tcpcore.h:2694
unsigned int tcp_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait)
static __inline void tcp_enter_memory_pressure(void)
Definition: tcpcore.h:3648
void tcp_reset_keepalive_timer(struct sock *, unsigned long)
static __inline void tcp_synq_drop(struct sock *sk, struct open_request *req, struct open_request **prev)
Definition: tcpcore.h:3588
void tcp_set_keepalive(struct sock *sk, int val)
void tcp_enter_quickack_mode(struct tcp_opt *tp)
static __inline int tcp_snd_test(struct tcp_opt *tp, struct sk_buff *skb, unsigned cur_mss, int nonagle)
Definition: tcpcore.h:3043
void tcp_send_ack(struct sock *sk)
void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len, struct sk_buff *skb)
void __kfree_skb(struct sk_buff *skb)
int tcp_mem_schedule(struct sock *sk, int size, int kind)
@ TCP_CA_CWR
Definition: tcpdef.h:141
#define TCPF_CA_CWR
Definition: tcpdef.h:142
#define TCPF_CA_Recovery
Definition: tcpdef.h:144
@ TCP_FLAG_ACK
Definition: tcpdef.h:107
@ TCP_ESTABLISHED
Definition: tcpdef.h:59
DWORD hint
Definition: vfdcmd.c:88

◆ tcp_tw_status

Enumerator
TCP_TW_SUCCESS 
TCP_TW_RST 
TCP_TW_ACK 
TCP_TW_SYN 

Definition at line 2486 of file tcpcore.h.

2491{
2492 if (tp->ack.quick && --tp->ack.quick == 0) {

Function Documentation

◆ ___pskb_trim()

int ___pskb_trim ( struct sk_buff skb,
unsigned int  len,
int  realloc 
)

Referenced by __pskb_trim(), and __skb_trim().

◆ __attribute__()

struct tcp_ehash_bucket __attribute__ ( (__aligned__(8))  )

◆ __dev_alloc_skb()

static __inline struct sk_buff * __dev_alloc_skb ( unsigned int  length,
int  gfp_mask 
)
static

__dev_alloc_skb - allocate an skbuff for sending @length: length to allocate @gfp_mask: get_free_pages mask, passed to alloc_skb

Allocate a new &sk_buff and assign it a usage count of one. The buffer has unspecified headroom built in. Users should allocate the headroom they think they need without accounting for the built in space. The built in space is used for optimisations.

NULL is returned if there is no free memory.

Definition at line 1045 of file tcpcore.h.

1047{
1048 struct sk_buff *skb;
1049
1050 skb = alloc_skb(length+16, gfp_mask);
1051 if (skb)
1052 skb_reserve(skb,16);
1053 return skb;
1054}
GLuint GLsizei GLsizei * length
Definition: glext.h:6040

Referenced by dev_alloc_skb().

◆ __kfree_skb()

void __kfree_skb ( struct sk_buff skb)

Referenced by kfree_skb().

◆ __pskb_pull()

static __inline char * __pskb_pull ( struct sk_buff skb,
unsigned int  len 
)
static

Definition at line 869 of file tcpcore.h.

870{
871 if (len > skb_headlen(skb) &&
872 __pskb_pull_tail(skb, len-skb_headlen(skb)) == NULL)
873 return NULL;
874 skb->len -= len;
875 return skb->data += len;
876}
unsigned char * data
Definition: tcpcore.h:202
static __inline int skb_headlen(const struct sk_buff *skb)
Definition: tcpcore.h:762
unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta)

Referenced by pskb_pull().

◆ __pskb_pull_tail()

unsigned char * __pskb_pull_tail ( struct sk_buff skb,
int  delta 
)

Referenced by __pskb_pull(), and pskb_may_pull().

◆ __pskb_trim()

static __inline int __pskb_trim ( struct sk_buff skb,
unsigned int  len 
)
static

Definition at line 962 of file tcpcore.h.

963{
964 if (!skb->data_len) {
965 skb->len = len;
966 skb->tail = skb->data+len;
967 return 0;
968 } else {
969 return ___pskb_trim(skb, len, 1);
970 }
971}
unsigned char * tail
Definition: tcpcore.h:203
unsigned int data_len
Definition: tcpcore.h:189
int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)

Referenced by pskb_trim().

◆ __skb_append()

static __inline void __skb_append ( struct sk_buff old,
struct sk_buff newsk 
)
static

Definition at line 647 of file tcpcore.h.

648{
649 __skb_insert(newsk, old, old->next, old->list);
650}
struct sk_buff_head * list
Definition: tcpcore.h:144
static __inline void __skb_insert(struct sk_buff *newsk, struct sk_buff *prev, struct sk_buff *next, struct sk_buff_head *list)
Definition: tcpcore.h:612

Referenced by skb_append().

◆ __skb_dequeue()

static __inline struct sk_buff * __skb_dequeue ( struct sk_buff_head list)
static

__skb_dequeue - remove from the head of the queue @list: list to dequeue from

Remove the head of the list. This function does not take any locks so must be used with appropriate locks held only. The head item is returned or NULL if the list is empty.

Definition at line 568 of file tcpcore.h.

569{
570 struct sk_buff *next, *prev, *result;
571
572 prev = (struct sk_buff *) list;
573 next = prev->next;
574 result = NULL;
575 if (next != prev) {
576 result = next;
577 next = next->next;
578 list->qlen--;
579 next->prev = prev;
580 prev->next = next;
581 result->next = NULL;
582 result->prev = NULL;
583 result->list = NULL;
584 }
585 return result;
586}
Definition: list.h:37
GLuint64EXT * result
Definition: glext.h:11304
struct sk_buff * prev
Definition: tcpcore.h:142

Referenced by __skb_queue_purge(), skb_dequeue(), and tcp_listen_wlock().

◆ __skb_dequeue_tail()

static __inline struct sk_buff * __skb_dequeue_tail ( struct sk_buff_head list)
static

__skb_dequeue_tail - remove from the tail of the queue @list: list to dequeue from

Remove the tail of the list. This function does not take any locks so must be used with appropriate locks held only. The tail item is returned or NULL if the list is empty.

Definition at line 729 of file tcpcore.h.

730{
731 struct sk_buff *skb = skb_peek_tail(list);
732 if (skb)
733 __skb_unlink(skb, list);
734 return skb;
735}
static __inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
Definition: tcpcore.h:677
static __inline struct sk_buff * skb_peek_tail(struct sk_buff_head *list_)
Definition: tcpcore.h:430

Referenced by skb_dequeue_tail().

◆ __skb_insert()

static __inline void __skb_insert ( struct sk_buff newsk,
struct sk_buff prev,
struct sk_buff next,
struct sk_buff_head list 
)
static

Definition at line 612 of file tcpcore.h.

615{
616 newsk->next = next;
617 newsk->prev = prev;
618 next->prev = newsk;
619 prev->next = newsk;
620 newsk->list = list;
621 list->qlen++;
622}
#define list
Definition: rosglue.h:35

Referenced by __skb_append(), and skb_insert().

◆ __skb_pull()

static __inline char * __skb_pull ( struct sk_buff skb,
unsigned int  len 
)
static

Definition at line 841 of file tcpcore.h.

842{
843 skb->len-=len;
844 if (skb->len < skb->data_len)
845 out_of_line_bug();
846 return skb->data+=len;
847}

Referenced by skb_pull().

◆ __skb_push()

static __inline unsigned char * __skb_push ( struct sk_buff skb,
unsigned int  len 
)
static

Definition at line 810 of file tcpcore.h.

811{
812 skb->data-=len;
813 skb->len+=len;
814 return skb->data;
815}

◆ __skb_put()

static __inline unsigned char * __skb_put ( struct sk_buff skb,
unsigned int  len 
)
static

Definition at line 775 of file tcpcore.h.

776{
777 unsigned char *tmp=skb->tail;
779 skb->tail+=len;
780 skb->len+=len;
781 return tmp;
782}
#define SKB_LINEAR_ASSERT(skb)
Definition: tcpcore.h:769

◆ __skb_queue_head()

static __inline void __skb_queue_head ( struct sk_buff_head list,
struct sk_buff newsk 
)
static

__skb_queue_head - queue a buffer at the list head @list: list to use @newsk: buffer to queue

Queue a buffer at the start of a list. This function takes no locks and you must therefore hold required locks before calling it.

A buffer cannot be placed on two lists at the same time.

Definition at line 476 of file tcpcore.h.

477{
478 struct sk_buff *prev, *next;
479
480 newsk->list = list;
481 list->qlen++;
482 prev = (struct sk_buff *)list;
483 next = prev->next;
484 newsk->next = next;
485 newsk->prev = prev;
486 next->prev = newsk;
487 prev->next = newsk;
488}

Referenced by skb_queue_head().

◆ __skb_queue_purge()

static __inline void __skb_queue_purge ( struct sk_buff_head list)
static

__skb_purge - empty a list @list: list to empty

Delete all buffers on an &sk_buff list. Each buffer is removed from the list and one reference dropped. This function does not take the list lock and the caller must hold the relevant locks to use it.

Definition at line 1025 of file tcpcore.h.

1026{
1027 struct sk_buff *skb;
1028 while ((skb=__skb_dequeue(list))!=NULL)
1029 kfree_skb(skb);
1030}
static __inline void kfree_skb(struct sk_buff *skb)
Definition: tcpcore.h:297

◆ __skb_queue_tail()

static __inline void __skb_queue_tail ( struct sk_buff_head list,
struct sk_buff newsk 
)
static

__skb_queue_tail - queue a buffer at the list tail @list: list to use @newsk: buffer to queue

Queue a buffer at the end of a list. This function takes no locks and you must therefore hold required locks before calling it.

A buffer cannot be placed on two lists at the same time.

Definition at line 524 of file tcpcore.h.

525{
526 struct sk_buff *prev, *next;
527
528 newsk->list = list;
529 list->qlen++;
530 next = (struct sk_buff *)list;
531 prev = next->prev;
532 newsk->next = next;
533 newsk->prev = prev;
534 next->prev = newsk;
535 prev->next = newsk;
536}

Referenced by skb_queue_tail().

◆ __skb_trim()

static __inline void __skb_trim ( struct sk_buff skb,
unsigned int  len 
)
static

Definition at line 935 of file tcpcore.h.

936{
937 if (!skb->data_len) {
938 skb->len = len;
939 skb->tail = skb->data+len;
940 } else {
941 ___pskb_trim(skb, len, 0);
942 }
943}

Referenced by skb_trim().

◆ __skb_unlink()

static __inline void __skb_unlink ( struct sk_buff skb,
struct sk_buff_head list 
)
static

Definition at line 677 of file tcpcore.h.

678{
679 struct sk_buff * next, * prev;
680
681 list->qlen--;
682 next = skb->next;
683 prev = skb->prev;
684 skb->next = NULL;
685 skb->prev = NULL;
686 skb->list = NULL;
687 next->prev = prev;
688 prev->next = next;
689}

Referenced by __skb_dequeue_tail(), and skb_unlink().

◆ __tcp_checksum_complete()

static __inline int __tcp_checksum_complete ( struct sk_buff skb)
static

Definition at line 3173 of file tcpcore.h.

3174{
3175#if 0
3176 tp->snd_wl1 = seq;
3177#endif
3178}
3179
3180extern void tcp_destroy_sock(struct sock *sk);

◆ __tcp_enter_cwr()

static __inline void __tcp_enter_cwr ( struct tcp_opt tp)
static

Definition at line 2964 of file tcpcore.h.

2970{
2971#if 0
2972 if (tp->packets_out >= tp->snd_cwnd) {
2973 /* Network is feed fully. */
2974 tp->snd_cwnd_used = 0;
2975 tp->snd_cwnd_stamp = tcp_time_stamp;
2976 } else {

◆ __tcp_fast_path_on()

static __inline void __tcp_fast_path_on ( struct tcp_opt tp,
u32  snd_wnd 
)
static

Definition at line 2735 of file tcpcore.h.

◆ __tcp_mem_reclaim()

void __tcp_mem_reclaim ( struct sock sk)

◆ __tcp_push_pending_frames()

static __inline void __tcp_push_pending_frames ( struct sock sk,
struct tcp_opt tp,
unsigned  cur_mss,
int  nonagle 
)
static

Definition at line 3102 of file tcpcore.h.

3106{
3107#if 0
3108 if (!tp->packets_out && !tp->pending)
3110#endif
3111}
3112
3113static __inline int tcp_skb_is_last(struct sock *sk, struct sk_buff *skb)
3114{
3115#if 0
3116 return (skb->next == (struct sk_buff*)&sk->write_queue);
3117#else
3118 return 0;
3119#endif

◆ __tcp_put_port()

void __tcp_put_port ( struct sock sk)

◆ __tcp_select_window()

u32 __tcp_select_window ( struct sock sk)

◆ after()

◆ alloc_skb()

struct sk_buff * alloc_skb ( unsigned int  size,
int  priority 
)

Referenced by __dev_alloc_skb(), and tcp_alloc_skb().

◆ before()

◆ between()

__inline int between ( __u32  seq1,
__u32  seq2,
__u32  seq3 
)

Definition at line 2402 of file tcpcore.h.

◆ cookie_v4_check()

struct sock * cookie_v4_check ( struct sock sk,
struct sk_buff skb,
struct ip_options *  opt 
)

◆ cookie_v4_init_sequence()

__u32 cookie_v4_init_sequence ( struct sock sk,
struct sk_buff skb,
__u16 mss 
)

◆ datagram_poll()

unsigned int datagram_poll ( struct file file,
struct socket sock,
struct poll_table_struct wait 
)

◆ dev_alloc_skb()

static __inline struct sk_buff * dev_alloc_skb ( unsigned int  length)
static

dev_alloc_skb - allocate an skbuff for sending @length: length to allocate

Allocate a new &sk_buff and assign it a usage count of one. The buffer has unspecified headroom built in. Users should allocate the headroom they think they need without accounting for the built in space. The built in space is used for optimisations.

NULL is returned if there is no free memory. Although this function allocates memory it can be called from an interrupt.

Definition at line 1069 of file tcpcore.h.

1070{
1071#if 0
1073#else
1074 return NULL;
1075#endif
1076}
#define GFP_ATOMIC
Definition: module.h:665
static __inline struct sk_buff * __dev_alloc_skb(unsigned int length, int gfp_mask)
Definition: tcpcore.h:1045

◆ keepalive_intvl_when()

static __inline int keepalive_intvl_when ( struct tcp_opt tp)
static

Definition at line 3765 of file tcpcore.h.

3772{

◆ keepalive_time_when()

static __inline int keepalive_time_when ( struct tcp_opt tp)
static

Definition at line 3774 of file tcpcore.h.

◆ kfree_skb()

static __inline void kfree_skb ( struct sk_buff skb)
static

kfree_skb - free an sk_buff @skb: buffer to free

Drop a reference to the buffer and free it if the usage count has hit zero.

Definition at line 297 of file tcpcore.h.

298{
299 if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
300 __kfree_skb(skb);
301}
atomic_t users
Definition: tcpcore.h:196

Referenced by __skb_queue_purge(), skb_queue_purge(), skb_share_check(), and skb_unshare().

◆ kfree_skb_fast()

static __inline void kfree_skb_fast ( struct sk_buff skb)
static

Definition at line 304 of file tcpcore.h.

305{
306 if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
307 kfree_skbmem(skb);
308}
void kfree_skbmem(struct sk_buff *skb)

◆ kfree_skbmem()

void kfree_skbmem ( struct sk_buff skb)

Referenced by kfree_skb_fast().

◆ kmap_skb_frag()

static __inline void * kmap_skb_frag ( const skb_frag_t frag)
static

Definition at line 1117 of file tcpcore.h.

1118{
1119#if 0
1120#ifdef CONFIG_HIGHMEM
1121 if (in_irq())
1122 out_of_line_bug();
1123
1124 local_bh_disable();
1125#endif
1126 return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
1127#else
1128 return NULL;
1129#endif
1130}
struct page * page
Definition: tcpcore.h:124

◆ kunmap_skb_frag()

static __inline void kunmap_skb_frag ( void vaddr)
static

Definition at line 1132 of file tcpcore.h.

1133{
1134#if 0
1135 kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
1136#ifdef CONFIG_HIGHMEM
1137 local_bh_enable();
1138#endif
1139#endif
1140}

◆ pskb_copy()

struct sk_buff * pskb_copy ( struct sk_buff skb,
int  gfp_mask 
)

◆ pskb_expand_head()

int pskb_expand_head ( struct sk_buff skb,
int  nhead,
int  ntail,
int  gfp_mask 
)

Referenced by skb_cow().

◆ pskb_may_pull()

static __inline int pskb_may_pull ( struct sk_buff skb,
unsigned int  len 
)
static

Definition at line 885 of file tcpcore.h.

886{
887 if (len <= skb_headlen(skb))
888 return 1;
889 if (len > skb->len)
890 return 0;
891 return (__pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL);
892}

◆ pskb_pull()

static __inline unsigned char * pskb_pull ( struct sk_buff skb,
unsigned int  len 
)
static

Definition at line 878 of file tcpcore.h.

879{
880 if (len > skb->len)
881 return NULL;
882 return __pskb_pull(skb,len);
883}
static __inline char * __pskb_pull(struct sk_buff *skb, unsigned int len)
Definition: tcpcore.h:869

◆ pskb_trim()

static __inline int pskb_trim ( struct sk_buff skb,
unsigned int  len 
)
static

Definition at line 973 of file tcpcore.h.

974{
975 if (len < skb->len)
976 return __pskb_trim(skb, len);
977 return 0;
978}
static __inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
Definition: tcpcore.h:962

◆ skb_add_mtu()

void skb_add_mtu ( int  mtu)

◆ skb_append()

static __inline void skb_append ( struct sk_buff old,
struct sk_buff newsk 
)
static

skb_append - append a buffer @old: buffer to insert after @newsk: buffer to insert

Place a packet after a given packet in a list. The list locks are taken and this function is atomic with respect to other list locked calls. A buffer cannot be placed on two lists at the same time.

Definition at line 663 of file tcpcore.h.

664{
665 unsigned long flags;
666
667 spin_lock_irqsave(&old->list->lock, flags);
668 __skb_append(old, newsk);
669 spin_unlock_irqrestore(&old->list->lock, flags);
670}
#define spin_lock_irqsave(sl, flags)
Definition: module.h:308
#define spin_unlock_irqrestore(sl, flags)
Definition: module.h:309
static __inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
Definition: tcpcore.h:647

◆ skb_checksum()

unsigned int skb_checksum ( const struct sk_buff skb,
int  offset,
int  len,
unsigned int  csum 
)

◆ skb_clone()

struct sk_buff * skb_clone ( struct sk_buff skb,
int  priority 
)

Referenced by skb_share_check().

◆ skb_cloned()

static __inline int skb_cloned ( struct sk_buff skb)
static

skb_cloned - is the buffer a clone @skb: buffer to check

Returns true if the buffer was generated with skb_clone() and is one of multiple shared copies of the buffer. Cloned buffers are shared data so must not be written to under normal circumstances.

Definition at line 319 of file tcpcore.h.

320{
321 return skb->cloned && atomic_read(&skb_shinfo(skb)->dataref) != 1;
322}
unsigned char cloned
Definition: tcpcore.h:192
#define skb_shinfo(SKB)
Definition: tcpcore.h:256

Referenced by skb_cow(), and skb_unshare().

◆ skb_copy()

struct sk_buff * skb_copy ( const struct sk_buff skb,
int  priority 
)

Referenced by skb_unshare().

◆ skb_copy_and_csum_bits()

unsigned int skb_copy_and_csum_bits ( const struct sk_buff skb,
int  offset,
u8 to,
int  len,
unsigned int  csum 
)

◆ skb_copy_and_csum_datagram()

int skb_copy_and_csum_datagram ( const struct sk_buff skb,
int  offset,
u8 to,
int  len,
unsigned int csump 
)

◆ skb_copy_and_csum_datagram_iovec()

int skb_copy_and_csum_datagram_iovec ( const struct sk_buff skb,
int  hlen,
struct iovec iov 
)

◆ skb_copy_and_csum_dev()

void skb_copy_and_csum_dev ( const struct sk_buff skb,
u8 to 
)

◆ skb_copy_bits()

int skb_copy_bits ( const struct sk_buff skb,
int  offset,
void to,
int  len 
)

◆ skb_copy_datagram()

int skb_copy_datagram ( const struct sk_buff from,
int  offset,
char to,
int  size 
)

◆ skb_copy_datagram_iovec()

int skb_copy_datagram_iovec ( const struct sk_buff from,
int  offset,
struct iovec to,
int  size 
)

◆ skb_copy_expand()

struct sk_buff * skb_copy_expand ( const struct sk_buff skb,
int  newheadroom,
int  newtailroom,
int  priority 
)

◆ skb_cow()

static __inline int skb_cow ( struct sk_buff skb,
unsigned int  headroom 
)
static

skb_cow - copy header of skb when it is required @skb: buffer to cow @headroom: needed headroom

If the skb passed lacks sufficient headroom or its data part is shared, data is reallocated. If reallocation fails, an error is returned and original skb is not changed.

The result is skb with writable area skb->head...skb->tail and at least @headroom of space at head.

Definition at line 1092 of file tcpcore.h.

1093{
1094#if 0
1095 int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);
1096
1097 if (delta < 0)
1098 delta = 0;
1099
1100 if (delta || skb_cloned(skb))
1101 return pskb_expand_head(skb, (delta+15)&~15, 0, GFP_ATOMIC);
1102 return 0;
1103#else
1104 return 0;
1105#endif
1106}
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
static __inline int skb_headroom(const struct sk_buff *skb)
Definition: tcpcore.h:901
static __inline int skb_cloned(struct sk_buff *skb)
Definition: tcpcore.h:319

◆ skb_dequeue()

static __inline struct sk_buff * skb_dequeue ( struct sk_buff_head list)
static

skb_dequeue - remove from the head of the queue @list: list to dequeue from

Remove the head of the list. The list lock is taken so the function may be used safely with other locking list functions. The head item is returned or NULL if the list is empty.

Definition at line 597 of file tcpcore.h.

598{
599 unsigned long flags;
600 struct sk_buff *result;
601
605 return result;
606}

Referenced by skb_queue_purge().

◆ skb_dequeue_tail()

static __inline struct sk_buff * skb_dequeue_tail ( struct sk_buff_head list)
static

skb_dequeue_tail - remove from the tail of the queue @list: list to dequeue from

Remove the tail of the list. The list lock is taken so the function may be used safely with other locking list functions. The tail item is returned or NULL if the list is empty.

Definition at line 746 of file tcpcore.h.

747{
748 unsigned long flags;
749 struct sk_buff *result;
750
754 return result;
755}
static __inline struct sk_buff * __skb_dequeue_tail(struct sk_buff_head *list)
Definition: tcpcore.h:729

◆ skb_free_datagram()

void skb_free_datagram ( struct sock sk,
struct sk_buff skb 
)

◆ skb_get()

static __inline struct sk_buff * skb_get ( struct sk_buff skb)
static

skb_get - reference buffer @skb: buffer to reference

Makes another reference to a socket buffer and returns a pointer to the buffer.

Definition at line 278 of file tcpcore.h.

279{
280 atomic_inc(&skb->users);
281 return skb;
282}

◆ skb_headlen()

static __inline int skb_headlen ( const struct sk_buff skb)
static

Definition at line 762 of file tcpcore.h.

763{
764 return skb->len - skb->data_len;
765}

Referenced by __pskb_pull(), and pskb_may_pull().

◆ skb_headroom()

static __inline int skb_headroom ( const struct sk_buff skb)
static

skb_headroom - bytes at buffer head @skb: buffer to check

Return the number of bytes of free space at the head of an &sk_buff.

Definition at line 901 of file tcpcore.h.

902{
903 return skb->data-skb->head;
904}
unsigned char * head
Definition: tcpcore.h:201

Referenced by skb_cow().

◆ skb_init()

void skb_init ( void  )

◆ skb_insert()

static __inline void skb_insert ( struct sk_buff old,
struct sk_buff newsk 
)
static

skb_insert - insert a buffer @old: buffer to insert before @newsk: buffer to insert

Place a packet before a given packet in a list. The list locks are taken and this function is atomic with respect to other list locked calls A buffer cannot be placed on two lists at the same time.

Definition at line 634 of file tcpcore.h.

635{
636 unsigned long flags;
637
638 spin_lock_irqsave(&old->list->lock, flags);
639 __skb_insert(newsk, old->prev, old, old->list);
640 spin_unlock_irqrestore(&old->list->lock, flags);
641}

◆ skb_is_nonlinear()

static __inline int skb_is_nonlinear ( const struct sk_buff skb)
static

Definition at line 757 of file tcpcore.h.

758{
759 return skb->data_len;
760}

Referenced by skb_tailroom().

◆ skb_linearize()

int skb_linearize ( struct sk_buff skb,
int  gfp 
)

skb_linearize - convert paged skb to linear one @skb: buffer to linearize @gfp: allocation mode

If there is no free memory -ENOMEM is returned, otherwise zero is returned and the old skb data released.

◆ skb_orphan()

static __inline void skb_orphan ( struct sk_buff skb)
static

skb_orphan - orphan a buffer @skb: buffer to orphan

If a buffer currently has an owner then we call the owner's destructor function and make the @skb unowned. The buffer continues to exist but is no longer charged to its former owner.

Definition at line 990 of file tcpcore.h.

991{
992 if (skb->destructor)
993 skb->destructor(skb);
994 skb->destructor = NULL;
995 skb->sk = NULL;
996}

◆ skb_over_panic()

void skb_over_panic ( struct sk_buff skb,
int  len,
void here 
)

Referenced by skb_put().

◆ skb_peek()

static __inline struct sk_buff * skb_peek ( struct sk_buff_head list_)
static

skb_peek @list_: list to peek at

Peek an &sk_buff. Unlike most other operations you MUST be careful with this one. A peek leaves the buffer on the list and someone else may run off with it. You must hold the appropriate locks or have a private queue to do this.

Returns NULL for an empty list or a pointer to the head element. The reference count is not incremented and the reference is therefore volatile. Use with caution.

Definition at line 408 of file tcpcore.h.

409{
410 struct sk_buff *list = ((struct sk_buff *)list_)->next;
411 if (list == (struct sk_buff *)list_)
412 list = NULL;
413 return list;
414}
struct list * next
Definition: list.h:38

◆ skb_peek_tail()

static __inline struct sk_buff * skb_peek_tail ( struct sk_buff_head list_)
static

skb_peek_tail @list_: list to peek at

Peek an &sk_buff. Unlike most other operations you MUST be careful with this one. A peek leaves the buffer on the list and someone else may run off with it. You must hold the appropriate locks or have a private queue to do this.

Returns NULL for an empty list or a pointer to the tail element. The reference count is not incremented and the reference is therefore volatile. Use with caution.

Definition at line 430 of file tcpcore.h.

431{
432 struct sk_buff *list = ((struct sk_buff *)list_)->prev;
433 if (list == (struct sk_buff *)list_)
434 list = NULL;
435 return list;
436}
struct list * prev
Definition: list.h:39

Referenced by __skb_dequeue_tail().

◆ skb_pull()

static __inline unsigned char * skb_pull ( struct sk_buff skb,
unsigned int  len 
)
static

skb_pull - remove data from the start of a buffer @skb: buffer to use @len: amount of data to remove

This function removes data from the start of a buffer, returning the memory to the headroom. A pointer to the next data in the buffer is returned. Once the data has been pulled future pushes will overwrite the old data.

Definition at line 860 of file tcpcore.h.

861{
862 if (len > skb->len)
863 return NULL;
864 return __skb_pull(skb,len);
865}
static __inline char * __skb_pull(struct sk_buff *skb, unsigned int len)
Definition: tcpcore.h:841

◆ skb_push()

static __inline unsigned char * skb_push ( struct sk_buff skb,
unsigned int  len 
)
static

skb_push - add data to the start of a buffer @skb: buffer to use @len: amount of data to add

This function extends the used data area of the buffer at the buffer start. If this would exceed the total buffer headroom the kernel will panic. A pointer to the first byte of the extra data is returned.

Definition at line 827 of file tcpcore.h.

828{
829#if 0
830 skb->data-=len;
831 skb->len+=len;
832 if(skb->data<skb->head) {
833 skb_under_panic(skb, len, current_text_addr());
834 }
835 return skb->data;
836#else
837 return NULL;
838#endif
839}
void skb_under_panic(struct sk_buff *skb, int len, void *here)

◆ skb_put()

static __inline unsigned char * skb_put ( struct sk_buff skb,
unsigned int  len 
)
static

skb_put - add data to a buffer @skb: buffer to use @len: amount of data to add

This function extends the used data area of the buffer. If this would exceed the total buffer size the kernel will panic. A pointer to the first byte of the extra data is returned.

Definition at line 794 of file tcpcore.h.

795{
796#if 0
797 unsigned char *tmp=skb->tail;
799 skb->tail+=len;
800 skb->len+=len;
801 if(skb->tail>skb->end) {
802 skb_over_panic(skb, len, current_text_addr());
803 }
804 return tmp;
805#else
806return NULL;
807#endif
808}
unsigned char * end
Definition: tcpcore.h:204
void skb_over_panic(struct sk_buff *skb, int len, void *here)

◆ skb_queue_empty()

static __inline int skb_queue_empty ( struct sk_buff_head list)
static

skb_queue_empty - check if a queue is empty @list: queue head

Returns true if the queue is empty, false otherwise.

Definition at line 265 of file tcpcore.h.

266{
267 return (list->next == (struct sk_buff *) list);
268}

◆ skb_queue_head()

static __inline void skb_queue_head ( struct sk_buff_head list,
struct sk_buff newsk 
)
static

skb_queue_head - queue a buffer at the list head @list: list to use @newsk: buffer to queue

Queue a buffer at the start of the list. This function takes the list lock and can be used safely with other locking &sk_buff functions safely.

A buffer cannot be placed on two lists at the same time.

Definition at line 503 of file tcpcore.h.

504{
505 unsigned long flags;
506
508 __skb_queue_head(list, newsk);
510}
static __inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
Definition: tcpcore.h:476

◆ skb_queue_head_init()

static __inline void skb_queue_head_init ( struct sk_buff_head list)
static

Definition at line 450 of file tcpcore.h.

451{
452 spin_lock_init(&list->lock);
453 list->prev = (struct sk_buff *)list;
454 list->next = (struct sk_buff *)list;
455 list->qlen = 0;
456}
#define spin_lock_init(sl)
Definition: module.h:305

◆ skb_queue_len()

static __inline __u32 skb_queue_len ( struct sk_buff_head list_)
static

skb_queue_len - get queue length @list_: list to measure

Return the length of an &sk_buff queue.

Definition at line 445 of file tcpcore.h.

446{
447 return(list_->qlen);
448}
__u32 qlen
Definition: tcpcore.h:112

◆ skb_queue_purge()

static __inline void skb_queue_purge ( struct sk_buff_head list)
static

skb_purge - empty a list @list: list to empty

Delete all buffers on an &sk_buff list. Each buffer is removed from the list and one reference dropped. This function takes the list lock and is atomic with respect to other list locking functions.

Definition at line 1008 of file tcpcore.h.

1009{
1010 struct sk_buff *skb;
1011 while ((skb=skb_dequeue(list))!=NULL)
1012 kfree_skb(skb);
1013}
static __inline struct sk_buff * skb_dequeue(struct sk_buff_head *list)
Definition: tcpcore.h:597

◆ skb_queue_tail()

static __inline void skb_queue_tail ( struct sk_buff_head list,
struct sk_buff newsk 
)
static

skb_queue_tail - queue a buffer at the list tail @list: list to use @newsk: buffer to queue

Queue a buffer at the tail of the list. This function takes the list lock and can be used safely with other locking &sk_buff functions safely.

A buffer cannot be placed on two lists at the same time.

Definition at line 550 of file tcpcore.h.

551{
552 unsigned long flags;
553
555 __skb_queue_tail(list, newsk);
557}

◆ skb_realloc_headroom()

struct sk_buff * skb_realloc_headroom ( struct sk_buff skb,
unsigned int  headroom 
)

◆ skb_recv_datagram()

struct sk_buff * skb_recv_datagram ( struct sock sk,
unsigned  flags,
int  noblock,
int err 
)

◆ skb_reserve()

static __inline void skb_reserve ( struct sk_buff skb,
unsigned int  len 
)
static

skb_reserve - adjust headroom @skb: buffer to alter @len: bytes to move

Increase the headroom of an empty &sk_buff by reducing the tail room. This is only allowed for an empty buffer.

Definition at line 927 of file tcpcore.h.

928{
929 skb->data+=len;
930 skb->tail+=len;
931}

Referenced by __dev_alloc_skb(), and tcp_alloc_page().

◆ skb_share_check()

static __inline struct sk_buff * skb_share_check ( struct sk_buff skb,
int  pri 
)
static

skb_share_check - check if buffer is shared and if so clone it @skb: buffer to check @pri: priority for memory allocation

If the buffer is shared the buffer is cloned and the old copy drops a reference. A new clone with a single reference is returned. If the buffer is not shared the original buffer is returned. When being called from interrupt status or with spinlocks held pri must be GFP_ATOMIC.

NULL is returned on a memory allocation failure.

Definition at line 351 of file tcpcore.h.

352{
353 if (skb_shared(skb)) {
354 struct sk_buff *nskb;
355 nskb = skb_clone(skb, pri);
356 kfree_skb(skb);
357 return nskb;
358 }
359 return skb;
360}
static __inline int skb_shared(struct sk_buff *skb)
Definition: tcpcore.h:332
struct sk_buff * skb_clone(struct sk_buff *skb, int priority)

◆ skb_shared()

static __inline int skb_shared ( struct sk_buff skb)
static

skb_shared - is the buffer shared @skb: buffer to check

Returns true if more than one person has a reference to this buffer.

Definition at line 332 of file tcpcore.h.

333{
334 return (atomic_read(&skb->users) != 1);
335}

Referenced by skb_share_check().

◆ skb_tailroom()

static __inline int skb_tailroom ( const struct sk_buff skb)
static

skb_tailroom - bytes at buffer end @skb: buffer to check

Return the number of bytes of free space at the tail of an sk_buff

Definition at line 913 of file tcpcore.h.

914{
915 return skb_is_nonlinear(skb) ? 0 : skb->end-skb->tail;
916}
static __inline int skb_is_nonlinear(const struct sk_buff *skb)
Definition: tcpcore.h:757

◆ skb_trim()

static __inline void skb_trim ( struct sk_buff skb,
unsigned int  len 
)
static

skb_trim - remove end from a buffer @skb: buffer to alter @len: new length

Cut the length of a buffer down by removing data from the tail. If the buffer is already under the length specified it is not modified.

Definition at line 954 of file tcpcore.h.

955{
956 if (skb->len > len) {
957 __skb_trim(skb, len);
958 }
959}
static __inline void __skb_trim(struct sk_buff *skb, unsigned int len)
Definition: tcpcore.h:935

◆ skb_under_panic()

void skb_under_panic ( struct sk_buff skb,
int  len,
void here 
)

Referenced by skb_push().

◆ skb_unlink()

static __inline void skb_unlink ( struct sk_buff skb</