#define HAVE_ALLOC_SKB
#define HAVE_ALIGNABLE_SKB

#define CHECKSUM_NONE 0
#define CHECKSUM_UNNECESSARY 2

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES-1)) & ~(SMP_CACHE_BYTES-1))
#define SKB_MAX_ORDER(X,ORDER)	(((PAGE_SIZE<<(ORDER)) - (X) - sizeof(struct skb_shared_info)) & ~(SMP_CACHE_BYTES-1))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X),0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0,2))

#define NET_CALLER(arg)		(*(((void**)&arg)-1))
#define NET_CALLER(arg)		__builtin_return_address(0)

#ifdef CONFIG_NETFILTER
	struct nf_conntrack *master;

#define MAX_SKB_FRAGS 6

#ifdef CONFIG_NETFILTER
	unsigned long nfmark;
	struct nf_ct_info *nfct;
#ifdef CONFIG_NETFILTER_DEBUG
	unsigned int nf_debug;
#if defined(CONFIG_HIPPI)
#ifdef CONFIG_NET_SCHED

#define SK_WMEM_MAX 65535
#define SK_RMEM_MAX 65535

#define dev_kfree_skb(a)	kfree_skb(a)

#define skb_shinfo(SKB)		((struct skb_shared_info *)((SKB)->end))

#define SKB_PAGE_ASSERT(skb)	do { if (skb_shinfo(skb)->nr_frags) out_of_line_bug(); } while (0)
#define SKB_FRAG_ASSERT(skb)	do { if (skb_shinfo(skb)->frag_list) out_of_line_bug(); } while (0)
#define SKB_LINEAR_ASSERT(skb)	do { if (skb_is_nonlinear(skb)) out_of_line_bug(); } while (0)

	unsigned char *tmp = skb->tail;
	unsigned char *tmp = skb->tail;

	int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);

#ifdef CONFIG_HIGHMEM
	return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
	kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
#ifdef CONFIG_HIGHMEM

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;			\
		     (skb != (struct sk_buff *)(queue));	\

#ifdef CONFIG_NETFILTER
static __inline void nf_conntrack_put(struct nf_ct_info *nfct)
	nfct->master->destroy(nfct->master);

static __inline void nf_conntrack_get(struct nf_ct_info *nfct)
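/* Illustrative user-space sketch, not part of this header: how SKB_DATA_ALIGN()
 * rounds a requested data size up to the next cache-line multiple (here assuming
 * SMP_CACHE_BYTES is 32), which keeps the skb_shared_info placed after the data
 * aligned. */
#include <stdio.h>

#define DEMO_SMP_CACHE_BYTES 32
#define DEMO_SKB_DATA_ALIGN(X) (((X) + (DEMO_SMP_CACHE_BYTES-1)) & ~(DEMO_SMP_CACHE_BYTES-1))

int main(void)
{
	/* 1500 rounds up to 1504, 65 rounds up to 96, 32 stays 32 */
	printf("%d %d %d\n", DEMO_SKB_DATA_ALIGN(1500),
	       DEMO_SKB_DATA_ALIGN(65), DEMO_SKB_DATA_ALIGN(32));
	return 0;
}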
	/* Truncated parameter lists, apparently from setsockopt/getsockopt/
	 * connect-style prototypes declared in this header. */
		int optname, char *optval, int optlen);
		int optname, char *optval,
		struct sockaddr *uaddr, int addr_len);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct ipv6_pinfo	af_inet6;
#if defined(CONFIG_INET) || defined(CONFIG_INET_MODULE)
	struct raw_opt		tp_raw4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct raw6_opt		tp_raw;
#if defined(CONFIG_SPX) || defined(CONFIG_SPX_MODULE)
	struct spx_opt		af_spx;
#ifdef CONFIG_FILTER
	struct sk_filter	*filter;
#if defined(CONFIG_INET) || defined(CONFIG_INET_MODULE)
	struct inet_opt		af_inet;
#if defined(CONFIG_ATALK) || defined(CONFIG_ATALK_MODULE)
	struct atalk_sock	af_at;
#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
	struct ipx_opt		af_ipx;
#if defined(CONFIG_DECNET) || defined(CONFIG_DECNET_MODULE)
#if defined(CONFIG_PACKET) || defined(CONFIG_PACKET_MODULE)
	struct packet_opt	*af_packet;
#if defined(CONFIG_X25) || defined(CONFIG_X25_MODULE)
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
#if defined(CONFIG_ROSE) || defined(CONFIG_ROSE_MODULE)
#if defined(CONFIG_PPPOE) || defined(CONFIG_PPPOE_MODULE)
	struct pppox_opt	*pppox;
#if defined(CONFIG_ECONET) || defined(CONFIG_ECONET_MODULE)
	struct econet_opt	*af_econet;
#if defined(CONFIG_ATM) || defined(CONFIG_ATM_MODULE)
	struct atm_vcc		*af_atm;
#if defined(CONFIG_IRDA) || defined(CONFIG_IRDA_MODULE)
	struct irda_sock	*irda;
#if defined(CONFIG_WAN_ROUTER) || defined(CONFIG_WAN_ROUTER_MODULE)
	struct wanpipe_opt	*af_wanpipe;
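/* Standalone sketch, illustrative names only: the members above follow the
 * 2.4-era pattern of keeping per-protocol private state in a union inside the
 * common socket structure, with each member compiled in only when its protocol
 * is configured. */
#include <stdio.h>

#define CONFIG_DEMO_INET 1

struct demo_inet_opt  { int ttl; };
struct demo_atalk_opt { int node; };

struct demo_sock {
	int state;
	union {
#ifdef CONFIG_DEMO_INET
		struct demo_inet_opt  af_inet;
#endif
		struct demo_atalk_opt af_at;
	} protinfo;		/* only one member is meaningful per socket */
};

int main(void)
{
	struct demo_sock sk = { 0 };
	sk.protinfo.af_inet.ttl = 64;
	printf("ttl=%d\n", sk.protinfo.af_inet.ttl);
	return 0;
}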
#include <net/neighbour.h>

#define RT_CACHE_DEBUG	0

#define DST_GC_MIN	(1*HZ)
#define DST_GC_INC	(5*HZ)
#define DST_GC_MAX	(120*HZ)

#ifdef CONFIG_NET_CLS_ROUTE

extern void * dst_alloc(struct dst_ops * ops);

	if (dst->obsolete > 1)

static __inline void dst_confirm(struct dst_entry *dst)
	neigh_confirm(dst->neighbour);

static __inline void dst_negative_advice(struct dst_entry **dst_p)
	if (dst && dst->ops->negative_advice)
		*dst_p = dst->ops->negative_advice(dst);

static __inline void dst_link_failure(struct sk_buff *skb)
	if (dst && dst->ops && dst->ops->link_failure)
		dst->ops->link_failure(skb);

	if (dst->expires == 0 || (long)(dst->expires - expires) > 0)

extern void dst_init(void);
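/* Standalone sketch of the ops-dispatch idiom used by the dst helpers above:
 * call through a per-object operations table only after checking that the
 * table and the specific hook exist. All names here are illustrative. */
#include <stdio.h>
#include <stddef.h>

struct demo_entry;

struct demo_ops {
	struct demo_entry *(*negative_advice)(struct demo_entry *);
};

struct demo_entry {
	int obsolete;
	struct demo_ops *ops;
};

static void demo_negative_advice(struct demo_entry **entry_p)
{
	struct demo_entry *entry = *entry_p;

	/* mirrors dst_negative_advice(): guard the pointers before calling */
	if (entry && entry->ops && entry->ops->negative_advice)
		*entry_p = entry->ops->negative_advice(entry);
}

static struct demo_entry *drop_entry(struct demo_entry *entry)
{
	(void)entry;
	return NULL;	/* advise the caller to stop using this cached route */
}

int main(void)
{
	struct demo_ops ops = { .negative_advice = drop_entry };
	struct demo_entry e = { .obsolete = 0, .ops = &ops };
	struct demo_entry *cached = &e;

	demo_negative_advice(&cached);
	printf("cached=%p\n", (void *)cached);
	return 0;
}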
#define FASTRETRANS_DEBUG 1

#undef TCP_CLEAR_TIMERS

#include <linux/tcp.h>
#include <linux/cache.h>
#include <net/checksum.h>
#include <net/sock.h>

#define TCP_LHTABLE_SIZE	32

#define tcp_ehash	(tcp_hashinfo.__tcp_ehash)
#define tcp_bhash	(tcp_hashinfo.__tcp_bhash)
#define tcp_ehash_size	(tcp_hashinfo.__tcp_ehash_size)
#define tcp_bhash_size	(tcp_hashinfo.__tcp_bhash_size)
#define tcp_listening_hash (tcp_hashinfo.__tcp_listening_hash)
#define tcp_lhash_lock	(tcp_hashinfo.__tcp_lhash_lock)
#define tcp_lhash_users	(tcp_hashinfo.__tcp_lhash_users)
#define tcp_lhash_wait	(tcp_hashinfo.__tcp_lhash_wait)
#define tcp_portalloc_lock (tcp_hashinfo.__tcp_portalloc_lock)

				     unsigned short snum);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#ifdef INET_REFCNT_DEBUG

#define TCP_COMBINED_PORTS(__sport, __dport) \
	(((__u32)(__sport)<<16) | (__u32)(__dport))
#define TCP_COMBINED_PORTS(__sport, __dport) \
	(((__u32)(__dport)<<16) | (__u32)(__sport))

#if (BITS_PER_LONG == 64)
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
	__u64 __name = (((__u64)(__saddr))<<32)|((__u64)(__daddr));
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
	__u64 __name = (((__u64)(__daddr))<<32)|((__u64)(__saddr));
#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
	(((*((__u64 *)&((__sk)->daddr)))== (__cookie))	&&	\
	 ((*((__u32 *)&((__sk)->dport)))== (__ports))	&&	\
	 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr)
#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
	(((__sk)->daddr		== (__saddr))	&&		\
	 ((__sk)->rcv_saddr	== (__daddr))	&&		\
	 ((*((__u32 *)&((__sk)->dport)))== (__ports))	&&	\
	 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))

#define TCP_IPV6_MATCH(__sk, __saddr, __daddr, __ports, __dif)		\
	(((*((__u32 *)&((__sk)->dport)))== (__ports))		&&	\
	 ((__sk)->family	== AF_INET6)			&&	\
	 !ipv6_addr_cmp(&(__sk)->net_pinfo.af_inet6.daddr, (__saddr))	&&	\
	 !ipv6_addr_cmp(&(__sk)->net_pinfo.af_inet6.rcv_saddr, (__daddr)) &&	\
	 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))

#define MAX_TCP_HEADER	(128 + MAX_HEADER)

#define MAX_TCP_WINDOW		32767U
#define TCP_MIN_MSS		88U
#define TCP_MIN_RCVMSS		536U
#define TCP_FASTRETRANS_THRESH	3
#define TCP_MAX_REORDERING	127
#define TCP_MAX_QUICKACKS	16U

#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR2		15
#define TCP_SYN_RETRIES		5
#define TCP_SYNACK_RETRIES	5
#define TCP_ORPHAN_RETRIES	7

#define TCP_TIMEWAIT_LEN	(60*1000)
#define TCP_FIN_TIMEOUT		TCP_TIMEWAIT_LEN

#define TCP_DELACK_MAX	((unsigned)(HZ/5))
#define TCP_DELACK_MIN	((unsigned)(HZ/25))
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ))

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U))

#define TCP_KEEPALIVE_TIME	(120*60*HZ)
#define TCP_KEEPALIVE_PROBES	9
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_TWKILL_SLOTS	8
#define TCP_TWKILL_PERIOD	(TCP_TIMEWAIT_LEN/TCP_TWKILL_SLOTS)

#define TCP_SYNQ_INTERVAL	(HZ/5)
#define TCP_SYNQ_HSIZE		512

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60
#define TCP_PAWS_WINDOW	1

#define TCP_TW_RECYCLE_SLOTS_LOG	5
#define TCP_TW_RECYCLE_SLOTS		(1<<TCP_TW_RECYCLE_SLOTS_LOG)

#if HZ <= 16 || HZ > 4096
# error Unsupported: HZ <= 16 or HZ > 4096
# define TCP_TW_RECYCLE_TICK (5+2-TCP_TW_RECYCLE_SLOTS_LOG)
# define TCP_TW_RECYCLE_TICK (6+2-TCP_TW_RECYCLE_SLOTS_LOG)
# define TCP_TW_RECYCLE_TICK (7+2-TCP_TW_RECYCLE_SLOTS_LOG)
# define TCP_TW_RECYCLE_TICK (8+2-TCP_TW_RECYCLE_SLOTS_LOG)
# define TCP_TW_RECYCLE_TICK (9+2-TCP_TW_RECYCLE_SLOTS_LOG)
# define TCP_TW_RECYCLE_TICK (10+2-TCP_TW_RECYCLE_SLOTS_LOG)
# define TCP_TW_RECYCLE_TICK (11+2-TCP_TW_RECYCLE_SLOTS_LOG)
# define TCP_TW_RECYCLE_TICK (12+2-TCP_TW_RECYCLE_SLOTS_LOG)
#define TCP_TW_RECYCLE_TICK (0)

#define TCPOPT_NOP		1
#define TCPOPT_EOL		0
#define TCPOPT_MSS		2
#define TCPOPT_WINDOW		3
#define TCPOPT_SACK_PERM	4
#define TCPOPT_SACK		5
#define TCPOPT_TIMESTAMP	8

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10

#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8

#define TCP_TIME_RETRANS	1
#define TCP_TIME_DACK		2
#define TCP_TIME_PROBE0		3
#define TCP_TIME_KEEPOPEN	4

extern int sysctl_max_syn_backlog;
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_tw_recycle;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_max_tw_buckets;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern int sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
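/* Illustrative user-space sketch: how the TCPOPT_/TCPOLEN_ values defined above
 * combine into the aligned 4-byte word that prefixes a TCP timestamp option
 * (NOP, NOP, TIMESTAMP, length 10), per the usual RFC 1323 layout. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_TCPOPT_NOP        1
#define DEMO_TCPOPT_TIMESTAMP  8
#define DEMO_TCPOLEN_TIMESTAMP 10

int main(void)
{
	uint32_t word = (DEMO_TCPOPT_NOP << 24) | (DEMO_TCPOPT_NOP << 16) |
			(DEMO_TCPOPT_TIMESTAMP << 8) | DEMO_TCPOLEN_TIMESTAMP;

	printf("0x%08x\n", (unsigned)word);	/* prints 0x0101080a */
	return 0;
}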
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct tcp_v6_open_req {

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp_v6_open_req v6_req;

#define tcp_openreq_alloc()		kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)
#define tcp_openreq_fastfree(req)	kmem_cache_free(tcp_openreq_cachep, req)

	req->class->destructor(req);

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define TCP_INET_FAMILY(fam) ((fam) == AF_INET)
#define TCP_INET_FAMILY(fam) 1

	return (__s32)(seq1-seq2) < 0;
	return (__s32)(seq2-seq1) < 0;
	return seq3 - seq2 >= seq1 - seq2;

#ifdef ROS_STATISTICS
extern struct tcp_mib tcp_statistics[NR_CPUS*2];
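/* Illustrative user-space sketch of the before()/after() comparisons above:
 * casting the 32-bit difference to signed keeps sequence numbers comparable
 * across the 2^32 wrap, within a 2^31 window. */
#include <stdio.h>
#include <stdint.h>

static int demo_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

static int demo_after(uint32_t seq1, uint32_t seq2)
{
	return demo_before(seq2, seq1);
}

int main(void)
{
	uint32_t a = 0xfffffff0u;	/* just below the wrap point */
	uint32_t b = 0x00000010u;	/* just after wrapping */

	/* a plain "a < b" test would claim a is not before b here, while the
	 * signed-difference form correctly treats b as 32 bytes ahead */
	printf("before(a, b) = %d, after(b, a) = %d\n",
	       demo_before(a, b), demo_after(b, a));
	return 0;
}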
#define TCP_INC_STATS(field)		SNMP_INC_STATS(tcp_statistics, field)
#define TCP_INC_STATS_BH(field)		SNMP_INC_STATS_BH(tcp_statistics, field)
#define TCP_INC_STATS_USER(field)	SNMP_INC_STATS_USER(tcp_statistics, field)

	if (tp->ack.quick && --tp->ack.quick == 0) {

	tp->tstamp_ok = tp->sack_ok = tp->wscale_ok = tp->snd_wscale = 0;
	/* Truncated parameter lists, apparently from the tcp_setsockopt/
	 * tcp_getsockopt/tcp_recvmsg prototypes, plus a declaration ending in
	 * "struct ip_options *opt" (cookie_v4_check-style). */
		int optname, char *optval,
		int optname, char *optval,
		int flags, int *addr_len);
		struct ip_options *opt);
#ifdef TCP_CLEAR_TIMERS
	if (timer_pending(&tp->retransmit_timer) &&
	    del_timer(&tp->retransmit_timer))

	tp->ack.blocked = 0;
	tp->ack.pending = 0;
#ifdef TCP_CLEAR_TIMERS
	if (timer_pending(&tp->delack_timer) &&
	    del_timer(&tp->delack_timer))

	printk(KERN_DEBUG "reset_xmit_timer sk=%p %d when=0x%lx, caller=%p\n",
	       sk, what, when, current_text_addr());

	if (!mod_timer(&tp->retransmit_timer, tp->timeout))
	if (!mod_timer(&tp->delack_timer, tp->ack.timeout))

	int mss_now = tp->mss_cache;
	if (dst && dst->pmtu != tp->pmtu_cookie)

	unsigned int hint = min(tp->advmss, tp->mss_cache);

	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
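/* Illustrative user-space sketch of the header-prediction word started above:
 * shifting a byte length left by 26 places (len/4) in the top 4 "data offset"
 * bits of the TCP header's fourth 32-bit word; the full prediction word also
 * folds in the ACK flag and the send window. Assumes a 20-byte header. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int tcp_header_len = 20;		/* bytes */
	uint32_t doff_word = (uint32_t)tcp_header_len << 26;

	/* data offset is expressed in 32-bit words in the real header */
	printf("doff = %u words, packed word = 0x%08x\n",
	       tcp_header_len / 4, (unsigned)doff_word);
	return 0;
}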
#define tcp_time_stamp		((__u32)(jiffies))

		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		struct inet6_skb_parm	h6;

#define TCPCB_FLAG_FIN		0x01
#define TCPCB_FLAG_SYN		0x02
#define TCPCB_FLAG_RST		0x04
#define TCPCB_FLAG_PSH		0x08
#define TCPCB_FLAG_ACK		0x10
#define TCPCB_FLAG_URG		0x20
#define TCPCB_FLAG_ECE		0x40
#define TCPCB_FLAG_CWR		0x80

#define TCPCB_SACKED_ACKED	0x01
#define TCPCB_SACKED_RETRANS	0x02
#define TCPCB_LOST		0x04
#define TCPCB_TAGBITS		0x07
#define TCPCB_EVER_RETRANS	0x80
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
#define TCPCB_URG		0x20
#define TCPCB_AT_TAIL		(TCPCB_URG)

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

#define for_retrans_queue(skb, sk, tp) \
		for (skb = (sk)->write_queue.next;			\
		     (skb != (tp)->send_head) &&			\
		     (skb != (struct sk_buff *)&(sk)->write_queue);	\

	return tp->packets_out - tp->left_out + tp->retrans_out;

	return max(tp->snd_cwnd >> 1U, 2U);

	return tp->snd_ssthresh;
	return max(tp->snd_ssthresh,
		   ((tp->snd_cwnd >> 1) +
		    (tp->snd_cwnd >> 2)));
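/* Illustrative user-space sketch of the two ssthresh expressions above: on
 * loss the threshold is recalculated as half the congestion window (but at
 * least 2 segments), while the "current" value never reports less than 3/4 of
 * the window. The numbers are arbitrary. */
#include <stdio.h>

#define demo_max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned int snd_cwnd = 10, snd_ssthresh = 4;

	unsigned int recalc  = demo_max(snd_cwnd >> 1, 2u);                 /* 5 */
	unsigned int current = demo_max(snd_ssthresh,
					(snd_cwnd >> 1) + (snd_cwnd >> 2)); /* 7 */

	printf("recalc=%u current=%u\n", recalc, current);
	return 0;
}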
	if (tp->sack_ok &&
	    tp->sacked_out >= tp->packets_out - tp->lost_out)
		tp->sacked_out = tp->packets_out - tp->lost_out;
	tp->left_out = tp->sacked_out + tp->lost_out;

	if (tp->packets_out >= tp->snd_cwnd) {
		tp->snd_cwnd_used = 0;
	if (tp->packets_out > tp->snd_cwnd_used)
		tp->snd_cwnd_used = tp->packets_out;

	tp->undo_marker = 0;
	tp->snd_cwnd = min(tp->snd_cwnd,
	tp->snd_cwnd_cnt = 0;
	tp->high_seq = tp->snd_nxt;
	TCP_ECN_queue_cwr(tp);

	tp->prior_ssthresh = 0;
	return after(tp->snd_sml, tp->snd_una) &&

	return (skb->len < mss_now &&

			unsigned cur_mss, int nonagle)

	return ((nonagle==1 || tp->urg_mode

	if (!tp->packets_out && !tp->pending)

			unsigned long saddr, unsigned long daddr,

	tp->ucopy.memory = 0;
	if (tp->ucopy.task) {
		if (tp->ucopy.memory > sk->rcvbuf) {
				sk->backlog_rcv(sk, skb1);
				NET_INC_STATS_BH(TCPPrequeueDropped);
			tp->ucopy.memory = 0;
			wake_up_interruptible(sk->sleep);
	static char *statename[] = {
		"Unused", "Established", "Syn Sent", "Syn Recv",
		"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
		"Close Wait", "Last ACK", "Listen", "Closing"
	};

	int oldstate = sk->state;

		TCP_INC_STATS(TcpCurrEstab);

			sk->prot->unhash(sk);
			if (sk->prev && !(sk->userlocks & SOCK_BINDPORT_LOCK))

		tcp_statistics[smp_processor_id()*2+!in_softirq()].TcpCurrEstab--;

	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",
		   sk, statename[oldstate], statename[state]);

	sk->shutdown = SHUTDOWN_MASK;
	sk->state_change(sk);
	if (tp->tstamp_ok) {
	if (tp->eff_sacks) {
		for (this_sack = 0; this_sack < tp->eff_sacks; this_sack++) {

				      int offer_wscale, int wscale,
				      __u32 tstamp, __u32 ts_recent)
					 __u32 *window_clamp,

	unsigned int space = (__space < 0 ? 0 : __space);

	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);

	while (space > 65535 && (*rcv_wscale) < 14) {

	if (*rcv_wscale && sysctl_tcp_app_win && space >= mss &&
	    space - max((space >> sysctl_tcp_app_win), mss >> *rcv_wscale) < 65536/2)

	if (mss > (1 << *rcv_wscale)) {
	else if (mss > 1460)

	if (*rcv_wnd > init_cwnd*mss)
		*rcv_wnd = init_cwnd*mss;

	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);

	return sysctl_tcp_adv_win_scale <= 0 ?
		(space >> (-sysctl_tcp_adv_win_scale)) :
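/* Illustrative user-space sketch of the scale-selection loop above: the
 * advertised window field is 16 bits, so the scale factor is raised (up to 14)
 * until the desired buffer "space" fits in 65535 shifted units. The buffer
 * size is an arbitrary example. */
#include <stdio.h>

int main(void)
{
	unsigned int space = 262144;	/* e.g. a 256 KB receive buffer */
	unsigned int rcv_wscale = 0;

	while (space > 65535 && rcv_wscale < 14) {
		space >>= 1;
		rcv_wscale++;
	}
	printf("rcv_wscale=%u, window units of %u bytes\n",
	       rcv_wscale, 1u << rcv_wscale);	/* prints 3 and 8 here */
	return 0;
}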
	if (!tp->accept_queue_tail) {
		tp->accept_queue = req;
		tp->accept_queue_tail->dl_next = req;
	tp->accept_queue_tail = req;

static __inline void
	if (--lopt->qlen == 0)
	if (lopt->qlen++ == 0)

	write_lock(&tp->syn_wait_lock);
	write_unlock(&tp->syn_wait_lock);

	req->mss = tp->mss_clamp;
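/* Standalone sketch of the accept-queue append hinted at above: a singly
 * linked FIFO with head and tail pointers, where new open requests are chained
 * at the tail via dl_next. Types and names are illustrative stand-ins. */
#include <stdio.h>
#include <stddef.h>

struct demo_req {
	int id;
	struct demo_req *dl_next;
};

struct demo_queue {
	struct demo_req *accept_queue;
	struct demo_req *accept_queue_tail;
};

static void demo_acceptq_queue(struct demo_queue *q, struct demo_req *req)
{
	req->dl_next = NULL;
	if (!q->accept_queue_tail)
		q->accept_queue = req;		/* first entry becomes the head */
	else
		q->accept_queue_tail->dl_next = req;
	q->accept_queue_tail = req;
}

int main(void)
{
	struct demo_queue q = { NULL, NULL };
	struct demo_req a = { 1, NULL }, b = { 2, NULL };

	demo_acceptq_queue(&q, &a);
	demo_acceptq_queue(&q, &b);
	for (struct demo_req *r = q.accept_queue; r; r = r->dl_next)
		printf("req %d\n", r->id);
	return 0;
}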
#define TCP_MEM_QUANTUM ((int)PAGE_SIZE)

		NET_INC_STATS(TCPMemoryPressures);

	if (!(sk->userlocks & SOCK_SNDBUF_LOCK)) {

	if (sk->forward_alloc >= (int)skb->truesize ||
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;

	int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout;
	if (fin_timeout < (tp->rto << 2) - (tp->rto >> 1))
		fin_timeout = (tp->rto << 2) - (tp->rto >> 1);

	if ((s32)(tp->rcv_tsval - tp->ts_recent) >= 0)
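/* Illustrative sketch of the GNU "?:" default idiom used by the keepalive
 * helpers above: "a ? : b" evaluates a once and falls back to b only when a is
 * zero. Values are arbitrary; compile with GCC or Clang (GNU extension). */
#include <stdio.h>

static unsigned int demo_keepalive_time_when(unsigned int per_socket_setting,
					     unsigned int sysctl_default)
{
	return per_socket_setting ? : sysctl_default;
}

int main(void)
{
	printf("%u %u\n",
	       demo_keepalive_time_when(0, 7200),	/* unset -> default wins */
	       demo_keepalive_time_when(600, 7200));	/* per-socket value wins */
	return 0;
}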
#define TCP_CHECK_TIMER(sk) do { } while (0)
static __inline void tcp_synq_removed(struct sock *sk, struct open_request *req)
struct tcp_tw_bucket * next_death
static __inline void tcp_listen_lock(void)
int tcp_v4_rcv(struct sk_buff *skb)
static __inline int __tcp_checksum_complete(struct sk_buff *skb)
const char timer_bug_msg[]
void tcp_time_wait(struct sock *sk, int state, int timeo)
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, int size)
#define CHECKSUM_UNNECESSARY
struct dst_entry * dst_cache
struct sk_buff * pskb_copy(struct sk_buff *skb, int gfp_mask)
void tcp_delete_keepalive_timer(struct sock *)
static __inline void tcp_sync_left_out(struct tcp_opt *tp)
void tcp_reset_keepalive_timer(struct sock *, unsigned long)
void tcp_v4_err(struct sk_buff *skb, u32)
static __inline void kfree_skb(struct sk_buff *skb)
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, struct tcphdr *th, unsigned len)
struct sockaddr _sockaddr
static __inline void tcp_dec_quickack_mode(struct tcp_opt *tp)
static __inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
void tcp_push_one(struct sock *, unsigned mss_now)
static __inline void tcp_fast_path_check(struct sock *sk, struct tcp_opt *tp)
int(* queue_xmit)(struct sk_buff *skb)
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, sk_read_actor_t recv_actor)
static __inline void tcp_prequeue_init(struct tcp_opt *tp)
static __inline int tcp_sk_listen_hashfn(struct sock *sk)
void tcp_timewait_kill(struct tcp_tw_bucket *tw)
struct timer_list retransmit_timer
struct tcp_bind_bucket * chain
static __inline void tcp_mem_reclaim(struct sock *sk)
struct sock * tcp_v4_lookup_listener(u32 addr, unsigned short hnum, int dif)
static __inline void tcp_charge_skb(struct sock *sk, struct sk_buff *skb)
int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int *csump)
int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
struct tcp_v4_open_req v4_req
struct task_struct * task
void(* unhash)(struct sock *sk)
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
static __inline unsigned char * __skb_put(struct sk_buff *skb, unsigned int len)
struct outqueuenode * head
wait_queue_head_t * sleep
unsigned char use_write_queue
static __inline void __skb_insert(struct sk_buff *newsk, struct sk_buff *prev, struct sk_buff *next, struct sk_buff_head *list)
atomic_t tcp_sockets_allocated
struct sk_buff_head * list
static __inline struct sk_buff * dev_alloc_skb(unsigned int length)
int(* backlog_rcv)(struct sock *sk, struct sk_buff *skb)
static __inline int tcp_acceptq_is_full(struct sock *sk)
static __inline void __tcp_fast_path_on(struct tcp_opt *tp, u32 snd_wnd)
int tcp_write_xmit(struct sock *, int nonagle)
static __inline void tcp_minshall_update(struct tcp_opt *tp, int mss, struct sk_buff *skb)
static __inline void kfree_skb_fast(struct sk_buff *skb)
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
int(* output)(struct sk_buff *)
void(* addr2sockaddr)(struct sock *sk, struct sockaddr *)
static __inline void tcp_init_wl(struct tcp_opt *tp, u32 ack, u32 seq)
struct sock * tcp_accept(struct sock *sk, int flags, int *err)
int(* sendmsg)(struct sock *sk, struct msghdr *msg, int len)
#define tcp_openreq_fastfree(req)
struct timer_list delack_timer
int skb_linearize(struct sk_buff *skb, int gfp)
unsigned int keepalive_intvl
int(* setsockopt)(struct sock *sk, int level, int optname, char *optval, int optlen)
static __inline int tcp_skb_is_last(struct sock *sk, struct sk_buff *skb)
unsigned short max_ack_backlog
__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mss)
static __inline void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack, int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent)
void skb_add_mtu(int mtu)
void(* data_ready)(struct sock *sk, int bytes)
static __inline unsigned char * pskb_pull(struct sk_buff *skb, unsigned int len)
int(* getsockopt)(struct sock *sk, int level, int optname, char *optval, int *option)
static __inline void skb_reserve(struct sk_buff *skb, unsigned int len)
__inline int before(__u32 seq1, __u32 seq2)
unsigned long rate_tokens
struct open_request * accept_queue
static __inline int skb_headroom(const struct sk_buff *skb)
struct tcp_func * af_specific
static __inline int keepalive_intvl_when(struct tcp_opt *tp)
int(* bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len)
int tcp_send_synack(struct sock *)
static __inline int skb_queue_empty(struct sk_buff_head *list)
static __inline __u32 tcp_current_ssthresh(struct tcp_opt *tp)
int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
static __inline void tcp_writequeue_purge(struct sock *sk)
static __inline u32 tcp_receive_window(struct tcp_opt *tp)
int(* getsockopt)(struct sock *sk, int level, int optname, char *optval, int *optlen)
static __inline__ int tcp_minshall_check(struct tcp_opt *tp)
int(* recvmsg)(struct sock *sk, struct msghdr *msg, int len, int noblock, int flags, int *addr_len)
static __inline void tcp_fast_path_on(struct tcp_opt *tp)
void tcp_send_ack(struct sock *sk)
void(* destroy)(struct dst_entry *)
unsigned int skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum)
static __inline int pskb_trim(struct sk_buff *skb, unsigned int len)
static __inline int tcp_nagle_check(struct tcp_opt *tp, struct sk_buff *skb, unsigned mss_now, int nonagle)
kmem_cache_t * tcp_bucket_cachep
static __inline struct sk_buff * skb_get(struct sk_buff *skb)
struct tcp_opt::@989 ucopy
void(* error_report)(struct sock *sk)
static __inline unsigned char * skb_put(struct sk_buff *skb, unsigned int len)
static __inline void tcp_set_state(struct sock *sk, int state)
static __inline int skb_headlen(const struct sk_buff *skb)
int tcp_v4_remember_stamp(struct sock *sk)
static __inline void __tcp_enter_cwr(struct tcp_opt *tp)
static __inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
void tcp_cwnd_application_limited(struct sock *sk)
kmem_cache_t * tcp_openreq_cachep
union sock::@991 tp_pinfo
int(* remember_stamp)(struct sock *sk)
static __inline int tcp_synq_young(struct sock *sk)
void tcp_listen_wlock(void)
void tcp_send_fin(struct sock *sk)
int(* init)(struct sock *sk)
struct sock * cookie_v4_check(struct sock *sk, struct sk_buff *skb, struct ip_options *opt)
static __inline unsigned char * skb_pull(struct sk_buff *skb, unsigned int len)
int(* setsockopt)(struct sock *sk, int level, int optname, char *optval, int optlen)
void tcp_clear_retrans(struct tcp_opt *tp)
kmem_cache_t * tcp_timewait_cachep
static __inline void tcp_clear_options(struct tcp_opt *tp)
void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
static __inline int skb_tailroom(const struct sk_buff *skb)
void tcp_destroy_sock(struct sock *sk)
union sock::@992 protinfo
void tcp_inherit_port(struct sock *sk, struct sock *child)
static unsigned char bytes[4]
#define spin_lock_init(sl)
ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
#define TCPOLEN_SACK_BASE
static __inline void tcp_listen_unlock(void)
struct tcp_ehash_bucket * __tcp_ehash
void tcp_send_probe0(struct sock *)
static __inline void tcp_done(struct sock *sk)
union tcp_skb_cb::@994 header
struct tcp_sack_block duplicate_sack[1]
static __inline void tcp_check_probe_timer(struct sock *sk, struct tcp_opt *tp)
static __inline void tcp_set_owner_r(struct sk_buff *skb, struct sock *sk)
void tcp_rfree(struct sk_buff *skb)
int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
static __inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
struct netlink_opt * af_netlink
struct tcp_bind_bucket * tcp_bucket_create(struct tcp_bind_hashbucket *head, unsigned short snum)
#define TCPOLEN_SACK_PERBLOCK
struct open_request * dl_next
static __inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
static __inline struct sk_buff * tcp_alloc_skb(struct sock *sk, int size, int gfp)
void(* send_check)(struct sock *sk, struct tcphdr *th, int len, struct sk_buff *skb)
#define TCP_SKB_CB(__skb)
struct tcp_bind_bucket * tb
struct sock * __tcp_listening_hash[TCP_LHTABLE_SIZE]
void tcp_parse_options(struct sk_buff *skb, struct tcp_opt *tp, int estab)
static __inline __u32 tcp_max_burst(struct tcp_opt *tp)
unsigned int tcp_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait)
void(* send_reset)(struct sk_buff *skb)
struct sock ** bind_pprev
static __inline void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
struct tcp_bind_bucket ** pprev
struct tcp_bind_bucket __attribute__
int(* destroy)(struct sock *sk)
struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, struct open_request *req)
struct sk_buff * skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err)
void tcp_write_space(struct sock *sk)
void(* send_ack)(struct sk_buff *skb, struct open_request *req)
struct neighbour * neighbour
int(* ioctl)(struct sock *sk, int cmd, unsigned long arg)
static __inline struct sk_buff * skb_peek_tail(struct sk_buff_head *list_)
void tcp_xmit_retransmit_queue(struct sock *)
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
static __inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
struct sk_buff * skb_copy(const struct sk_buff *skb, int priority)
#define TCPOLEN_TIMESTAMP
static __inline int skb_is_nonlinear(const struct sk_buff *skb)
static __inline void tcp_sack_reset(struct tcp_opt *tp)
volatile unsigned char zapped
#define TCPOLEN_SACK_BASE_ALIGNED
static __inline int tcp_bhashfn(__u16 lport)
static __inline void tcp_acceptq_removed(struct sock *sk)
int tcp_write_wakeup(struct sock *)
static __inline int tcp_snd_test(struct tcp_opt *tp, struct sk_buff *skb, unsigned cur_mss, int nonagle)
unsigned short ack_backlog
void skb_under_panic(struct sk_buff *skb, int len, void *here)
void tcp_update_metrics(struct sock *sk)
struct or_calltable * class
int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
void(* link_failure)(struct sk_buff *)
static __inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
#define SKB_LINEAR_ASSERT(skb)
struct sk_buff * alloc_skb(unsigned int size, int priority)
static void atomic_inc(atomic_t volatile *v)
void tcp_send_skb(struct sock *, struct sk_buff *, int force_queue, unsigned mss_now)
asmlinkage int printk(const char *fmt,...)
struct sk_buff_head write_queue
void __tcp_mem_reclaim(struct sock *sk)
void tcp_simple_retransmit(struct sock *)
static __inline unsigned char * __skb_push(struct sk_buff *skb, unsigned int len)
int tcp_child_process(struct sock *parent, struct sock *child, struct sk_buff *skb)
void tcp_send_partial(struct sock *)
static __inline void __skb_queue_purge(struct sk_buff_head *list)
static __inline void skb_append(struct sk_buff *old, struct sk_buff *newsk)
static __inline int tcp_ack_scheduled(struct tcp_opt *tp)
static __inline void skb_queue_head_init(struct sk_buff_head *list)
__u32 tcp_init_cwnd(struct tcp_opt *tp)
static __inline struct sk_buff * __dev_alloc_skb(unsigned int length, int gfp_mask)
struct sk_buff * skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
__kernel_size_t msg_controllen
void tcp_send_active_reset(struct sock *sk, int priority)
void __tcp_put_port(struct sock *sk)
int tcp_mem_schedule(struct sock *sk, int size, int kind)
void(* close)(struct sock *sk, long timeout)
static __inline struct sk_buff * tcp_alloc_pskb(struct sock *sk, int size, int mem, int gfp)
int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, struct tcphdr *th, unsigned len)
void kfree_skbmem(struct sk_buff *skb)
volatile unsigned char state
struct page * sndmsg_page
__kernel_size_t msg_iovlen
int(* conn_request)(struct sock *sk, struct sk_buff *skb)
static __inline struct page * tcp_alloc_page(struct sock *sk)
void tcp_shutdown(struct sock *sk, int how)
struct tcp_hashinfo tcp_hashinfo
static int nonblock(int fd, int isnonblock)
static __inline void tcp_free_skb(struct sock *sk, struct sk_buff *skb)
static __inline void skb_orphan(struct sk_buff *skb)
void tcp_enter_quickack_mode(struct tcp_opt *tp)
static int atomic_dec_and_test(atomic_t volatile *v)
static __inline unsigned char * skb_push(struct sk_buff *skb, unsigned int len)
struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, struct open_request *req, struct dst_entry *dst)
static __inline void tcp_synq_unlink(struct tcp_opt *tp, struct open_request *req, struct open_request **prev)
static __inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
struct sk_buff * frag_list
void tcp_bucket_unlock(struct sock *sk)
static __inline struct sk_buff * skb_share_check(struct sk_buff *skb, int pri)
struct sk_buff_head out_of_order_queue
void tcp_close(struct sock *sk, long timeout)
int tcp_transmit_skb(struct sock *, struct sk_buff *)
static __inline void __tcp_push_pending_frames(struct sock *sk, struct tcp_opt *tp, unsigned cur_mss, int nonagle)
static __inline int skb_cloned(struct sk_buff *skb)
static __inline int skb_shared(struct sk_buff *skb)
int wake_up(wait_queue_head_t *queue)
static __inline void tcp_synq_added(struct sock *sk)
struct sock * tcp_create_openreq_child(struct sock *sk, struct open_request *req, struct sk_buff *skb)
__inline int after(__u32 seq1, __u32 seq2)
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, int len, int nonblock, int flags, int *addr_len)
static __inline struct sk_buff * skb_dequeue(struct sk_buff_head *list)
void skb_over_panic(struct sk_buff *skb, int len, void *here)
atomic_t tcp_memory_allocated
static __inline void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd, __u32 *window_clamp, int wscale_ok, __u8 *rcv_wscale)
void(* destruct)(struct sock *sk)
spinlock_t __tcp_portalloc_lock
static __inline unsigned int tcp_packets_in_flight(struct tcp_opt *tp)
static __inline int tcp_may_send_now(struct sock *sk, struct tcp_opt *tp)
void(* shutdown)(struct sock *sk, int how)
struct open_request * accept_queue_tail
static __inline void * kmap_skb_frag(const skb_frag_t *frag)
static __inline int tcp_min_write_space(struct sock *sk)
struct sk_buff * send_head
static __inline struct sk_buff * skb_dequeue_tail(struct sk_buff_head *list)
static __inline void tcp_initialize_rcv_mss(struct sock *sk)
int(* disconnect)(struct sock *sk, int flags)
int tcp_connect(struct sock *sk)
static __inline void tcp_synq_drop(struct sock *sk, struct open_request *req, struct open_request **prev)
unsigned int datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait)
static void atomic_add(int volatile i, atomic_t volatile *v)
struct sk_buff_head receive_queue
int(* rtx_syn_ack)(struct sock *sk, struct open_request *req, struct dst_entry *)
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
static __inline u16 tcp_v4_check(struct tcphdr *th, int len, unsigned long saddr, unsigned long daddr, unsigned long base)
static __inline void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
struct sock * tcp_check_req(struct sock *sk, struct sk_buff *skb, struct open_request *req, struct open_request **prev)
static __inline void kunmap_skb_frag(void *vaddr)
int tcp_disconnect(struct sock *sk, int flags)
struct sk_buff * skb_copy_expand(const struct sk_buff *skb, int newheadroom, int newtailroom, int priority)
static __inline struct sk_buff * __skb_dequeue(struct sk_buff_head *list)
static __inline int tcp_wspace(struct sock *sk)
static __inline void tcp_tw_put(struct tcp_tw_bucket *tw)
static __inline int keepalive_time_when(struct tcp_opt *tp)
struct sock ** bind_pprev
void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
static __inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
int(* get_port)(struct sock *sk, unsigned short snum)
struct sock::@990 backlog
static __inline char * __skb_pull(struct sk_buff *skb, unsigned int len)
int skb_copy_datagram(const struct sk_buff *from, int offset, char *to, int size)
#define spin_unlock_irqrestore(sl, flags)
unsigned int keepalive_time
unsigned int __kernel_size_t
static __inline int tcp_synq_len(struct sock *sk)
void(* state_change)(struct sock *sk)
int tcp_v4_rebuild_header(struct sock *sk)
struct tcp_tw_bucket ** pprev_death
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
int(* backlog_rcv)(struct sock *sk, struct sk_buff *skb)
static __inline struct sk_buff * skb_peek(struct sk_buff_head *list_)
void __kfree_skb(struct sk_buff *skb)
void tcp_unhash(struct sock *sk)
static __inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
static __inline void tcp_schedule_ack(struct tcp_opt *tp)
struct tcp_bind_bucket * next
struct tcp_listen_opt * listen_opt
static __inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req, struct sock *child)
static __inline void tcp_openreq_init(struct open_request *req, struct tcp_opt *tp, struct sk_buff *skb)
static __inline int tcp_paws_check(struct tcp_opt *tp, int rst)
int(* sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, unsigned int, size_t)