003 File Manager
Current Path:
/usr/src/sys/netinet
usr
/
src
/
sys
/
netinet
/
📁
..
📄
accf_data.c
(1.95 KB)
📄
accf_dns.c
(3.37 KB)
📄
accf_http.c
(8.35 KB)
📁
cc
📄
dccp.h
(2.48 KB)
📄
icmp6.h
(26.97 KB)
📄
icmp_var.h
(3.78 KB)
📄
if_ether.c
(41.26 KB)
📄
if_ether.h
(4.62 KB)
📄
igmp.c
(98.93 KB)
📄
igmp.h
(5.36 KB)
📄
igmp_var.h
(8.69 KB)
📄
in.c
(43.68 KB)
📄
in.h
(25.81 KB)
📄
in_cksum.c
(4.09 KB)
📄
in_debug.c
(3.48 KB)
📄
in_fib.c
(8.62 KB)
📄
in_fib.h
(2.59 KB)
📄
in_fib_algo.c
(18.68 KB)
📄
in_gif.c
(11.76 KB)
📄
in_jail.c
(10.31 KB)
📄
in_kdtrace.c
(5.61 KB)
📄
in_kdtrace.h
(3.31 KB)
📄
in_mcast.c
(79.22 KB)
📄
in_pcb.c
(90.89 KB)
📄
in_pcb.h
(34.46 KB)
📄
in_pcbgroup.c
(17.28 KB)
📄
in_prot.c
(2.35 KB)
📄
in_proto.c
(9.36 KB)
📄
in_rmx.c
(4.99 KB)
📄
in_rss.c
(11.27 KB)
📄
in_rss.h
(2.42 KB)
📄
in_systm.h
(2.57 KB)
📄
in_var.h
(15.4 KB)
📄
ip.h
(7.37 KB)
📄
ip6.h
(8.93 KB)
📄
ip_carp.c
(53.25 KB)
📄
ip_carp.h
(6.79 KB)
📄
ip_divert.c
(21.74 KB)
📄
ip_divert.h
(2.32 KB)
📄
ip_dummynet.h
(8.94 KB)
📄
ip_ecn.c
(5.83 KB)
📄
ip_ecn.h
(2.1 KB)
📄
ip_encap.c
(10.59 KB)
📄
ip_encap.h
(3.13 KB)
📄
ip_fastfwd.c
(13.79 KB)
📄
ip_fw.h
(34.29 KB)
📄
ip_gre.c
(15.28 KB)
📄
ip_icmp.c
(31.15 KB)
📄
ip_icmp.h
(8.52 KB)
📄
ip_id.c
(9.78 KB)
📄
ip_input.c
(37.16 KB)
📄
ip_mroute.c
(75.86 KB)
📄
ip_mroute.h
(13.66 KB)
📄
ip_options.c
(19.9 KB)
📄
ip_options.h
(2.61 KB)
📄
ip_output.c
(40.67 KB)
📄
ip_reass.c
(22.42 KB)
📄
ip_var.h
(11.48 KB)
📁
khelp
📁
libalias
📁
netdump
📄
pim.h
(4.25 KB)
📄
pim_var.h
(3.14 KB)
📄
raw_ip.c
(28.58 KB)
📄
sctp.h
(22.97 KB)
📄
sctp_asconf.c
(100.05 KB)
📄
sctp_asconf.h
(3.31 KB)
📄
sctp_auth.c
(50.44 KB)
📄
sctp_auth.h
(8.52 KB)
📄
sctp_bsd_addr.c
(13.89 KB)
📄
sctp_bsd_addr.h
(2.45 KB)
📄
sctp_cc_functions.c
(68.75 KB)
📄
sctp_constants.h
(34.12 KB)
📄
sctp_crc32.c
(4.5 KB)
📄
sctp_crc32.h
(2.07 KB)
📄
sctp_header.h
(17.48 KB)
📄
sctp_indata.c
(175.36 KB)
📄
sctp_indata.h
(4.18 KB)
📄
sctp_input.c
(183.57 KB)
📄
sctp_input.h
(2.43 KB)
📄
sctp_kdtrace.c
(7.51 KB)
📄
sctp_kdtrace.h
(3.6 KB)
📄
sctp_lock_bsd.h
(15.64 KB)
📄
sctp_module.c
(5.16 KB)
📄
sctp_os.h
(2.73 KB)
📄
sctp_os_bsd.h
(15.04 KB)
📄
sctp_output.c
(362.89 KB)
📄
sctp_output.h
(6.38 KB)
📄
sctp_pcb.c
(198.87 KB)
📄
sctp_pcb.h
(19.31 KB)
📄
sctp_peeloff.c
(5.74 KB)
📄
sctp_peeloff.h
(2.05 KB)
📄
sctp_ss_functions.c
(30.72 KB)
📄
sctp_structs.h
(38.29 KB)
📄
sctp_syscalls.c
(13.94 KB)
📄
sctp_sysctl.c
(37.45 KB)
📄
sctp_sysctl.h
(22.9 KB)
📄
sctp_timer.c
(47.92 KB)
📄
sctp_timer.h
(3.07 KB)
📄
sctp_uio.h
(38.07 KB)
📄
sctp_usrreq.c
(216.08 KB)
📄
sctp_var.h
(12.21 KB)
📄
sctputil.c
(212.13 KB)
📄
sctputil.h
(11.69 KB)
📄
siftr.c
(45.2 KB)
📄
tcp.h
(18.3 KB)
📄
tcp_debug.c
(5.9 KB)
📄
tcp_debug.h
(2.65 KB)
📄
tcp_fastopen.c
(37.98 KB)
📄
tcp_fastopen.h
(3.7 KB)
📄
tcp_fsm.h
(4.25 KB)
📄
tcp_hostcache.c
(21.43 KB)
📄
tcp_hostcache.h
(2.92 KB)
📄
tcp_hpts.c
(57.55 KB)
📄
tcp_hpts.h
(9.89 KB)
📄
tcp_input.c
(115.95 KB)
📄
tcp_log_buf.c
(76.37 KB)
📄
tcp_log_buf.h
(14.88 KB)
📄
tcp_lro.c
(36.38 KB)
📄
tcp_lro.h
(4.43 KB)
📄
tcp_offload.c
(5.48 KB)
📄
tcp_offload.h
(2.09 KB)
📄
tcp_output.c
(60.16 KB)
📄
tcp_pcap.c
(12.67 KB)
📄
tcp_pcap.h
(1.76 KB)
📄
tcp_ratelimit.c
(43.8 KB)
📄
tcp_ratelimit.h
(5.07 KB)
📄
tcp_reass.c
(30.75 KB)
📄
tcp_sack.c
(26.74 KB)
📄
tcp_seq.h
(3.57 KB)
📁
tcp_stacks
📄
tcp_stats.c
(8.47 KB)
📄
tcp_subr.c
(92.74 KB)
📄
tcp_syncache.c
(73 KB)
📄
tcp_syncache.h
(5.16 KB)
📄
tcp_timer.c
(31.04 KB)
📄
tcp_timer.h
(9.29 KB)
📄
tcp_timewait.c
(19.06 KB)
📄
tcp_usrreq.c
(71.35 KB)
📄
tcp_var.h
(43.48 KB)
📄
tcpip.h
(2.29 KB)
📄
toecore.c
(13.51 KB)
📄
toecore.h
(5.13 KB)
📄
udp.h
(2.49 KB)
📄
udp_usrreq.c
(44.57 KB)
📄
udp_var.h
(6.48 KB)
📄
udplite.h
(1.91 KB)
Editing: tcp_pcap.c
/*- * Copyright (c) 2015 * Jonathan Looney. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include <sys/queue.h> #include <sys/param.h> #include <sys/types.h> #include <sys/socket.h> #include <sys/socketvar.h> #include <sys/sysctl.h> #include <sys/systm.h> #include <sys/mbuf.h> #include <sys/eventhandler.h> #include <machine/atomic.h> #include <netinet/tcp_var.h> #include <netinet/tcp_pcap.h> #define M_LEADINGSPACE_NOWRITE(m) \ ((m)->m_data - M_START(m)) int tcp_pcap_aggressive_free = 1; static int tcp_pcap_clusters_referenced_cur = 0; static int tcp_pcap_clusters_referenced_max = 0; SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_pcap_aggressive_free, CTLFLAG_RW, &tcp_pcap_aggressive_free, 0, "Free saved packets when the memory system comes under pressure"); SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_pcap_clusters_referenced_cur, CTLFLAG_RD, &tcp_pcap_clusters_referenced_cur, 0, "Number of clusters currently referenced on TCP PCAP queues"); SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_pcap_clusters_referenced_max, CTLFLAG_RW, &tcp_pcap_clusters_referenced_max, 0, "Maximum number of clusters allowed to be referenced on TCP PCAP " "queues"); static int tcp_pcap_alloc_reuse_ext = 0; static int tcp_pcap_alloc_reuse_mbuf = 0; static int tcp_pcap_alloc_new_mbuf = 0; SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_pcap_alloc_reuse_ext, CTLFLAG_RD, &tcp_pcap_alloc_reuse_ext, 0, "Number of mbufs with external storage reused for the TCP PCAP " "functionality"); SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_pcap_alloc_reuse_mbuf, CTLFLAG_RD, &tcp_pcap_alloc_reuse_mbuf, 0, "Number of mbufs with internal storage reused for the TCP PCAP " "functionality"); SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_pcap_alloc_new_mbuf, CTLFLAG_RD, &tcp_pcap_alloc_new_mbuf, 0, "Number of new mbufs allocated for the TCP PCAP functionality"); VNET_DEFINE(int, tcp_pcap_packets) = 0; #define V_tcp_pcap_packets VNET(tcp_pcap_packets) SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_pcap_packets, CTLFLAG_RW, &VNET_NAME(tcp_pcap_packets), 0, "Default number of packets saved per direction per TCPCB"); /* Initialize 
the values. */ static void tcp_pcap_max_set(void) { tcp_pcap_clusters_referenced_max = nmbclusters / 4; } void tcp_pcap_init(void) { tcp_pcap_max_set(); EVENTHANDLER_REGISTER(nmbclusters_change, tcp_pcap_max_set, NULL, EVENTHANDLER_PRI_ANY); } /* * If we are below the maximum allowed cluster references, * increment the reference count and return TRUE. Otherwise, * leave the reference count alone and return FALSE. */ static __inline bool tcp_pcap_take_cluster_reference(void) { if (atomic_fetchadd_int(&tcp_pcap_clusters_referenced_cur, 1) >= tcp_pcap_clusters_referenced_max) { atomic_add_int(&tcp_pcap_clusters_referenced_cur, -1); return FALSE; } return TRUE; } /* * For all the external entries in m, apply the given adjustment. * This can be used to adjust the counter when an mbuf chain is * copied or freed. */ static __inline void tcp_pcap_adj_cluster_reference(struct mbuf *m, int adj) { while (m) { if (m->m_flags & M_EXT) atomic_add_int(&tcp_pcap_clusters_referenced_cur, adj); m = m->m_next; } } /* * Free all mbufs in a chain, decrementing the reference count as * necessary. * * Functions in this file should use this instead of m_freem() when * they are freeing mbuf chains that may contain clusters that were * already included in tcp_pcap_clusters_referenced_cur. */ static void tcp_pcap_m_freem(struct mbuf *mb) { while (mb != NULL) { if (mb->m_flags & M_EXT) atomic_subtract_int(&tcp_pcap_clusters_referenced_cur, 1); mb = m_free(mb); } } /* * Copy data from m to n, where n cannot fit all the data we might * want from m. * * Prioritize data like this: * 1. TCP header * 2. IP header * 3. Data */ static void tcp_pcap_copy_bestfit(struct tcphdr *th, struct mbuf *m, struct mbuf *n) { struct mbuf *m_cur = m; int bytes_to_copy=0, trailing_data, skip=0, tcp_off; /* Below, we assume these will be non-NULL. 
 */
	KASSERT(th, ("%s: called with th == NULL", __func__));
	KASSERT(m, ("%s: called with m == NULL", __func__));
	KASSERT(n, ("%s: called with n == NULL", __func__));

	/* We assume this initialization occurred elsewhere. */
	KASSERT(n->m_len == 0, ("%s: called with n->m_len=%d (expected 0)",
	    __func__, n->m_len));
	KASSERT(n->m_data == M_START(n),
	    ("%s: called with n->m_data != M_START(n)", __func__));

	/*
	 * Calculate the size of the TCP header. We use this often
	 * enough that it is worth just calculating at the start.
	 */
	tcp_off = th->th_off << 2;

	/* Trim off leading empty mbufs. */
	while (m && m->m_len == 0)
		m = m->m_next;

	if (m) {
		m_cur = m;
	}
	else {
		/*
		 * No data? Highly unusual. We would expect to at
		 * least see a TCP header in the mbuf.
		 * As we have a pointer to the TCP header, I guess
		 * we should just copy that. (???)
		 */
fallback:
		bytes_to_copy = tcp_off;
		if (bytes_to_copy > M_SIZE(n))
			bytes_to_copy = M_SIZE(n);
		bcopy(th, n->m_data, bytes_to_copy);
		n->m_len = bytes_to_copy;
		return;
	}

	/*
	 * Find TCP header. Record the total number of bytes up to,
	 * and including, the TCP header.
	 */
	while (m_cur) {
		if ((caddr_t) th >= (caddr_t) m_cur->m_data &&
		    (caddr_t) th < (caddr_t) (m_cur->m_data + m_cur->m_len))
			break;
		bytes_to_copy += m_cur->m_len;
		m_cur = m_cur->m_next;
	}
	if (m_cur)
		bytes_to_copy += (caddr_t) th - (caddr_t) m_cur->m_data;
	else
		goto fallback;
	bytes_to_copy += tcp_off;

	/*
	 * If we already want to copy more bytes than we can hold
	 * in the destination mbuf, skip leading bytes and copy
	 * what we can.
	 *
	 * Otherwise, consider trailing data.
	 */
	if (bytes_to_copy > M_SIZE(n)) {
		skip = bytes_to_copy - M_SIZE(n);
		bytes_to_copy = M_SIZE(n);
	}
	else {
		/*
		 * Determine how much trailing data is in the chain.
		 * We start with the length of this mbuf (the one
		 * containing th) and subtract the size of the TCP
		 * header (tcp_off) and the size of the data prior
		 * to th (th - m_cur->m_data).
		 *
		 * This *should not* be negative, as the TCP code
		 * should put the whole TCP header in a single
		 * mbuf. But, it isn't a problem if it is. We will
		 * simply work off our negative balance as we look
		 * at subsequent mbufs.
		 */
		trailing_data = m_cur->m_len - tcp_off;
		trailing_data -= (caddr_t) th - (caddr_t) m_cur->m_data;
		m_cur = m_cur->m_next;
		while (m_cur) {
			trailing_data += m_cur->m_len;
			m_cur = m_cur->m_next;
		}
		if ((bytes_to_copy + trailing_data) > M_SIZE(n))
			bytes_to_copy = M_SIZE(n);
		else
			bytes_to_copy += trailing_data;
	}

	m_copydata(m, skip, bytes_to_copy, n->m_data);
	n->m_len = bytes_to_copy;
}

/*
 * Save a copy of the packet (th, m) on the given capture queue,
 * recycling queued mbufs or sharing clusters where possible.
 */
void
tcp_pcap_add(struct tcphdr *th, struct mbuf *m, struct mbufq *queue)
{
	struct mbuf *n = NULL, *mhead;

	KASSERT(th, ("%s: called with th == NULL", __func__));
	KASSERT(m, ("%s: called with m == NULL", __func__));
	KASSERT(queue, ("%s: called with queue == NULL", __func__));

	/* We only care about data packets. */
	while (m && m->m_type != MT_DATA)
		m = m->m_next;

	/* We only need to do something if we still have an mbuf. */
	if (!m)
		return;

	/* If we are not saving mbufs, return now. */
	if (queue->mq_maxlen == 0)
		return;

	/*
	 * Check to see if we will need to recycle mbufs.
	 *
	 * If we need to get rid of mbufs to stay below
	 * our packet count, try to reuse the mbuf. Once
	 * we already have a new mbuf (n), then we can
	 * simply free subsequent mbufs.
	 *
	 * Note that most of the logic in here is to deal
	 * with the reuse. If we are fine with constant
	 * mbuf allocs/deallocs, we could ditch this logic.
	 * But, it only seems to make sense to reuse
	 * mbufs we already have.
	 */
	while (mbufq_full(queue)) {
		mhead = mbufq_dequeue(queue);
		if (n) {
			tcp_pcap_m_freem(mhead);
		}
		else {
			/*
			 * If this held an external cluster, try to
			 * detach the cluster. But, if we held the
			 * last reference, go through the normal
			 * free-ing process.
			 */
			if (mhead->m_flags & M_EXTPG) {
				/* Don't mess around with these. */
				tcp_pcap_m_freem(mhead);
				continue;
			} else if (mhead->m_flags & M_EXT) {
				switch (mhead->m_ext.ext_type) {
				case EXT_SFBUF:
					/* Don't mess around with these. */
					tcp_pcap_m_freem(mhead);
					continue;
				default:
					if (atomic_fetchadd_int(
						mhead->m_ext.ext_cnt, -1) == 1)
					{
						/*
						 * We held the last reference
						 * on this cluster. Restore
						 * the reference count and put
						 * it back in the pool.
						 */
						*(mhead->m_ext.ext_cnt) = 1;
						tcp_pcap_m_freem(mhead);
						continue;
					}
					/*
					 * We were able to cleanly free the
					 * reference.
					 */
					atomic_subtract_int(
					    &tcp_pcap_clusters_referenced_cur,
					    1);
					tcp_pcap_alloc_reuse_ext++;
					break;
				}
			} else {
				tcp_pcap_alloc_reuse_mbuf++;
			}

			/* Reuse mhead as the destination mbuf. */
			n = mhead;
			tcp_pcap_m_freem(n->m_next);
			m_init(n, M_NOWAIT, MT_DATA, 0);
		}
	}

	/* Check to see if we need to get a new mbuf. */
	if (!n) {
		if (!(n = m_get(M_NOWAIT, MT_DATA)))
			return;
		tcp_pcap_alloc_new_mbuf++;
	}

	/*
	 * What are we dealing with? If a cluster, attach it. Otherwise,
	 * try to copy the data from the beginning of the mbuf to the
	 * end of data. (There may be data between the start of the data
	 * area and the current data pointer. We want to get this, because
	 * it may contain header information that is useful.)
	 * In cases where that isn't possible, settle for what we can
	 * get.
	 */
	if ((m->m_flags & (M_EXT|M_EXTPG)) &&
	    tcp_pcap_take_cluster_reference()) {
		n->m_data = m->m_data;
		n->m_len = m->m_len;
		mb_dupcl(n, m);
	}
	else if (((m->m_data + m->m_len) - M_START(m)) <= M_SIZE(n)) {
		/*
		 * At this point, n is guaranteed to be a normal mbuf
		 * with no cluster and no packet header. Because the
		 * logic in this code block requires this, the assert
		 * is here to catch any instances where someone
		 * changes the logic to invalidate that assumption.
		 */
		KASSERT((n->m_flags & (M_EXT | M_PKTHDR)) == 0,
		    ("%s: Unexpected flags (%#x) for mbuf",
		    __func__, n->m_flags));
		n->m_data = n->m_dat + M_LEADINGSPACE_NOWRITE(m);
		n->m_len = m->m_len;
		if (m->m_flags & M_EXTPG)
			m_copydata(m, 0, m->m_len, n->m_data);
		else
			bcopy(M_START(m), n->m_dat,
			    m->m_len + M_LEADINGSPACE_NOWRITE(m));
	}
	else {
		/*
		 * This is the case where we need to "settle for what
		 * we can get". The most probable way to this code
		 * path is that we've already taken references to the
		 * maximum number of mbuf clusters we can, and the data
		 * is too long to fit in an mbuf's internal storage.
		 * Try for a "best fit".
		 */
		tcp_pcap_copy_bestfit(th, m, n);

		/* Don't try to get additional data. */
		goto add_to_queue;
	}

	if (m->m_next) {
		n->m_next = m_copym(m->m_next, 0, M_COPYALL, M_NOWAIT);
		tcp_pcap_adj_cluster_reference(n->m_next, 1);
	}

add_to_queue:
	/* Add the new mbuf to the list. */
	if (mbufq_enqueue(queue, n)) {
		/* This shouldn't happen. If INVARIANTS is defined, panic. */
		KASSERT(0, ("%s: mbufq was unexpectedly full!", __func__));
		tcp_pcap_m_freem(n);
	}
}

/* Free every packet currently held on the given capture queue. */
void
tcp_pcap_drain(struct mbufq *queue)
{
	struct mbuf *m;
	while ((m = mbufq_dequeue(queue)))
		tcp_pcap_m_freem(m);
}

/* Initialize the per-connection capture queues to the VNET default depth. */
void
tcp_pcap_tcpcb_init(struct tcpcb *tp)
{
	mbufq_init(&(tp->t_inpkts), V_tcp_pcap_packets);
	mbufq_init(&(tp->t_outpkts), V_tcp_pcap_packets);
}

/*
 * Change the queue's maximum depth, dropping the oldest saved packets
 * until the queue fits within the new limit.
 */
void
tcp_pcap_set_sock_max(struct mbufq *queue, int newval)
{
	queue->mq_maxlen = newval;
	while (queue->mq_len > queue->mq_maxlen)
		tcp_pcap_m_freem(mbufq_dequeue(queue));
}

/* Report the queue's maximum depth. */
int
tcp_pcap_get_sock_max(struct mbufq *queue)
{
	return queue->mq_maxlen;
}
Upload File
Create Folder