/*
 * inet_diag.c	Module for monitoring INET transport protocols sockets.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/time.h>

#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/inet6_hashtables.h>
#include <net/netlink.h>

#include <linux/inet.h>
#include <linux/stddef.h>

#include <linux/inet_diag.h>
#include <linux/sock_diag.h>
38 static const struct inet_diag_handler **inet_diag_table;
40 struct inet_diag_entry {
49 static DEFINE_MUTEX(inet_diag_table_mutex);
51 static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
53 if (!inet_diag_table[proto])
54 request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
55 NETLINK_SOCK_DIAG, AF_INET, proto);
57 mutex_lock(&inet_diag_table_mutex);
58 if (!inet_diag_table[proto])
59 return ERR_PTR(-ENOENT);
61 return inet_diag_table[proto];
64 static void inet_diag_unlock_handler(const struct inet_diag_handler *handler)
66 mutex_unlock(&inet_diag_table_mutex);
69 static void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk)
71 r->idiag_family = sk->sk_family;
73 r->id.idiag_sport = htons(sk->sk_num);
74 r->id.idiag_dport = sk->sk_dport;
75 r->id.idiag_if = sk->sk_bound_dev_if;
76 sock_diag_save_cookie(sk, r->id.idiag_cookie);
78 #if IS_ENABLED(CONFIG_IPV6)
79 if (sk->sk_family == AF_INET6) {
80 *(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr;
81 *(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr;
85 memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
86 memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
88 r->id.idiag_src[0] = sk->sk_rcv_saddr;
89 r->id.idiag_dst[0] = sk->sk_daddr;
93 int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
94 struct sk_buff *skb, const struct inet_diag_req_v2 *req,
95 struct user_namespace *user_ns,
96 u32 portid, u32 seq, u16 nlmsg_flags,
97 const struct nlmsghdr *unlh)
99 const struct inet_sock *inet = inet_sk(sk);
100 const struct inet_diag_handler *handler;
101 int ext = req->idiag_ext;
102 struct inet_diag_msg *r;
103 struct nlmsghdr *nlh;
107 handler = inet_diag_table[req->sdiag_protocol];
110 nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
116 BUG_ON(!sk_fullsock(sk));
118 inet_diag_msg_common_fill(r, sk);
119 r->idiag_state = sk->sk_state;
121 r->idiag_retrans = 0;
123 if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
126 /* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
127 * hence this needs to be included regardless of socket family.
129 if (ext & (1 << (INET_DIAG_TOS - 1)))
130 if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
133 #if IS_ENABLED(CONFIG_IPV6)
134 if (r->idiag_family == AF_INET6) {
135 if (ext & (1 << (INET_DIAG_TCLASS - 1)))
136 if (nla_put_u8(skb, INET_DIAG_TCLASS,
137 inet6_sk(sk)->tclass) < 0)
142 r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
143 r->idiag_inode = sock_i_ino(sk);
145 if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
146 struct inet_diag_meminfo minfo = {
147 .idiag_rmem = sk_rmem_alloc_get(sk),
148 .idiag_wmem = sk->sk_wmem_queued,
149 .idiag_fmem = sk->sk_forward_alloc,
150 .idiag_tmem = sk_wmem_alloc_get(sk),
153 if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0)
157 if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
158 if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
162 handler->idiag_get_info(sk, r, NULL);
166 #define EXPIRES_IN_MS(tmo) DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)
168 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
169 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
170 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
172 r->idiag_retrans = icsk->icsk_retransmits;
173 r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
174 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
176 r->idiag_retrans = icsk->icsk_probes_out;
177 r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
178 } else if (timer_pending(&sk->sk_timer)) {
180 r->idiag_retrans = icsk->icsk_probes_out;
181 r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
184 r->idiag_expires = 0;
188 if (ext & (1 << (INET_DIAG_INFO - 1))) {
189 attr = nla_reserve(skb, INET_DIAG_INFO,
190 sizeof(struct tcp_info));
194 info = nla_data(attr);
197 if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops)
198 if (nla_put_string(skb, INET_DIAG_CONG,
199 icsk->icsk_ca_ops->name) < 0)
202 handler->idiag_get_info(sk, r, info);
204 if (sk->sk_state < TCP_TIME_WAIT &&
205 icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
206 icsk->icsk_ca_ops->get_info(sk, ext, skb);
213 nlmsg_cancel(skb, nlh);
216 EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
218 static int inet_csk_diag_fill(struct sock *sk,
220 const struct inet_diag_req_v2 *req,
221 struct user_namespace *user_ns,
222 u32 portid, u32 seq, u16 nlmsg_flags,
223 const struct nlmsghdr *unlh)
225 return inet_sk_diag_fill(sk, inet_csk(sk), skb, req,
226 user_ns, portid, seq, nlmsg_flags, unlh);
229 static int inet_twsk_diag_fill(struct sock *sk,
231 u32 portid, u32 seq, u16 nlmsg_flags,
232 const struct nlmsghdr *unlh)
234 struct inet_timewait_sock *tw = inet_twsk(sk);
235 struct inet_diag_msg *r;
236 struct nlmsghdr *nlh;
239 nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
245 BUG_ON(tw->tw_state != TCP_TIME_WAIT);
247 tmo = tw->tw_ttd - inet_tw_time_stamp();
251 inet_diag_msg_common_fill(r, sk);
252 r->idiag_retrans = 0;
254 r->idiag_state = tw->tw_substate;
256 r->idiag_expires = jiffies_to_msecs(tmo);
266 static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
267 u32 portid, u32 seq, u16 nlmsg_flags,
268 const struct nlmsghdr *unlh)
270 struct inet_diag_msg *r;
271 struct nlmsghdr *nlh;
274 nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
280 inet_diag_msg_common_fill(r, sk);
281 r->idiag_state = TCP_SYN_RECV;
283 r->idiag_retrans = inet_reqsk(sk)->num_retrans;
285 BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) !=
286 offsetof(struct sock, sk_cookie));
288 tmo = inet_reqsk(sk)->rsk_timer.expires - jiffies;
289 r->idiag_expires = (tmo >= 0) ? jiffies_to_msecs(tmo) : 0;
299 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
300 const struct inet_diag_req_v2 *r,
301 struct user_namespace *user_ns,
302 u32 portid, u32 seq, u16 nlmsg_flags,
303 const struct nlmsghdr *unlh)
305 if (sk->sk_state == TCP_TIME_WAIT)
306 return inet_twsk_diag_fill(sk, skb, portid, seq,
309 if (sk->sk_state == TCP_NEW_SYN_RECV)
310 return inet_req_diag_fill(sk, skb, portid, seq,
313 return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
317 int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
318 struct sk_buff *in_skb,
319 const struct nlmsghdr *nlh,
320 const struct inet_diag_req_v2 *req)
322 struct net *net = sock_net(in_skb->sk);
328 if (req->sdiag_family == AF_INET)
329 sk = inet_lookup(net, hashinfo, req->id.idiag_dst[0],
330 req->id.idiag_dport, req->id.idiag_src[0],
331 req->id.idiag_sport, req->id.idiag_if);
332 #if IS_ENABLED(CONFIG_IPV6)
333 else if (req->sdiag_family == AF_INET6)
334 sk = inet6_lookup(net, hashinfo,
335 (struct in6_addr *)req->id.idiag_dst,
337 (struct in6_addr *)req->id.idiag_src,
348 err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
352 rep = nlmsg_new(sizeof(struct inet_diag_msg) +
353 sizeof(struct inet_diag_meminfo) +
354 sizeof(struct tcp_info) + 64, GFP_KERNEL);
360 err = sk_diag_fill(sk, rep, req,
361 sk_user_ns(NETLINK_CB(in_skb).sk),
362 NETLINK_CB(in_skb).portid,
363 nlh->nlmsg_seq, 0, nlh);
365 WARN_ON(err == -EMSGSIZE);
369 err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
381 EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);
383 static int inet_diag_get_exact(struct sk_buff *in_skb,
384 const struct nlmsghdr *nlh,
385 const struct inet_diag_req_v2 *req)
387 const struct inet_diag_handler *handler;
390 handler = inet_diag_lock_handler(req->sdiag_protocol);
392 err = PTR_ERR(handler);
394 err = handler->dump_one(in_skb, nlh, req);
395 inet_diag_unlock_handler(handler);
400 static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
402 int words = bits >> 5;
407 if (memcmp(a1, a2, words << 2))
417 mask = htonl((0xffffffff) << (32 - bits));
419 if ((w1 ^ w2) & mask)
426 static int inet_diag_bc_run(const struct nlattr *_bc,
427 const struct inet_diag_entry *entry)
429 const void *bc = nla_data(_bc);
430 int len = nla_len(_bc);
434 const struct inet_diag_bc_op *op = bc;
437 case INET_DIAG_BC_NOP:
439 case INET_DIAG_BC_JMP:
442 case INET_DIAG_BC_S_GE:
443 yes = entry->sport >= op[1].no;
445 case INET_DIAG_BC_S_LE:
446 yes = entry->sport <= op[1].no;
448 case INET_DIAG_BC_D_GE:
449 yes = entry->dport >= op[1].no;
451 case INET_DIAG_BC_D_LE:
452 yes = entry->dport <= op[1].no;
454 case INET_DIAG_BC_AUTO:
455 yes = !(entry->userlocks & SOCK_BINDPORT_LOCK);
457 case INET_DIAG_BC_S_COND:
458 case INET_DIAG_BC_D_COND: {
459 const struct inet_diag_hostcond *cond;
462 cond = (const struct inet_diag_hostcond *)(op + 1);
463 if (cond->port != -1 &&
464 cond->port != (op->code == INET_DIAG_BC_S_COND ?
465 entry->sport : entry->dport)) {
470 if (op->code == INET_DIAG_BC_S_COND)
475 if (cond->family != AF_UNSPEC &&
476 cond->family != entry->family) {
477 if (entry->family == AF_INET6 &&
478 cond->family == AF_INET) {
479 if (addr[0] == 0 && addr[1] == 0 &&
480 addr[2] == htonl(0xffff) &&
481 bitstring_match(addr + 3,
490 if (cond->prefix_len == 0)
492 if (bitstring_match(addr, cond->addr,
511 /* This helper is available for all sockets (ESTABLISH, TIMEWAIT, SYN_RECV)
513 static void entry_fill_addrs(struct inet_diag_entry *entry,
514 const struct sock *sk)
516 #if IS_ENABLED(CONFIG_IPV6)
517 if (sk->sk_family == AF_INET6) {
518 entry->saddr = sk->sk_v6_rcv_saddr.s6_addr32;
519 entry->daddr = sk->sk_v6_daddr.s6_addr32;
523 entry->saddr = &sk->sk_rcv_saddr;
524 entry->daddr = &sk->sk_daddr;
528 int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
530 struct inet_sock *inet = inet_sk(sk);
531 struct inet_diag_entry entry;
536 entry.family = sk->sk_family;
537 entry_fill_addrs(&entry, sk);
538 entry.sport = inet->inet_num;
539 entry.dport = ntohs(inet->inet_dport);
540 entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0;
542 return inet_diag_bc_run(bc, &entry);
544 EXPORT_SYMBOL_GPL(inet_diag_bc_sk);
546 static int valid_cc(const void *bc, int len, int cc)
549 const struct inet_diag_bc_op *op = bc;
555 if (op->yes < 4 || op->yes & 3)
563 /* Validate an inet_diag_hostcond. */
564 static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
567 struct inet_diag_hostcond *cond;
570 /* Check hostcond space. */
571 *min_len += sizeof(struct inet_diag_hostcond);
574 cond = (struct inet_diag_hostcond *)(op + 1);
576 /* Check address family and address length. */
577 switch (cond->family) {
582 addr_len = sizeof(struct in_addr);
585 addr_len = sizeof(struct in6_addr);
590 *min_len += addr_len;
594 /* Check prefix length (in bits) vs address length (in bytes). */
595 if (cond->prefix_len > 8 * addr_len)
601 /* Validate a port comparison operator. */
602 static bool valid_port_comparison(const struct inet_diag_bc_op *op,
603 int len, int *min_len)
605 /* Port comparisons put the port in a follow-on inet_diag_bc_op. */
606 *min_len += sizeof(struct inet_diag_bc_op);
612 static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
614 const void *bc = bytecode;
615 int len = bytecode_len;
618 int min_len = sizeof(struct inet_diag_bc_op);
619 const struct inet_diag_bc_op *op = bc;
622 case INET_DIAG_BC_S_COND:
623 case INET_DIAG_BC_D_COND:
624 if (!valid_hostcond(bc, len, &min_len))
627 case INET_DIAG_BC_S_GE:
628 case INET_DIAG_BC_S_LE:
629 case INET_DIAG_BC_D_GE:
630 case INET_DIAG_BC_D_LE:
631 if (!valid_port_comparison(bc, len, &min_len))
634 case INET_DIAG_BC_AUTO:
635 case INET_DIAG_BC_JMP:
636 case INET_DIAG_BC_NOP:
642 if (op->code != INET_DIAG_BC_NOP) {
643 if (op->no < min_len || op->no > len + 4 || op->no & 3)
646 !valid_cc(bytecode, bytecode_len, len - op->no))
650 if (op->yes < min_len || op->yes > len + 4 || op->yes & 3)
655 return len == 0 ? 0 : -EINVAL;
658 static int inet_csk_diag_dump(struct sock *sk,
660 struct netlink_callback *cb,
661 const struct inet_diag_req_v2 *r,
662 const struct nlattr *bc)
664 if (!inet_diag_bc_sk(bc, sk))
667 return inet_csk_diag_fill(sk, skb, r,
668 sk_user_ns(NETLINK_CB(cb->skb).sk),
669 NETLINK_CB(cb->skb).portid,
670 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
673 static void twsk_build_assert(void)
675 BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_family) !=
676 offsetof(struct sock, sk_family));
678 BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_num) !=
679 offsetof(struct inet_sock, inet_num));
681 BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_dport) !=
682 offsetof(struct inet_sock, inet_dport));
684 BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_rcv_saddr) !=
685 offsetof(struct inet_sock, inet_rcv_saddr));
687 BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_daddr) !=
688 offsetof(struct inet_sock, inet_daddr));
690 #if IS_ENABLED(CONFIG_IPV6)
691 BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_rcv_saddr) !=
692 offsetof(struct sock, sk_v6_rcv_saddr));
694 BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_daddr) !=
695 offsetof(struct sock, sk_v6_daddr));
699 static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
700 struct netlink_callback *cb,
701 const struct inet_diag_req_v2 *r,
702 const struct nlattr *bc)
704 struct inet_connection_sock *icsk = inet_csk(sk);
705 struct inet_sock *inet = inet_sk(sk);
706 struct inet_diag_entry entry;
707 int j, s_j, reqnum, s_reqnum;
708 struct listen_sock *lopt;
712 s_reqnum = cb->args[4];
717 entry.family = sk->sk_family;
719 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
721 lopt = icsk->icsk_accept_queue.listen_opt;
722 if (!lopt || !listen_sock_qlen(lopt))
726 entry.sport = inet->inet_num;
727 entry.userlocks = sk->sk_userlocks;
730 for (j = s_j; j < lopt->nr_table_entries; j++) {
731 struct request_sock *req, *head = lopt->syn_table[j];
734 for (req = head; req; reqnum++, req = req->dl_next) {
735 struct inet_request_sock *ireq = inet_rsk(req);
737 if (reqnum < s_reqnum)
739 if (r->id.idiag_dport != ireq->ir_rmt_port &&
744 /* Note: entry.sport and entry.userlocks are already set */
745 entry_fill_addrs(&entry, req_to_sk(req));
746 entry.dport = ntohs(ireq->ir_rmt_port);
748 if (!inet_diag_bc_run(bc, &entry))
752 err = inet_req_diag_fill(req_to_sk(req), skb,
753 NETLINK_CB(cb->skb).portid,
755 NLM_F_MULTI, cb->nlh);
758 cb->args[4] = reqnum;
767 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
772 void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
773 struct netlink_callback *cb,
774 const struct inet_diag_req_v2 *r, struct nlattr *bc)
776 struct net *net = sock_net(skb->sk);
777 int i, num, s_i, s_num;
780 s_num = num = cb->args[2];
782 if (cb->args[0] == 0) {
783 if (!(r->idiag_states & (TCPF_LISTEN | TCPF_SYN_RECV)))
786 for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
787 struct inet_listen_hashbucket *ilb;
788 struct hlist_nulls_node *node;
792 ilb = &hashinfo->listening_hash[i];
793 spin_lock_bh(&ilb->lock);
794 sk_nulls_for_each(sk, node, &ilb->head) {
795 struct inet_sock *inet = inet_sk(sk);
797 if (!net_eq(sock_net(sk), net))
805 if (r->sdiag_family != AF_UNSPEC &&
806 sk->sk_family != r->sdiag_family)
809 if (r->id.idiag_sport != inet->inet_sport &&
813 if (!(r->idiag_states & TCPF_LISTEN) ||
818 if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
819 spin_unlock_bh(&ilb->lock);
824 if (!(r->idiag_states & TCPF_SYN_RECV))
827 if (inet_diag_dump_reqs(skb, sk, cb, r, bc) < 0) {
828 spin_unlock_bh(&ilb->lock);
837 spin_unlock_bh(&ilb->lock);
845 s_i = num = s_num = 0;
848 if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
851 for (i = s_i; i <= hashinfo->ehash_mask; i++) {
852 struct inet_ehash_bucket *head = &hashinfo->ehash[i];
853 spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
854 struct hlist_nulls_node *node;
859 if (hlist_nulls_empty(&head->chain))
866 sk_nulls_for_each(sk, node, &head->chain) {
869 if (!net_eq(sock_net(sk), net))
873 state = (sk->sk_state == TCP_TIME_WAIT) ?
874 inet_twsk(sk)->tw_substate : sk->sk_state;
875 if (!(r->idiag_states & (1 << state)))
877 if (r->sdiag_family != AF_UNSPEC &&
878 sk->sk_family != r->sdiag_family)
880 if (r->id.idiag_sport != htons(sk->sk_num) &&
883 if (r->id.idiag_dport != sk->sk_dport &&
888 if (!inet_diag_bc_sk(bc, sk))
891 res = sk_diag_fill(sk, skb, r,
892 sk_user_ns(NETLINK_CB(cb->skb).sk),
893 NETLINK_CB(cb->skb).portid,
894 cb->nlh->nlmsg_seq, NLM_F_MULTI,
897 spin_unlock_bh(lock);
904 spin_unlock_bh(lock);
913 EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);
915 static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
916 const struct inet_diag_req_v2 *r,
919 const struct inet_diag_handler *handler;
922 handler = inet_diag_lock_handler(r->sdiag_protocol);
923 if (!IS_ERR(handler))
924 handler->dump(skb, cb, r, bc);
926 err = PTR_ERR(handler);
927 inet_diag_unlock_handler(handler);
929 return err ? : skb->len;
932 static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
934 int hdrlen = sizeof(struct inet_diag_req_v2);
935 struct nlattr *bc = NULL;
937 if (nlmsg_attrlen(cb->nlh, hdrlen))
938 bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
940 return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh), bc);
943 static int inet_diag_type2proto(int type)
946 case TCPDIAG_GETSOCK:
948 case DCCPDIAG_GETSOCK:
955 static int inet_diag_dump_compat(struct sk_buff *skb,
956 struct netlink_callback *cb)
958 struct inet_diag_req *rc = nlmsg_data(cb->nlh);
959 int hdrlen = sizeof(struct inet_diag_req);
960 struct inet_diag_req_v2 req;
961 struct nlattr *bc = NULL;
963 req.sdiag_family = AF_UNSPEC; /* compatibility */
964 req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
965 req.idiag_ext = rc->idiag_ext;
966 req.idiag_states = rc->idiag_states;
969 if (nlmsg_attrlen(cb->nlh, hdrlen))
970 bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
972 return __inet_diag_dump(skb, cb, &req, bc);
975 static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
976 const struct nlmsghdr *nlh)
978 struct inet_diag_req *rc = nlmsg_data(nlh);
979 struct inet_diag_req_v2 req;
981 req.sdiag_family = rc->idiag_family;
982 req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
983 req.idiag_ext = rc->idiag_ext;
984 req.idiag_states = rc->idiag_states;
987 return inet_diag_get_exact(in_skb, nlh, &req);
990 static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
992 int hdrlen = sizeof(struct inet_diag_req);
993 struct net *net = sock_net(skb->sk);
995 if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
996 nlmsg_len(nlh) < hdrlen)
999 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1000 if (nlmsg_attrlen(nlh, hdrlen)) {
1001 struct nlattr *attr;
1003 attr = nlmsg_find_attr(nlh, hdrlen,
1004 INET_DIAG_REQ_BYTECODE);
1006 nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
1007 inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
1011 struct netlink_dump_control c = {
1012 .dump = inet_diag_dump_compat,
1014 return netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
1018 return inet_diag_get_exact_compat(skb, nlh);
1021 static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
1023 int hdrlen = sizeof(struct inet_diag_req_v2);
1024 struct net *net = sock_net(skb->sk);
1026 if (nlmsg_len(h) < hdrlen)
1029 if (h->nlmsg_flags & NLM_F_DUMP) {
1030 if (nlmsg_attrlen(h, hdrlen)) {
1031 struct nlattr *attr;
1033 attr = nlmsg_find_attr(h, hdrlen,
1034 INET_DIAG_REQ_BYTECODE);
1036 nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
1037 inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
1041 struct netlink_dump_control c = {
1042 .dump = inet_diag_dump,
1044 return netlink_dump_start(net->diag_nlsk, skb, h, &c);
1048 return inet_diag_get_exact(skb, h, nlmsg_data(h));
1051 static const struct sock_diag_handler inet_diag_handler = {
1053 .dump = inet_diag_handler_dump,
1056 static const struct sock_diag_handler inet6_diag_handler = {
1058 .dump = inet_diag_handler_dump,
1061 int inet_diag_register(const struct inet_diag_handler *h)
1063 const __u16 type = h->idiag_type;
1066 if (type >= IPPROTO_MAX)
1069 mutex_lock(&inet_diag_table_mutex);
1071 if (!inet_diag_table[type]) {
1072 inet_diag_table[type] = h;
1075 mutex_unlock(&inet_diag_table_mutex);
1079 EXPORT_SYMBOL_GPL(inet_diag_register);
1081 void inet_diag_unregister(const struct inet_diag_handler *h)
1083 const __u16 type = h->idiag_type;
1085 if (type >= IPPROTO_MAX)
1088 mutex_lock(&inet_diag_table_mutex);
1089 inet_diag_table[type] = NULL;
1090 mutex_unlock(&inet_diag_table_mutex);
1092 EXPORT_SYMBOL_GPL(inet_diag_unregister);
1094 static int __init inet_diag_init(void)
1096 const int inet_diag_table_size = (IPPROTO_MAX *
1097 sizeof(struct inet_diag_handler *));
1100 inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
1101 if (!inet_diag_table)
1104 err = sock_diag_register(&inet_diag_handler);
1108 err = sock_diag_register(&inet6_diag_handler);
1112 sock_diag_register_inet_compat(inet_diag_rcv_msg_compat);
1117 sock_diag_unregister(&inet_diag_handler);
1119 kfree(inet_diag_table);
1123 static void __exit inet_diag_exit(void)
1125 sock_diag_unregister(&inet6_diag_handler);
1126 sock_diag_unregister(&inet_diag_handler);
1127 sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat);
1128 kfree(inet_diag_table);
1131 module_init(inet_diag_init);
1132 module_exit(inet_diag_exit);
1133 MODULE_LICENSE("GPL");
1134 MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2 /* AF_INET */);
1135 MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10 /* AF_INET6 */);