author    André Fabian Silva Delgado <emulatorman@parabola.nu>    2015-12-15 14:52:16 -0300
committer André Fabian Silva Delgado <emulatorman@parabola.nu>    2015-12-15 14:52:16 -0300
commit    8d91c1e411f55d7ea91b1183a2e9f8088fb4d5be
tree      e9891aa6c295060d065adffd610c4f49ecf884f3 /net
parent    a71852147516bc1cb5b0b3cbd13639bfd4022dc8

Linux-libre 4.3.2-gnu
Diffstat (limited to 'net')
-rw-r--r-- net/6lowpan/Makefile | 2
-rw-r--r-- net/6lowpan/core.c | 40
-rw-r--r-- net/6lowpan/iphc.c | 19
-rw-r--r-- net/8021q/vlan_dev.c | 3
-rw-r--r-- net/9p/trans_rdma.c | 26
-rw-r--r-- net/Kconfig | 7
-rw-r--r-- net/atm/br2684.c | 9
-rw-r--r-- net/atm/clip.c | 3
-rw-r--r-- net/batman-adv/bat_iv_ogm.c | 132
-rw-r--r-- net/batman-adv/bitarray.c | 6
-rw-r--r-- net/batman-adv/bitarray.h | 10
-rw-r--r-- net/batman-adv/bridge_loop_avoidance.c | 78
-rw-r--r-- net/batman-adv/bridge_loop_avoidance.h | 8
-rw-r--r-- net/batman-adv/debugfs.h | 1
-rw-r--r-- net/batman-adv/distributed-arp-table.c | 62
-rw-r--r-- net/batman-adv/distributed-arp-table.h | 8
-rw-r--r-- net/batman-adv/fragmentation.c | 13
-rw-r--r-- net/batman-adv/gateway_client.c | 83
-rw-r--r-- net/batman-adv/gateway_client.h | 4
-rw-r--r-- net/batman-adv/gateway_common.c | 67
-rw-r--r-- net/batman-adv/gateway_common.h | 1
-rw-r--r-- net/batman-adv/hard-interface.c | 44
-rw-r--r-- net/batman-adv/hash.c | 6
-rw-r--r-- net/batman-adv/hash.h | 12
-rw-r--r-- net/batman-adv/icmp_socket.c | 6
-rw-r--r-- net/batman-adv/icmp_socket.h | 1
-rw-r--r-- net/batman-adv/main.c | 86
-rw-r--r-- net/batman-adv/main.h | 50
-rw-r--r-- net/batman-adv/multicast.c | 35
-rw-r--r-- net/batman-adv/multicast.h | 2
-rw-r--r-- net/batman-adv/network-coding.c | 55
-rw-r--r-- net/batman-adv/network-coding.h | 4
-rw-r--r-- net/batman-adv/originator.c | 110
-rw-r--r-- net/batman-adv/originator.h | 16
-rw-r--r-- net/batman-adv/packet.h | 204
-rw-r--r-- net/batman-adv/routing.c | 24
-rw-r--r-- net/batman-adv/routing.h | 6
-rw-r--r-- net/batman-adv/send.c | 11
-rw-r--r-- net/batman-adv/send.h | 11
-rw-r--r-- net/batman-adv/soft-interface.c | 30
-rw-r--r-- net/batman-adv/soft-interface.h | 4
-rw-r--r-- net/batman-adv/sysfs.c | 4
-rw-r--r-- net/batman-adv/sysfs.h | 2
-rw-r--r-- net/batman-adv/translation-table.c | 307
-rw-r--r-- net/batman-adv/translation-table.h | 31
-rw-r--r-- net/batman-adv/types.h | 115
-rw-r--r-- net/bluetooth/6lowpan.c | 32
-rw-r--r-- net/bluetooth/Kconfig | 5
-rw-r--r-- net/bluetooth/Makefile | 3
-rw-r--r-- net/bluetooth/a2mp.c | 17
-rw-r--r-- net/bluetooth/a2mp.h | 19
-rw-r--r-- net/bluetooth/amp.c | 134
-rw-r--r-- net/bluetooth/amp.h | 14
-rw-r--r-- net/bluetooth/cmtp/capi.c | 8
-rw-r--r-- net/bluetooth/hci_conn.c | 344
-rw-r--r-- net/bluetooth/hci_core.c | 37
-rw-r--r-- net/bluetooth/hci_event.c | 231
-rw-r--r-- net/bluetooth/hci_request.c | 6
-rw-r--r-- net/bluetooth/hidp/core.c | 14
-rw-r--r-- net/bluetooth/l2cap_core.c | 6
-rw-r--r-- net/bluetooth/l2cap_sock.c | 41
-rw-r--r-- net/bluetooth/mgmt.c | 74
-rw-r--r-- net/bluetooth/sco.c | 5
-rw-r--r-- net/bridge/br_device.c | 4
-rw-r--r-- net/bridge/br_if.c | 1
-rw-r--r-- net/bridge/br_mdb.c | 144
-rw-r--r-- net/bridge/br_multicast.c | 44
-rw-r--r-- net/bridge/br_netfilter_hooks.c | 20
-rw-r--r-- net/bridge/br_netfilter_ipv6.c | 2
-rw-r--r-- net/bridge/br_netlink.c | 53
-rw-r--r-- net/bridge/br_private.h | 21
-rw-r--r-- net/bridge/br_vlan.c | 70
-rw-r--r-- net/bridge/netfilter/ebtables.c | 2
-rw-r--r-- net/caif/caif_dev.c | 2
-rw-r--r-- net/ceph/ceph_common.c | 1
-rw-r--r-- net/ceph/crypto.c | 4
-rw-r--r-- net/ceph/messenger.c | 83
-rw-r--r-- net/ceph/mon_client.c | 37
-rw-r--r-- net/ceph/osd_client.c | 64
-rw-r--r-- net/ceph/osdmap.c | 2
-rw-r--r-- net/core/Makefile | 1
-rw-r--r-- net/core/dev.c | 90
-rw-r--r-- net/core/dst.c | 112
-rw-r--r-- net/core/fib_rules.c | 34
-rw-r--r-- net/core/filter.c | 157
-rw-r--r-- net/core/flow_dissector.c | 284
-rw-r--r-- net/core/lwtunnel.c | 249
-rw-r--r-- net/core/neighbour.c | 14
-rw-r--r-- net/core/net-sysfs.c | 43
-rw-r--r-- net/core/net-traces.c | 1
-rw-r--r-- net/core/netpoll.c | 12
-rw-r--r-- net/core/pktgen.c | 5
-rw-r--r-- net/core/rtnetlink.c | 48
-rw-r--r-- net/core/skbuff.c | 4
-rw-r--r-- net/core/sock.c | 14
-rw-r--r-- net/core/timestamping.c | 6
-rw-r--r-- net/core/utils.c | 17
-rw-r--r-- net/dccp/ackvec.c | 12
-rw-r--r-- net/dccp/ccid.c | 3
-rw-r--r-- net/dccp/minisocks.c | 4
-rw-r--r-- net/decnet/dn_rules.c | 1
-rw-r--r-- net/dsa/dsa.c | 206
-rw-r--r-- net/dsa/dsa_priv.h | 8
-rw-r--r-- net/dsa/slave.c | 394
-rw-r--r-- net/dsa/tag_brcm.c | 15
-rw-r--r-- net/dsa/tag_dsa.c | 12
-rw-r--r-- net/dsa/tag_edsa.c | 12
-rw-r--r-- net/dsa/tag_trailer.c | 14
-rw-r--r-- net/ethernet/eth.c | 4
-rw-r--r-- net/hsr/hsr_device.c | 2
-rw-r--r-- net/ieee802154/6lowpan/6lowpan_i.h | 11
-rw-r--r-- net/ieee802154/6lowpan/core.c | 81
-rw-r--r-- net/ieee802154/6lowpan/rx.c | 45
-rw-r--r-- net/ieee802154/6lowpan/tx.c | 4
-rw-r--r-- net/ieee802154/nl802154.c | 45
-rw-r--r-- net/ieee802154/rdev-ops.h | 33
-rw-r--r-- net/ieee802154/sysfs.c | 38
-rw-r--r-- net/ieee802154/trace.h | 41
-rw-r--r-- net/ipv4/Kconfig | 14
-rw-r--r-- net/ipv4/Makefile | 1
-rw-r--r-- net/ipv4/af_inet.c | 49
-rw-r--r-- net/ipv4/ah4.c | 4
-rw-r--r-- net/ipv4/arp.c | 111
-rw-r--r-- net/ipv4/datagram.c | 2
-rw-r--r-- net/ipv4/fib_frontend.c | 90
-rw-r--r-- net/ipv4/fib_rules.c | 1
-rw-r--r-- net/ipv4/fib_semantics.c | 260
-rw-r--r-- net/ipv4/fib_trie.c | 14
-rw-r--r-- net/ipv4/fou.c | 32
-rw-r--r-- net/ipv4/geneve_core.c | 447
-rw-r--r-- net/ipv4/gre_demux.c | 235
-rw-r--r-- net/ipv4/gre_offload.c | 3
-rw-r--r-- net/ipv4/icmp.c | 13
-rw-r--r-- net/ipv4/igmp.c | 26
-rw-r--r-- net/ipv4/inet_hashtables.c | 38
-rw-r--r-- net/ipv4/inet_timewait_sock.c | 71
-rw-r--r-- net/ipv4/inetpeer.c | 20
-rw-r--r-- net/ipv4/ip_fragment.c | 47
-rw-r--r-- net/ipv4/ip_gre.c | 464
-rw-r--r-- net/ipv4/ip_input.c | 3
-rw-r--r-- net/ipv4/ip_output.c | 7
-rw-r--r-- net/ipv4/ip_tunnel.c | 37
-rw-r--r-- net/ipv4/ip_tunnel_core.c | 236
-rw-r--r-- net/ipv4/ipconfig.c | 2
-rw-r--r-- net/ipv4/ipip.c | 2
-rw-r--r-- net/ipv4/ipmr.c | 7
-rw-r--r-- net/ipv4/netfilter/Kconfig | 13
-rw-r--r-- net/ipv4/netfilter/Makefile | 3
-rw-r--r-- net/ipv4/netfilter/arp_tables.c | 13
-rw-r--r-- net/ipv4/netfilter/ip_tables.c | 40
-rw-r--r-- net/ipv4/netfilter/ipt_ECN.c | 2
-rw-r--r-- net/ipv4/netfilter/ipt_rpfilter.c | 4
-rw-r--r-- net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 2
-rw-r--r-- net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 4
-rw-r--r-- net/ipv4/netfilter/nf_defrag_ipv4.c | 22
-rw-r--r-- net/ipv4/netfilter/nf_dup_ipv4.c | 121
-rw-r--r-- net/ipv4/netfilter/nf_nat_l3proto_ipv4.c | 4
-rw-r--r-- net/ipv4/netfilter/nf_nat_proto_icmp.c | 2
-rw-r--r-- net/ipv4/netfilter/nft_dup_ipv4.c | 110
-rw-r--r-- net/ipv4/ping.c | 3
-rw-r--r-- net/ipv4/proc.c | 2
-rw-r--r-- net/ipv4/route.c | 50
-rw-r--r-- net/ipv4/sysctl_net_ipv4.c | 30
-rw-r--r-- net/ipv4/tcp.c | 2
-rw-r--r-- net/ipv4/tcp_bic.c | 2
-rw-r--r-- net/ipv4/tcp_cdg.c | 2
-rw-r--r-- net/ipv4/tcp_cong.c | 15
-rw-r--r-- net/ipv4/tcp_cubic.c | 14
-rw-r--r-- net/ipv4/tcp_dctcp.c | 2
-rw-r--r-- net/ipv4/tcp_highspeed.c | 2
-rw-r--r-- net/ipv4/tcp_htcp.c | 2
-rw-r--r-- net/ipv4/tcp_hybla.c | 2
-rw-r--r-- net/ipv4/tcp_illinois.c | 2
-rw-r--r-- net/ipv4/tcp_input.c | 92
-rw-r--r-- net/ipv4/tcp_ipv4.c | 7
-rw-r--r-- net/ipv4/tcp_metrics.c | 83
-rw-r--r-- net/ipv4/tcp_minisocks.c | 19
-rw-r--r-- net/ipv4/tcp_output.c | 67
-rw-r--r-- net/ipv4/tcp_scalable.c | 2
-rw-r--r-- net/ipv4/tcp_timer.c | 1
-rw-r--r-- net/ipv4/tcp_vegas.c | 6
-rw-r--r-- net/ipv4/tcp_veno.c | 2
-rw-r--r-- net/ipv4/udp.c | 23
-rw-r--r-- net/ipv4/udp_tunnel.c | 25
-rw-r--r-- net/ipv4/xfrm4_output.c | 2
-rw-r--r-- net/ipv4/xfrm4_policy.c | 20
-rw-r--r-- net/ipv6/Kconfig | 30
-rw-r--r-- net/ipv6/Makefile | 1
-rw-r--r-- net/ipv6/addrconf.c | 410
-rw-r--r-- net/ipv6/addrconf_core.c | 11
-rw-r--r-- net/ipv6/af_inet6.c | 12
-rw-r--r-- net/ipv6/ah6.c | 4
-rw-r--r-- net/ipv6/datagram.c | 10
-rw-r--r-- net/ipv6/exthdrs.c | 2
-rw-r--r-- net/ipv6/fib6_rules.c | 25
-rw-r--r-- net/ipv6/icmp.c | 6
-rw-r--r-- net/ipv6/ila.c | 229
-rw-r--r-- net/ipv6/inet6_hashtables.c | 9
-rw-r--r-- net/ipv6/ip6_fib.c | 39
-rw-r--r-- net/ipv6/ip6_gre.c | 98
-rw-r--r-- net/ipv6/ip6_input.c | 5
-rw-r--r-- net/ipv6/ip6_output.c | 40
-rw-r--r-- net/ipv6/ip6_tunnel.c | 149
-rw-r--r-- net/ipv6/ip6_udp_tunnel.c | 9
-rw-r--r-- net/ipv6/ip6mr.c | 1
-rw-r--r-- net/ipv6/ndisc.c | 42
-rw-r--r-- net/ipv6/netfilter/Kconfig | 13
-rw-r--r-- net/ipv6/netfilter/Makefile | 3
-rw-r--r-- net/ipv6/netfilter/ip6_tables.c | 29
-rw-r--r-- net/ipv6/netfilter/ip6t_REJECT.c | 9
-rw-r--r-- net/ipv6/netfilter/ip6t_SYNPROXY.c | 2
-rw-r--r-- net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 2
-rw-r--r-- net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 5
-rw-r--r-- net/ipv6/netfilter/nf_conntrack_reasm.c | 2
-rw-r--r-- net/ipv6/netfilter/nf_defrag_ipv6_hooks.c | 23
-rw-r--r-- net/ipv6/netfilter/nf_dup_ipv6.c | 97
-rw-r--r-- net/ipv6/netfilter/nf_nat_l3proto_ipv6.c | 4
-rw-r--r-- net/ipv6/netfilter/nf_nat_proto_icmpv6.c | 2
-rw-r--r-- net/ipv6/netfilter/nft_dup_ipv6.c | 108
-rw-r--r-- net/ipv6/raw.c | 3
-rw-r--r-- net/ipv6/route.c | 209
-rw-r--r-- net/ipv6/sit.c | 28
-rw-r--r-- net/ipv6/sysctl_net_ipv6.c | 15
-rw-r--r-- net/ipv6/tcp_ipv6.c | 7
-rw-r--r-- net/ipv6/udp.c | 3
-rw-r--r-- net/ipv6/xfrm6_mode_tunnel.c | 3
-rw-r--r-- net/ipv6/xfrm6_output.c | 18
-rw-r--r-- net/ipv6/xfrm6_policy.c | 21
-rw-r--r-- net/irda/irlmp.c | 2
-rw-r--r-- net/key/af_key.c | 2
-rw-r--r-- net/mac80211/Kconfig | 1
-rw-r--r-- net/mac80211/Makefile | 1
-rw-r--r-- net/mac80211/aes_cmac.c | 17
-rw-r--r-- net/mac80211/cfg.c | 164
-rw-r--r-- net/mac80211/chan.c | 31
-rw-r--r-- net/mac80211/debugfs.c | 4
-rw-r--r-- net/mac80211/debugfs_key.c | 2
-rw-r--r-- net/mac80211/debugfs_netdev.c | 34
-rw-r--r-- net/mac80211/driver-ops.c | 41
-rw-r--r-- net/mac80211/driver-ops.h | 29
-rw-r--r-- net/mac80211/ieee80211_i.h | 42
-rw-r--r-- net/mac80211/iface.c | 16
-rw-r--r-- net/mac80211/key.c | 1
-rw-r--r-- net/mac80211/key.h | 3
-rw-r--r-- net/mac80211/main.c | 17
-rw-r--r-- net/mac80211/mesh.c | 2
-rw-r--r-- net/mac80211/mesh_hwmp.c | 80
-rw-r--r-- net/mac80211/mesh_plink.c | 326
-rw-r--r-- net/mac80211/mesh_ps.c | 42
-rw-r--r-- net/mac80211/mesh_sync.c | 16
-rw-r--r-- net/mac80211/mlme.c | 98
-rw-r--r-- net/mac80211/ocb.c | 2
-rw-r--r-- net/mac80211/rate.c | 310
-rw-r--r-- net/mac80211/rate.h | 60
-rw-r--r-- net/mac80211/rc80211_minstrel_ht.c | 8
-rw-r--r-- net/mac80211/rx.c | 101
-rw-r--r-- net/mac80211/sta_info.c | 52
-rw-r--r-- net/mac80211/sta_info.h | 120
-rw-r--r-- net/mac80211/status.c | 6
-rw-r--r-- net/mac80211/tdls.c | 312
-rw-r--r-- net/mac80211/trace.h | 10
-rw-r--r-- net/mac80211/tx.c | 39
-rw-r--r-- net/mac80211/util.c | 82
-rw-r--r-- net/mac80211/vht.c | 49
-rw-r--r-- net/mac80211/wpa.c | 83
-rw-r--r-- net/mac802154/cfg.c | 81
-rw-r--r-- net/mac802154/ieee802154_i.h | 11
-rw-r--r-- net/mac802154/iface.c | 32
-rw-r--r-- net/mac802154/main.c | 19
-rw-r--r-- net/mac802154/rx.c | 14
-rw-r--r-- net/mac802154/tx.c | 27
-rw-r--r-- net/mac802154/util.c | 8
-rw-r--r-- net/mpls/Kconfig | 8
-rw-r--r-- net/mpls/Makefile | 1
-rw-r--r-- net/mpls/af_mpls.c | 201
-rw-r--r-- net/mpls/internal.h | 9
-rw-r--r-- net/mpls/mpls_iptunnel.c | 231
-rw-r--r-- net/netfilter/Kconfig | 2
-rw-r--r-- net/netfilter/core.c | 236
-rw-r--r-- net/netfilter/ipset/ip_set_list_set.c | 2
-rw-r--r-- net/netfilter/ipvs/Kconfig | 11
-rw-r--r-- net/netfilter/ipvs/Makefile | 1
-rw-r--r-- net/netfilter/ipvs/ip_vs_ctl.c | 143
-rw-r--r-- net/netfilter/ipvs/ip_vs_nfct.c | 2
-rw-r--r-- net/netfilter/ipvs/ip_vs_ovf.c | 86
-rw-r--r-- net/netfilter/ipvs/ip_vs_sched.c | 2
-rw-r--r-- net/netfilter/ipvs/ip_vs_sync.c | 269
-rw-r--r-- net/netfilter/nf_conntrack_core.c | 127
-rw-r--r-- net/netfilter/nf_conntrack_expect.c | 21
-rw-r--r-- net/netfilter/nf_conntrack_labels.c | 34
-rw-r--r-- net/netfilter/nf_conntrack_netlink.c | 228
-rw-r--r-- net/netfilter/nf_conntrack_pptp.c | 3
-rw-r--r-- net/netfilter/nf_conntrack_proto_sctp.c | 101
-rw-r--r-- net/netfilter/nf_conntrack_seqadj.c | 9
-rw-r--r-- net/netfilter/nf_conntrack_standalone.c | 39
-rw-r--r-- net/netfilter/nf_internals.h | 2
-rw-r--r-- net/netfilter/nf_nat_core.c | 24
-rw-r--r-- net/netfilter/nf_nat_proto_dccp.c | 2
-rw-r--r-- net/netfilter/nf_nat_proto_tcp.c | 2
-rw-r--r-- net/netfilter/nf_nat_proto_udp.c | 2
-rw-r--r-- net/netfilter/nf_nat_proto_udplite.c | 2
-rw-r--r-- net/netfilter/nf_queue.c | 12
-rw-r--r-- net/netfilter/nf_synproxy_core.c | 12
-rw-r--r-- net/netfilter/nf_tables_api.c | 8
-rw-r--r-- net/netfilter/nf_tables_core.c | 5
-rw-r--r-- net/netfilter/nfnetlink_acct.c | 71
-rw-r--r-- net/netfilter/nfnetlink_queue_core.c | 5
-rw-r--r-- net/netfilter/nft_counter.c | 97
-rw-r--r-- net/netfilter/nft_limit.c | 188
-rw-r--r-- net/netfilter/nft_meta.c | 4
-rw-r--r-- net/netfilter/nft_payload.c | 57
-rw-r--r-- net/netfilter/x_tables.c | 29
-rw-r--r-- net/netfilter/xt_CT.c | 26
-rw-r--r-- net/netfilter/xt_TCPMSS.c | 8
-rw-r--r-- net/netfilter/xt_TCPOPTSTRIP.c | 2
-rw-r--r-- net/netfilter/xt_TEE.c | 167
-rw-r--r-- net/netfilter/xt_TPROXY.c | 6
-rw-r--r-- net/netfilter/xt_connlabel.c | 16
-rw-r--r-- net/netfilter/xt_connlimit.c | 9
-rw-r--r-- net/netfilter/xt_nfacct.c | 2
-rw-r--r-- net/netlink/af_netlink.c | 96
-rw-r--r-- net/nfc/nci/core.c | 18
-rw-r--r-- net/nfc/nci/hci.c | 90
-rw-r--r-- net/nfc/netlink.c | 86
-rw-r--r-- net/openvswitch/Kconfig | 6
-rw-r--r-- net/openvswitch/Makefile | 6
-rw-r--r-- net/openvswitch/actions.c | 270
-rw-r--r-- net/openvswitch/conntrack.c | 778
-rw-r--r-- net/openvswitch/conntrack.h | 93
-rw-r--r-- net/openvswitch/datapath.c | 110
-rw-r--r-- net/openvswitch/datapath.h | 21
-rw-r--r-- net/openvswitch/dp_notify.c | 5
-rw-r--r-- net/openvswitch/flow.c | 41
-rw-r--r-- net/openvswitch/flow.h | 90
-rw-r--r-- net/openvswitch/flow_netlink.c | 351
-rw-r--r-- net/openvswitch/flow_netlink.h | 19
-rw-r--r-- net/openvswitch/flow_table.c | 6
-rw-r--r-- net/openvswitch/vport-geneve.c | 177
-rw-r--r-- net/openvswitch/vport-gre.c | 247
-rw-r--r-- net/openvswitch/vport-internal_dev.c | 143
-rw-r--r-- net/openvswitch/vport-netdev.c | 137
-rw-r--r-- net/openvswitch/vport-netdev.h | 16
-rw-r--r-- net/openvswitch/vport-vxlan.c | 238
-rw-r--r-- net/openvswitch/vport-vxlan.h | 11
-rw-r--r-- net/openvswitch/vport.c | 204
-rw-r--r-- net/openvswitch/vport.h | 80
-rw-r--r-- net/packet/af_packet.c | 216
-rw-r--r-- net/packet/internal.h | 5
-rw-r--r-- net/rds/af_rds.c | 9
-rw-r--r-- net/rds/bind.c | 3
-rw-r--r-- net/rds/connection.c | 33
-rw-r--r-- net/rds/ib.c | 24
-rw-r--r-- net/rds/ib.h | 6
-rw-r--r-- net/rds/ib_cm.c | 26
-rw-r--r-- net/rds/ib_rdma.c | 57
-rw-r--r-- net/rds/ib_recv.c | 82
-rw-r--r-- net/rds/ib_send.c | 13
-rw-r--r-- net/rds/iw.c | 12
-rw-r--r-- net/rds/iw_cm.c | 5
-rw-r--r-- net/rds/iw_rdma.c | 5
-rw-r--r-- net/rds/iw_send.c | 5
-rw-r--r-- net/rds/rdma.c | 9
-rw-r--r-- net/rds/rdma_transport.c | 15
-rw-r--r-- net/rds/rds.h | 24
-rw-r--r-- net/rds/send.c | 57
-rw-r--r-- net/rds/tcp.c | 165
-rw-r--r-- net/rds/tcp.h | 7
-rw-r--r-- net/rds/tcp_connect.c | 9
-rw-r--r-- net/rds/tcp_listen.c | 40
-rw-r--r-- net/rds/tcp_recv.c | 11
-rw-r--r-- net/rds/transport.c | 4
-rw-r--r-- net/rfkill/Kconfig | 3
-rw-r--r-- net/rfkill/core.c | 10
-rw-r--r-- net/rfkill/rfkill-gpio.c | 1
-rw-r--r-- net/sched/act_api.c | 47
-rw-r--r-- net/sched/act_bpf.c | 87
-rw-r--r-- net/sched/act_connmark.c | 9
-rw-r--r-- net/sched/act_csum.c | 3
-rw-r--r-- net/sched/act_gact.c | 44
-rw-r--r-- net/sched/act_ipt.c | 2
-rw-r--r-- net/sched/act_mirred.c | 75
-rw-r--r-- net/sched/act_nat.c | 10
-rw-r--r-- net/sched/act_pedit.c | 3
-rw-r--r-- net/sched/act_simple.c | 3
-rw-r--r-- net/sched/act_skbedit.c | 3
-rw-r--r-- net/sched/act_vlan.c | 3
-rw-r--r-- net/sched/cls_cgroup.c | 23
-rw-r--r-- net/sched/cls_flow.c | 2
-rw-r--r-- net/sched/cls_flower.c | 2
-rw-r--r-- net/sched/cls_rsvp.h | 18
-rw-r--r-- net/sched/cls_tcindex.c | 29
-rw-r--r-- net/sched/sch_api.c | 55
-rw-r--r-- net/sched/sch_atm.c | 2
-rw-r--r-- net/sched/sch_cbq.c | 2
-rw-r--r-- net/sched/sch_choke.c | 6
-rw-r--r-- net/sched/sch_drr.c | 2
-rw-r--r-- net/sched/sch_dsmark.c | 2
-rw-r--r-- net/sched/sch_fifo.c | 2
-rw-r--r-- net/sched/sch_fq_codel.c | 2
-rw-r--r-- net/sched/sch_generic.c | 56
-rw-r--r-- net/sched/sch_gred.c | 8
-rw-r--r-- net/sched/sch_hfsc.c | 2
-rw-r--r-- net/sched/sch_hhf.c | 11
-rw-r--r-- net/sched/sch_htb.c | 8
-rw-r--r-- net/sched/sch_multiq.c | 2
-rw-r--r-- net/sched/sch_plug.c | 8
-rw-r--r-- net/sched/sch_prio.c | 2
-rw-r--r-- net/sched/sch_qfq.c | 3
-rw-r--r-- net/sched/sch_sfb.c | 4
-rw-r--r-- net/sched/sch_sfq.c | 2
-rw-r--r-- net/sctp/associola.c | 20
-rw-r--r-- net/sctp/protocol.c | 48
-rw-r--r-- net/sctp/sm_make_chunk.c | 15
-rw-r--r-- net/sctp/sm_sideeffect.c | 46
-rw-r--r-- net/sctp/sm_statefuns.c | 2
-rw-r--r-- net/sunrpc/auth_unix.c | 2
-rw-r--r-- net/sunrpc/cache.c | 103
-rw-r--r-- net/sunrpc/sched.c | 12
-rw-r--r-- net/sunrpc/svc.c | 113
-rw-r--r-- net/sunrpc/svc_xprt.c | 10
-rw-r--r-- net/sunrpc/xprtrdma/frwr_ops.c | 6
-rw-r--r-- net/sunrpc/xprtrdma/physical_ops.c | 17
-rw-r--r-- net/sunrpc/xprtrdma/rpc_rdma.c | 197
-rw-r--r-- net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 14
-rw-r--r-- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 73
-rw-r--r-- net/sunrpc/xprtrdma/svc_rdma_transport.c | 45
-rw-r--r-- net/sunrpc/xprtrdma/transport.c | 79
-rw-r--r-- net/sunrpc/xprtrdma/verbs.c | 253
-rw-r--r-- net/sunrpc/xprtrdma/xprt_rdma.h | 28
-rw-r--r-- net/sunrpc/xprtsock.c | 12
-rw-r--r-- net/switchdev/switchdev.c | 122
-rw-r--r-- net/sysctl_net.c | 6
-rw-r--r-- net/tipc/bcast.c | 69
-rw-r--r-- net/tipc/bcast.h | 1
-rw-r--r-- net/tipc/bearer.c | 30
-rw-r--r-- net/tipc/bearer.h | 3
-rw-r--r-- net/tipc/core.h | 10
-rw-r--r-- net/tipc/discover.c | 130
-rw-r--r-- net/tipc/link.c | 2031
-rw-r--r-- net/tipc/link.h | 109
-rw-r--r-- net/tipc/msg.c | 99
-rw-r--r-- net/tipc/msg.h | 112
-rw-r--r-- net/tipc/name_distr.c | 6
-rw-r--r-- net/tipc/netlink_compat.c | 2
-rw-r--r-- net/tipc/node.c | 979
-rw-r--r-- net/tipc/node.h | 84
-rw-r--r-- net/tipc/socket.c | 385
-rw-r--r-- net/tipc/socket.h | 2
-rw-r--r-- net/tipc/udp_media.c | 13
-rw-r--r-- net/vmw_vsock/af_vsock.c | 7
-rw-r--r-- net/vmw_vsock/vmci_transport.c | 173
-rw-r--r-- net/vmw_vsock/vmci_transport.h | 4
-rw-r--r-- net/wimax/op-rfkill.c | 3
-rw-r--r-- net/wireless/core.c | 5
-rw-r--r-- net/wireless/core.h | 5
-rw-r--r-- net/wireless/mlme.c | 75
-rw-r--r-- net/wireless/nl80211.c | 16
-rw-r--r-- net/wireless/rdev-ops.h | 2
-rw-r--r-- net/wireless/reg.c | 75
-rw-r--r-- net/xfrm/xfrm_algo.c | 14
-rw-r--r-- net/xfrm/xfrm_policy.c | 24
-rw-r--r-- net/xfrm/xfrm_user.c | 12
462 files changed, 16357 insertions(+), 9175 deletions(-)
diff --git a/net/6lowpan/Makefile b/net/6lowpan/Makefile
index eb8baa72a..c6ffc55ee 100644
--- a/net/6lowpan/Makefile
+++ b/net/6lowpan/Makefile
@@ -1,6 +1,6 @@
obj-$(CONFIG_6LOWPAN) += 6lowpan.o
-6lowpan-y := iphc.o nhc.o
+6lowpan-y := core.o iphc.o nhc.o
#rfc6282 nhcs
obj-$(CONFIG_6LOWPAN_NHC_DEST) += nhc_dest.o
diff --git a/net/6lowpan/core.c b/net/6lowpan/core.c
new file mode 100644
index 000000000..ae1896fa4
--- /dev/null
+++ b/net/6lowpan/core.c
@@ -0,0 +1,40 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Authors:
+ * (C) 2015 Pengutronix, Alexander Aring <aar@pengutronix.de>
+ */
+
+#include <linux/module.h>
+
+#include <net/6lowpan.h>
+
+void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype)
+{
+ lowpan_priv(dev)->lltype = lltype;
+}
+EXPORT_SYMBOL(lowpan_netdev_setup);
+
+static int __init lowpan_module_init(void)
+{
+ request_module_nowait("ipv6");
+
+ request_module_nowait("nhc_dest");
+ request_module_nowait("nhc_fragment");
+ request_module_nowait("nhc_hop");
+ request_module_nowait("nhc_ipv6");
+ request_module_nowait("nhc_mobility");
+ request_module_nowait("nhc_routing");
+ request_module_nowait("nhc_udp");
+
+ return 0;
+}
+module_init(lowpan_module_init);
+
+MODULE_LICENSE("GPL");
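
The only export above, lowpan_netdev_setup(), is how link-layer code is meant to tag a device before using the shared 6lowpan core. A minimal sketch of a caller, assuming the lowpan_lltypes values from include/net/6lowpan.h; the wrapper function itself is hypothetical:

	#include <linux/netdevice.h>
	#include <net/6lowpan.h>

	/* hypothetical driver hook; only lowpan_netdev_setup() and
	 * enum lowpan_lltypes come from the patch / 6lowpan headers
	 */
	static void example_lowpan_setup(struct net_device *dev)
	{
		/* record the link-layer type so shared 6lowpan code can
		 * tell 802.15.4 apart from BTLE via lowpan_priv(dev)
		 */
		lowpan_netdev_setup(dev, LOWPAN_LLTYPE_IEEE802154);
	}
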
diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c
index 94a375c04..1e0071fdc 100644
--- a/net/6lowpan/iphc.c
+++ b/net/6lowpan/iphc.c
@@ -48,7 +48,6 @@
#include <linux/bitops.h>
#include <linux/if_arp.h>
-#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/6lowpan.h>
#include <net/ipv6.h>
@@ -284,7 +283,7 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
return -EINVAL;
- hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
+ hdr.flow_lbl[0] = (tmp & 0x0F) | ((tmp >> 2) & 0x30);
memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
skb_pull(skb, 2);
break;
@@ -610,19 +609,3 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
return 0;
}
EXPORT_SYMBOL_GPL(lowpan_header_compress);
-
-static int __init lowpan_module_init(void)
-{
- request_module_nowait("nhc_dest");
- request_module_nowait("nhc_fragment");
- request_module_nowait("nhc_hop");
- request_module_nowait("nhc_ipv6");
- request_module_nowait("nhc_mobility");
- request_module_nowait("nhc_routing");
- request_module_nowait("nhc_udp");
-
- return 0;
-}
-module_init(lowpan_module_init);
-
-MODULE_LICENSE("GPL");
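
The flow-label hunk above is a behavioral fix, not just type cleanup. The reasoning, inferred from the hunk itself rather than a changelog:

	/* lowpan_fetch_skb(skb, &tmp, sizeof(tmp)) copies one byte into
	 * tmp and pulls it off the skb, so afterwards skb->data[0] is
	 * already the *next* byte.  The old code masked that wrong octet;
	 * taking the low nibble from tmp uses the byte actually fetched.
	 */
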
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 01d7ba840..fded86508 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -791,10 +791,9 @@ void vlan_setup(struct net_device *dev)
{
ether_setup(dev);
- dev->priv_flags |= IFF_802_1Q_VLAN;
+ dev->priv_flags |= IFF_802_1Q_VLAN | IFF_NO_QUEUE;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
netif_keep_dst(dev);
- dev->tx_queue_len = 0;
dev->netdev_ops = &vlan_netdev_ops;
dev->destructor = vlan_dev_free;
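
The two vlan_setup() changes are one logical change: IFF_NO_QUEUE appears to supersede the old convention of forcing tx_queue_len to zero on queueless virtual devices. In essence:

	/* old: dev->tx_queue_len = 0;            magic value for "no queue"
	 * new: dev->priv_flags |= IFF_NO_QUEUE;  explicit flag; the core
	 *      picks the noqueue qdisc without overloading tx_queue_len
	 */
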
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 37a78d20c..ba1210253 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -94,8 +94,6 @@ struct p9_trans_rdma {
struct ib_pd *pd;
struct ib_qp *qp;
struct ib_cq *cq;
- struct ib_mr *dma_mr;
- u32 lkey;
long timeout;
int sq_depth;
struct semaphore sq_sem;
@@ -382,9 +380,6 @@ static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
if (!rdma)
return;
- if (rdma->dma_mr && !IS_ERR(rdma->dma_mr))
- ib_dereg_mr(rdma->dma_mr);
-
if (rdma->qp && !IS_ERR(rdma->qp))
ib_destroy_qp(rdma->qp);
@@ -415,7 +410,7 @@ post_recv(struct p9_client *client, struct p9_rdma_context *c)
sge.addr = c->busa;
sge.length = client->msize;
- sge.lkey = rdma->lkey;
+ sge.lkey = rdma->pd->local_dma_lkey;
wr.next = NULL;
c->wc_op = IB_WC_RECV;
@@ -506,7 +501,7 @@ dont_need_post_recv:
sge.addr = c->busa;
sge.length = c->req->tc->size;
- sge.lkey = rdma->lkey;
+ sge.lkey = rdma->pd->local_dma_lkey;
wr.next = NULL;
c->wc_op = IB_WC_SEND;
@@ -647,7 +642,6 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
struct p9_trans_rdma *rdma;
struct rdma_conn_param conn_param;
struct ib_qp_init_attr qp_attr;
- struct ib_device_attr devattr;
struct ib_cq_init_attr cq_attr = {};
/* Parse the transport specific mount options */
@@ -700,11 +694,6 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED))
goto error;
- /* Query the device attributes */
- err = ib_query_device(rdma->cm_id->device, &devattr);
- if (err)
- goto error;
-
/* Create the Completion Queue */
cq_attr.cqe = opts.sq_depth + opts.rq_depth + 1;
rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler,
@@ -719,17 +708,6 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
if (IS_ERR(rdma->pd))
goto error;
- /* Cache the DMA lkey in the transport */
- rdma->dma_mr = NULL;
- if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
- rdma->lkey = rdma->cm_id->device->local_dma_lkey;
- else {
- rdma->dma_mr = ib_get_dma_mr(rdma->pd, IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(rdma->dma_mr))
- goto error;
- rdma->lkey = rdma->dma_mr->lkey;
- }
-
/* Create the Queue Pair */
memset(&qp_attr, 0, sizeof qp_attr);
qp_attr.event_handler = qp_event_handler;
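
Every removal in this file hangs off one change elsewhere in the kernel: each protection domain now carries a ready-to-use local DMA lkey. The dropped logic, reconstructed from the '-' lines above:

	/* removed: per-device capability check plus fallback MR
	 *
	 *   if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
	 *           rdma->lkey = rdma->cm_id->device->local_dma_lkey;
	 *   else
	 *           rdma->lkey = ib_get_dma_mr(pd, ...)->lkey;
	 *
	 * kept: sge.lkey = rdma->pd->local_dma_lkey, valid for any PD
	 */
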
diff --git a/net/Kconfig b/net/Kconfig
index 57a7c5af3..7021c1bf4 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -374,6 +374,13 @@ source "net/caif/Kconfig"
source "net/ceph/Kconfig"
source "net/nfc/Kconfig"
+config LWTUNNEL
+ bool "Network light weight tunnels"
+ ---help---
+ This feature provides an infrastructure to support light weight
+ tunnels like mpls. There is no netdevice associated with a light
+ weight tunnel endpoint. Tunnel encapsulation parameters are stored
+ with light weight tunnel state associated with fib routes.
endif # if NET
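
To make the new help text concrete: the defining property of a light weight tunnel is that encapsulation state hangs off a route rather than a tunnel net_device. A conceptual sketch with simplified, hypothetical types (not the kernel's actual lwtunnel structures):

	/* conceptual only; type and field names are illustrative */
	struct example_lwt_state {
		int encap_type;          /* e.g. an MPLS encap */
		unsigned char data[16];  /* encap parameters (label stack, ...) */
	};

	struct example_route {
		unsigned int dst_prefix;
		struct example_lwt_state *lwt;  /* per-route state, may be NULL */
	};
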
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index cc78538d1..aa0047c5c 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -802,13 +802,10 @@ static int br2684_seq_show(struct seq_file *seq, void *v)
(brdev->payload == p_bridged) ? "bridged" : "routed",
brvcc->copies_failed, brvcc->copies_needed);
#ifdef CONFIG_ATM_BR2684_IPFILTER
-#define b1(var, byte) ((u8 *) &brvcc->filter.var)[byte]
-#define bs(var) b1(var, 0), b1(var, 1), b1(var, 2), b1(var, 3)
if (brvcc->filter.netmask != 0)
- seq_printf(seq, " filter=%d.%d.%d.%d/"
- "%d.%d.%d.%d\n", bs(prefix), bs(netmask));
-#undef bs
-#undef b1
+ seq_printf(seq, " filter=%pI4/%pI4\n",
+ &brvcc->filter.prefix,
+ &brvcc->filter.netmask);
#endif /* CONFIG_ATM_BR2684_IPFILTER */
}
return 0;
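
The seq_printf() rewrite leans on a kernel printf extension: %pI4 takes a pointer to a big-endian 32-bit address and prints it in dotted-quad form, which is what the removed b1()/bs() macros assembled by hand:

	/* both print "a.b.c.d"; the second needs no helper macros:
	 *   seq_printf(seq, "%d.%d.%d.%d", b1(prefix, 0), ..., b1(prefix, 3));
	 *   seq_printf(seq, "%pI4", &brvcc->filter.prefix);
	 */
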
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 17e55dfec..e07f551a8 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -317,6 +317,9 @@ static int clip_constructor(struct neighbour *neigh)
static int clip_encap(struct atm_vcc *vcc, int mode)
{
+ if (!CLIP_VCC(vcc))
+ return -EBADFD;
+
CLIP_VCC(vcc)->encap = mode;
return 0;
}
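
A note on the new clip_encap() guard, inferred from the CLIP_VCC() macro in clip.h:

	/* CLIP_VCC(vcc) points at per-VCC CLIP state that only exists once
	 * the VCC has been attached to CLIP; the added check turns an early
	 * encap ioctl from a NULL dereference into a clean -EBADFD.
	 */
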
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 753383c22..912d9c36f 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -77,8 +77,7 @@ enum batadv_dup_status {
* @lq_index: index to store the value at
* @value: value to store in the ring buffer
*/
-static void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
- uint8_t value)
+static void batadv_ring_buffer_set(u8 lq_recv[], u8 *lq_index, u8 value)
{
lq_recv[*lq_index] = value;
*lq_index = (*lq_index + 1) % BATADV_TQ_GLOBAL_WINDOW_SIZE;
@@ -91,12 +90,12 @@ static void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
*
* Returns computed average value.
*/
-static uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[])
+static u8 batadv_ring_buffer_avg(const u8 lq_recv[])
{
- const uint8_t *ptr;
- uint16_t count = 0;
- uint16_t i = 0;
- uint16_t sum = 0;
+ const u8 *ptr;
+ u16 count = 0;
+ u16 i = 0;
+ u16 sum = 0;
ptr = lq_recv;
@@ -113,7 +112,7 @@ static uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[])
if (count == 0)
return 0;
- return (uint8_t)(sum / count);
+ return (u8)(sum / count);
}
/**
@@ -155,14 +154,14 @@ static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node,
kfree(orig_node->bat_iv.bcast_own);
orig_node->bat_iv.bcast_own = data_ptr;
- data_ptr = kmalloc_array(max_if_num, sizeof(uint8_t), GFP_ATOMIC);
+ data_ptr = kmalloc_array(max_if_num, sizeof(u8), GFP_ATOMIC);
if (!data_ptr) {
kfree(orig_node->bat_iv.bcast_own);
goto unlock;
}
memcpy(data_ptr, orig_node->bat_iv.bcast_own_sum,
- (max_if_num - 1) * sizeof(uint8_t));
+ (max_if_num - 1) * sizeof(u8));
kfree(orig_node->bat_iv.bcast_own_sum);
orig_node->bat_iv.bcast_own_sum = data_ptr;
@@ -215,19 +214,19 @@ free_bcast_own:
if (max_if_num == 0)
goto free_own_sum;
- data_ptr = kmalloc_array(max_if_num, sizeof(uint8_t), GFP_ATOMIC);
+ data_ptr = kmalloc_array(max_if_num, sizeof(u8), GFP_ATOMIC);
if (!data_ptr) {
kfree(orig_node->bat_iv.bcast_own);
goto unlock;
}
memcpy(data_ptr, orig_node->bat_iv.bcast_own_sum,
- del_if_num * sizeof(uint8_t));
+ del_if_num * sizeof(u8));
- if_offset = (del_if_num + 1) * sizeof(uint8_t);
- memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
+ if_offset = (del_if_num + 1) * sizeof(u8);
+ memcpy((char *)data_ptr + del_if_num * sizeof(u8),
orig_node->bat_iv.bcast_own_sum + if_offset,
- (max_if_num - del_if_num) * sizeof(uint8_t));
+ (max_if_num - del_if_num) * sizeof(u8));
free_own_sum:
kfree(orig_node->bat_iv.bcast_own_sum);
@@ -250,7 +249,7 @@ unlock:
* If the object does not exists it is created an initialised.
*/
static struct batadv_orig_node *
-batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const uint8_t *addr)
+batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr)
{
struct batadv_orig_node *orig_node;
int size, hash_added;
@@ -270,7 +269,7 @@ batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const uint8_t *addr)
if (!orig_node->bat_iv.bcast_own)
goto free_orig_node;
- size = bat_priv->num_ifaces * sizeof(uint8_t);
+ size = bat_priv->num_ifaces * sizeof(u8);
orig_node->bat_iv.bcast_own_sum = kzalloc(size, GFP_ATOMIC);
if (!orig_node->bat_iv.bcast_own_sum)
goto free_orig_node;
@@ -293,43 +292,17 @@ free_orig_node:
static struct batadv_neigh_node *
batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
- const uint8_t *neigh_addr,
+ const u8 *neigh_addr,
struct batadv_orig_node *orig_node,
struct batadv_orig_node *orig_neigh)
{
- struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
- struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
+ struct batadv_neigh_node *neigh_node;
- neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, orig_node);
+ neigh_node = batadv_neigh_node_new(orig_node, hard_iface, neigh_addr);
if (!neigh_node)
goto out;
- if (!atomic_inc_not_zero(&hard_iface->refcount)) {
- kfree(neigh_node);
- neigh_node = NULL;
- goto out;
- }
-
neigh_node->orig_node = orig_neigh;
- neigh_node->if_incoming = hard_iface;
-
- spin_lock_bh(&orig_node->neigh_list_lock);
- tmp_neigh_node = batadv_neigh_node_get(orig_node, hard_iface,
- neigh_addr);
- if (!tmp_neigh_node) {
- hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
- } else {
- kfree(neigh_node);
- batadv_hardif_free_ref(hard_iface);
- neigh_node = tmp_neigh_node;
- }
- spin_unlock_bh(&orig_node->neigh_list_lock);
-
- if (!tmp_neigh_node)
- batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
- "Creating new neighbor %pM for orig_node %pM on interface %s\n",
- neigh_addr, orig_node->orig,
- hard_iface->net_dev->name);
out:
return neigh_node;
@@ -339,7 +312,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
{
struct batadv_ogm_packet *batadv_ogm_packet;
unsigned char *ogm_buff;
- uint32_t random_seqno;
+ u32 random_seqno;
/* randomize initial seqno to avoid collision */
get_random_bytes(&random_seqno, sizeof(random_seqno));
@@ -411,8 +384,7 @@ static unsigned long batadv_iv_ogm_fwd_send_time(void)
}
/* apply hop penalty for a normal link */
-static uint8_t batadv_hop_penalty(uint8_t tq,
- const struct batadv_priv *bat_priv)
+static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv)
{
int hop_penalty = atomic_read(&bat_priv->hop_penalty);
int new_tq;
@@ -442,11 +414,11 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
const char *fwd_str;
- uint8_t packet_num;
- int16_t buff_pos;
+ u8 packet_num;
+ s16 buff_pos;
struct batadv_ogm_packet *batadv_ogm_packet;
struct sk_buff *skb;
- uint8_t *packet_pos;
+ u8 *packet_pos;
if (hard_iface->if_status != BATADV_IF_ACTIVE)
return;
@@ -837,7 +809,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
struct batadv_hard_iface *if_outgoing)
{
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- uint16_t tvlv_len;
+ u16 tvlv_len;
if (batadv_ogm_packet->ttl <= 1) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
@@ -896,9 +868,9 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
struct hlist_head *head;
struct batadv_orig_node *orig_node;
unsigned long *word;
- uint32_t i;
+ u32 i;
size_t word_index;
- uint8_t *w;
+ u8 *w;
int if_num;
for (i = 0; i < hash->size; i++) {
@@ -927,8 +899,8 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
struct batadv_ogm_packet *batadv_ogm_packet;
struct batadv_hard_iface *primary_if, *tmp_hard_iface;
int *ogm_buff_len = &hard_iface->bat_iv.ogm_buff_len;
- uint32_t seqno;
- uint16_t tvlv_len = 0;
+ u32 seqno;
+ u16 tvlv_len = 0;
unsigned long send_time;
primary_if = batadv_primary_if_get_selected(bat_priv);
@@ -947,7 +919,7 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
batadv_ogm_packet->tvlv_len = htons(tvlv_len);
/* change sequence number to network order */
- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
+ seqno = (u32)atomic_read(&hard_iface->bat_iv.ogm_seqno);
batadv_ogm_packet->seqno = htonl(seqno);
atomic_inc(&hard_iface->bat_iv.ogm_seqno);
@@ -970,7 +942,7 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
rcu_read_lock();
list_for_each_entry_rcu(tmp_hard_iface, &batadv_hardif_list, list) {
if (tmp_hard_iface->soft_iface != hard_iface->soft_iface)
- continue;
+ continue;
batadv_iv_ogm_queue_add(bat_priv, *ogm_buff,
*ogm_buff_len, hard_iface,
tmp_hard_iface, 1, send_time);
@@ -1006,13 +978,14 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
{
struct batadv_neigh_ifinfo *neigh_ifinfo = NULL;
struct batadv_neigh_ifinfo *router_ifinfo = NULL;
- struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
+ struct batadv_neigh_node *neigh_node = NULL;
+ struct batadv_neigh_node *tmp_neigh_node = NULL;
struct batadv_neigh_node *router = NULL;
struct batadv_orig_node *orig_node_tmp;
int if_num;
- uint8_t sum_orig, sum_neigh;
- uint8_t *neigh_addr;
- uint8_t tq_avg;
+ u8 sum_orig, sum_neigh;
+ u8 *neigh_addr;
+ u8 tq_avg;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"update_originator(): Searching and updating originator entry of received packet\n");
@@ -1164,8 +1137,8 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node;
struct batadv_neigh_ifinfo *neigh_ifinfo;
- uint8_t total_count;
- uint8_t orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
+ u8 total_count;
+ u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
int tq_asym_penalty, inv_asym_penalty, if_num, ret = 0;
unsigned int combined_tq;
@@ -1311,13 +1284,13 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
struct batadv_neigh_node *neigh_node;
struct batadv_neigh_ifinfo *neigh_ifinfo;
int is_dup;
- int32_t seq_diff;
+ s32 seq_diff;
int need_update = 0;
int set_mark;
enum batadv_dup_status ret = BATADV_NO_DUP;
- uint32_t seqno = ntohl(batadv_ogm_packet->seqno);
- uint8_t *neigh_addr;
- uint8_t packet_count;
+ u32 seqno = ntohl(batadv_ogm_packet->seqno);
+ u8 *neigh_addr;
+ u8 packet_count;
unsigned long *bitmap;
orig_node = batadv_iv_ogm_orig_get(bat_priv, batadv_ogm_packet->orig);
@@ -1406,7 +1379,8 @@ batadv_iv_ogm_process_per_outif(const struct sk_buff *skb, int ogm_offset,
struct batadv_hard_iface *if_outgoing)
{
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct batadv_neigh_node *router = NULL, *router_router = NULL;
+ struct batadv_neigh_node *router = NULL;
+ struct batadv_neigh_node *router_router = NULL;
struct batadv_orig_node *orig_neigh_node;
struct batadv_orig_ifinfo *orig_ifinfo;
struct batadv_neigh_node *orig_neigh_router = NULL;
@@ -1418,7 +1392,7 @@ batadv_iv_ogm_process_per_outif(const struct sk_buff *skb, int ogm_offset,
bool sameseq, similar_ttl;
struct sk_buff *skb_priv;
struct ethhdr *ethhdr;
- uint8_t *prev_sender;
+ u8 *prev_sender;
int is_bidirect;
/* create a private copy of the skb, as some functions change tq value
@@ -1600,7 +1574,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
struct batadv_orig_node *orig_neigh_node, *orig_node;
struct batadv_hard_iface *hard_iface;
struct batadv_ogm_packet *ogm_packet;
- uint32_t if_incoming_seqno;
+ u32 if_incoming_seqno;
bool has_directlink_flag;
struct ethhdr *ethhdr;
bool is_my_oldorig = false;
@@ -1673,9 +1647,9 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
if (is_my_orig) {
unsigned long *word;
int offset;
- int32_t bit_pos;
- int16_t if_num;
- uint8_t *weight;
+ s32 bit_pos;
+ s16 if_num;
+ u8 *weight;
orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
ethhdr->h_source);
@@ -1751,7 +1725,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
{
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct batadv_ogm_packet *ogm_packet;
- uint8_t *packet_pos;
+ u8 *packet_pos;
int ogm_offset;
bool ret;
@@ -1835,7 +1809,7 @@ static void batadv_iv_ogm_orig_print(struct batadv_priv *bat_priv,
unsigned long last_seen_jiffies;
struct hlist_head *head;
int batman_count = 0;
- uint32_t i;
+ u32 i;
seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
"Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
@@ -1903,7 +1877,7 @@ static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1,
struct batadv_hard_iface *if_outgoing2)
{
struct batadv_neigh_ifinfo *neigh1_ifinfo, *neigh2_ifinfo;
- uint8_t tq1, tq2;
+ u8 tq1, tq2;
int diff;
neigh1_ifinfo = batadv_neigh_ifinfo_get(neigh1, if_outgoing1);
@@ -1945,7 +1919,7 @@ batadv_iv_ogm_neigh_is_eob(struct batadv_neigh_node *neigh1,
struct batadv_hard_iface *if_outgoing2)
{
struct batadv_neigh_ifinfo *neigh1_ifinfo, *neigh2_ifinfo;
- uint8_t tq1, tq2;
+ u8 tq1, tq2;
bool ret;
neigh1_ifinfo = batadv_neigh_ifinfo_get(neigh1, if_outgoing1);
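
Nearly all hunks above are the mechanical uintN_t to uN rename, but the ring-buffer helpers near the top carry real logic. A standalone restatement (the set/avg structure and the count == 0 guard are visible in the hunks; the skip-zero loop body is filled in from the upstream source, so treat it as an assumption):

	#include <stdint.h>

	#define WINDOW 5	/* stands in for BATADV_TQ_GLOBAL_WINDOW_SIZE */

	static void ring_buffer_set(uint8_t lq[], uint8_t *idx, uint8_t val)
	{
		lq[*idx] = val;			/* overwrite the oldest sample */
		*idx = (*idx + 1) % WINDOW;	/* advance the write position */
	}

	static uint8_t ring_buffer_avg(const uint8_t lq[])
	{
		uint16_t sum = 0, count = 0, i;

		for (i = 0; i < WINDOW; i++) {
			if (lq[i] == 0)		/* unused slots stay zero */
				continue;
			count++;
			sum += lq[i];
		}
		return count ? (uint8_t)(sum / count) : 0;
	}
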
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index cf68c3283..25cbc36e9 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -21,7 +21,7 @@
#include <linux/bitmap.h>
/* shift the packet array by n places. */
-static void batadv_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
+static void batadv_bitmap_shift_left(unsigned long *seq_bits, s32 n)
{
if (n <= 0 || n >= BATADV_TQ_LOCAL_WINDOW_SIZE)
return;
@@ -35,8 +35,8 @@ static void batadv_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
* 1 if the window was moved (either new or very old)
* 0 if the window was not moved/shifted.
*/
-int batadv_bit_get_packet(void *priv, unsigned long *seq_bits,
- int32_t seq_num_diff, int set_mark)
+int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, s32 seq_num_diff,
+ int set_mark)
{
struct batadv_priv *bat_priv = priv;
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index 0c2456225..0226b220f 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -28,9 +28,9 @@
* and curr_seqno is within range of last_seqno. Otherwise returns 0.
*/
static inline int batadv_test_bit(const unsigned long *seq_bits,
- uint32_t last_seqno, uint32_t curr_seqno)
+ u32 last_seqno, u32 curr_seqno)
{
- int32_t diff;
+ s32 diff;
diff = last_seqno - curr_seqno;
if (diff < 0 || diff >= BATADV_TQ_LOCAL_WINDOW_SIZE)
@@ -39,7 +39,7 @@ static inline int batadv_test_bit(const unsigned long *seq_bits,
}
/* turn corresponding bit on, so we can remember that we got the packet */
-static inline void batadv_set_bit(unsigned long *seq_bits, int32_t n)
+static inline void batadv_set_bit(unsigned long *seq_bits, s32 n)
{
/* if too old, just drop it */
if (n < 0 || n >= BATADV_TQ_LOCAL_WINDOW_SIZE)
@@ -51,7 +51,7 @@ static inline void batadv_set_bit(unsigned long *seq_bits, int32_t n)
/* receive and process one packet, returns 1 if received seq_num is considered
* new, 0 if old
*/
-int batadv_bit_get_packet(void *priv, unsigned long *seq_bits,
- int32_t seq_num_diff, int set_mark);
+int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, s32 seq_num_diff,
+ int set_mark);
#endif /* _NET_BATMAN_ADV_BITARRAY_H_ */
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index ba0609292..191a70290 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -51,7 +51,7 @@
#include "packet.h"
#include "translation-table.h"
-static const uint8_t batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
+static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
static void batadv_bla_periodic_work(struct work_struct *work);
static void
@@ -59,10 +59,10 @@ batadv_bla_send_announce(struct batadv_priv *bat_priv,
struct batadv_bla_backbone_gw *backbone_gw);
/* return the index of the claim */
-static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
+static inline u32 batadv_choose_claim(const void *data, u32 size)
{
struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
- uint32_t hash = 0;
+ u32 hash = 0;
hash = jhash(&claim->addr, sizeof(claim->addr), hash);
hash = jhash(&claim->vid, sizeof(claim->vid), hash);
@@ -71,11 +71,10 @@ static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
}
/* return the index of the backbone gateway */
-static inline uint32_t batadv_choose_backbone_gw(const void *data,
- uint32_t size)
+static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
{
const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
- uint32_t hash = 0;
+ u32 hash = 0;
hash = jhash(&claim->addr, sizeof(claim->addr), hash);
hash = jhash(&claim->vid, sizeof(claim->vid), hash);
@@ -89,7 +88,8 @@ static int batadv_compare_backbone_gw(const struct hlist_node *node,
{
const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
hash_entry);
- const struct batadv_bla_backbone_gw *gw1 = data1, *gw2 = data2;
+ const struct batadv_bla_backbone_gw *gw1 = data1;
+ const struct batadv_bla_backbone_gw *gw2 = data2;
if (!batadv_compare_eth(gw1->orig, gw2->orig))
return 0;
@@ -106,7 +106,8 @@ static int batadv_compare_claim(const struct hlist_node *node,
{
const void *data1 = container_of(node, struct batadv_bla_claim,
hash_entry);
- const struct batadv_bla_claim *cl1 = data1, *cl2 = data2;
+ const struct batadv_bla_claim *cl1 = data1;
+ const struct batadv_bla_claim *cl2 = data2;
if (!batadv_compare_eth(cl1->addr, cl2->addr))
return 0;
@@ -192,8 +193,8 @@ static struct batadv_bla_claim
* Returns claim if found or NULL otherwise.
*/
static struct batadv_bla_backbone_gw *
-batadv_backbone_hash_find(struct batadv_priv *bat_priv,
- uint8_t *addr, unsigned short vid)
+batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
+ unsigned short vid)
{
struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
struct hlist_head *head;
@@ -269,14 +270,14 @@ batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
* @vid: the VLAN ID
* @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
*/
-static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
+static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
unsigned short vid, int claimtype)
{
struct sk_buff *skb;
struct ethhdr *ethhdr;
struct batadv_hard_iface *primary_if;
struct net_device *soft_iface;
- uint8_t *hw_src;
+ u8 *hw_src;
struct batadv_bla_claim_dst local_claim_dest;
__be32 zeroip = 0;
@@ -304,13 +305,13 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
* with XX = claim type
* and YY:YY = group id
*/
- (uint8_t *)&local_claim_dest);
+ (u8 *)&local_claim_dest);
if (!skb)
goto out;
ethhdr = (struct ethhdr *)skb->data;
- hw_src = (uint8_t *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
+ hw_src = (u8 *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
/* now we pretend that the client would have sent this ... */
switch (claimtype) {
@@ -383,7 +384,7 @@ out:
* be found.
*/
static struct batadv_bla_backbone_gw *
-batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
+batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
unsigned short vid, bool own_backbone)
{
struct batadv_bla_backbone_gw *entry;
@@ -552,7 +553,7 @@ static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
struct batadv_bla_backbone_gw *backbone_gw)
{
- uint8_t mac[ETH_ALEN];
+ u8 mac[ETH_ALEN];
__be16 crc;
memcpy(mac, batadv_announce_mac, 4);
@@ -571,7 +572,7 @@ static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
* @backbone_gw: the backbone gateway which claims it
*/
static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
- const uint8_t *mac, const unsigned short vid,
+ const u8 *mac, const unsigned short vid,
struct batadv_bla_backbone_gw *backbone_gw)
{
struct batadv_bla_claim *claim;
@@ -635,7 +636,7 @@ claim_free_ref:
* given mac address and vid.
*/
static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
- const uint8_t *mac, const unsigned short vid)
+ const u8 *mac, const unsigned short vid)
{
struct batadv_bla_claim search_claim, *claim;
@@ -659,12 +660,11 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
}
/* check for ANNOUNCE frame, return 1 if handled */
-static int batadv_handle_announce(struct batadv_priv *bat_priv,
- uint8_t *an_addr, uint8_t *backbone_addr,
- unsigned short vid)
+static int batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
+ u8 *backbone_addr, unsigned short vid)
{
struct batadv_bla_backbone_gw *backbone_gw;
- uint16_t crc;
+ u16 crc;
if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
return 0;
@@ -708,8 +708,8 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv,
/* check for REQUEST frame, return 1 if handled */
static int batadv_handle_request(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
- uint8_t *backbone_addr,
- struct ethhdr *ethhdr, unsigned short vid)
+ u8 *backbone_addr, struct ethhdr *ethhdr,
+ unsigned short vid)
{
/* check for REQUEST frame */
if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
@@ -732,8 +732,8 @@ static int batadv_handle_request(struct batadv_priv *bat_priv,
/* check for UNCLAIM frame, return 1 if handled */
static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
- uint8_t *backbone_addr,
- uint8_t *claim_addr, unsigned short vid)
+ u8 *backbone_addr, u8 *claim_addr,
+ unsigned short vid)
{
struct batadv_bla_backbone_gw *backbone_gw;
@@ -761,7 +761,7 @@ static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
/* check for CLAIM frame, return 1 if handled */
static int batadv_handle_claim(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
- uint8_t *backbone_addr, uint8_t *claim_addr,
+ u8 *backbone_addr, u8 *claim_addr,
unsigned short vid)
{
struct batadv_bla_backbone_gw *backbone_gw;
@@ -805,10 +805,10 @@ static int batadv_handle_claim(struct batadv_priv *bat_priv,
*/
static int batadv_check_claim_group(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
- uint8_t *hw_src, uint8_t *hw_dst,
+ u8 *hw_src, u8 *hw_dst,
struct ethhdr *ethhdr)
{
- uint8_t *backbone_addr;
+ u8 *backbone_addr;
struct batadv_orig_node *orig_node;
struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
@@ -877,7 +877,7 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
struct sk_buff *skb)
{
struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
- uint8_t *hw_src, *hw_dst;
+ u8 *hw_src, *hw_dst;
struct vlan_hdr *vhdr, vhdr_buf;
struct ethhdr *ethhdr;
struct arphdr *arphdr;
@@ -923,7 +923,7 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
/* pskb_may_pull() may have modified the pointers, get ethhdr again */
ethhdr = eth_hdr(skb);
- arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);
+ arphdr = (struct arphdr *)((u8 *)ethhdr + headlen);
/* Check whether the ARP frame carries a valid
* IP information
@@ -937,7 +937,7 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
if (arphdr->ar_pln != 4)
return 0;
- hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
+ hw_src = (u8 *)arphdr + sizeof(struct arphdr);
hw_dst = hw_src + ETH_ALEN + 4;
bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
bla_dst_own = &bat_priv->bla.claim_dest;
@@ -1238,9 +1238,9 @@ static struct lock_class_key batadv_backbone_hash_lock_class_key;
int batadv_bla_init(struct batadv_priv *bat_priv)
{
int i;
- uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
+ u8 claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
struct batadv_hard_iface *primary_if;
- uint16_t crc;
+ u16 crc;
unsigned long entrytime;
spin_lock_init(&bat_priv->bla.bcast_duplist_lock);
@@ -1368,7 +1368,7 @@ out:
*
* Returns true if orig is a backbone for this vid, false otherwise.
*/
-bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig,
+bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
unsigned short vid)
{
struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
@@ -1647,9 +1647,9 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
struct batadv_bla_claim *claim;
struct batadv_hard_iface *primary_if;
struct hlist_head *head;
- uint32_t i;
+ u32 i;
bool is_own;
- uint8_t *primary_addr;
+ u8 *primary_addr;
primary_if = batadv_seq_print_text_primary_if_get(seq);
if (!primary_if)
@@ -1692,9 +1692,9 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
struct batadv_hard_iface *primary_if;
struct hlist_head *head;
int secs, msecs;
- uint32_t i;
+ u32 i;
bool is_own;
- uint8_t *primary_addr;
+ u8 *primary_addr;
primary_if = batadv_seq_print_text_primary_if_get(seq);
if (!primary_if)
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 028269038..025152b34 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -22,9 +22,6 @@
#include <linux/types.h>
-struct batadv_hard_iface;
-struct batadv_orig_node;
-struct batadv_priv;
struct seq_file;
struct sk_buff;
@@ -38,7 +35,7 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb,
int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
void *offset);
-bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig,
+bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
unsigned short vid);
int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
struct sk_buff *skb);
@@ -84,8 +81,7 @@ static inline int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
}
static inline bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
- uint8_t *orig,
- unsigned short vid)
+ u8 *orig, unsigned short vid)
{
return false;
}
diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h
index 187acdc85..80ab8d6f0 100644
--- a/net/batman-adv/debugfs.h
+++ b/net/batman-adv/debugfs.h
@@ -22,7 +22,6 @@
#include <linux/kconfig.h>
-struct batadv_hard_iface;
struct net_device;
#define BATADV_DEBUGFS_SUBDIR "batman_adv"
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index cc7d87d64..83bc1aaf5 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -102,7 +102,7 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv,
struct batadv_dat_entry *dat_entry;
struct hlist_node *node_tmp;
struct hlist_head *head;
- uint32_t i;
+ u32 i;
if (!bat_priv->dat.hash)
return;
@@ -168,11 +168,11 @@ static int batadv_compare_dat(const struct hlist_node *node, const void *data2)
*
* Returns the value of the hw_src field in the ARP packet.
*/
-static uint8_t *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size)
+static u8 *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size)
{
- uint8_t *addr;
+ u8 *addr;
- addr = (uint8_t *)(skb->data + hdr_size);
+ addr = (u8 *)(skb->data + hdr_size);
addr += ETH_HLEN + sizeof(struct arphdr);
return addr;
@@ -197,7 +197,7 @@ static __be32 batadv_arp_ip_src(struct sk_buff *skb, int hdr_size)
*
* Returns the value of the hw_dst field in the ARP packet.
*/
-static uint8_t *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size)
+static u8 *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size)
{
return batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN + 4;
}
@@ -221,12 +221,12 @@ static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size)
*
* Returns the selected index in the hash table for the given data.
*/
-static uint32_t batadv_hash_dat(const void *data, uint32_t size)
+static u32 batadv_hash_dat(const void *data, u32 size)
{
- uint32_t hash = 0;
+ u32 hash = 0;
const struct batadv_dat_entry *dat = data;
const unsigned char *key;
- uint32_t i;
+ u32 i;
key = (const unsigned char *)&dat->ip;
for (i = 0; i < sizeof(dat->ip); i++) {
@@ -265,7 +265,7 @@ batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip,
struct hlist_head *head;
struct batadv_dat_entry to_find, *dat_entry, *dat_entry_tmp = NULL;
struct batadv_hashtable *hash = bat_priv->dat.hash;
- uint32_t index;
+ u32 index;
if (!hash)
return NULL;
@@ -300,7 +300,7 @@ batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip,
* @vid: VLAN identifier
*/
static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
- uint8_t *mac_addr, unsigned short vid)
+ u8 *mac_addr, unsigned short vid)
{
struct batadv_dat_entry *dat_entry;
int hash_added;
@@ -357,11 +357,11 @@ out:
* @msg: message to print together with the debugging information
*/
static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
- uint16_t type, int hdr_size, char *msg)
+ u16 type, int hdr_size, char *msg)
{
struct batadv_unicast_4addr_packet *unicast_4addr_packet;
struct batadv_bcast_packet *bcast_pkt;
- uint8_t *orig_addr;
+ u8 *orig_addr;
__be32 ip_src, ip_dst;
if (msg)
@@ -424,7 +424,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
#else
static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
- uint16_t type, int hdr_size, char *msg)
+ u16 type, int hdr_size, char *msg)
{
}
@@ -497,7 +497,8 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
int select, batadv_dat_addr_t ip_key,
batadv_dat_addr_t *last_max)
{
- batadv_dat_addr_t max = 0, tmp_max = 0;
+ batadv_dat_addr_t max = 0;
+ batadv_dat_addr_t tmp_max = 0;
struct batadv_orig_node *orig_node, *max_orig_node = NULL;
struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_head *head;
@@ -709,9 +710,8 @@ void batadv_dat_status_update(struct net_device *net_dev)
*/
static void batadv_dat_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
- uint8_t flags,
- void *tvlv_value,
- uint16_t tvlv_value_len)
+ u8 flags,
+ void *tvlv_value, u16 tvlv_value_len)
{
if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
clear_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
@@ -787,7 +787,7 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
struct hlist_head *head;
unsigned long last_seen_jiffies;
int last_seen_msecs, last_seen_secs, last_seen_mins;
- uint32_t i;
+ u32 i;
primary_if = batadv_seq_print_text_primary_if_get(seq);
if (!primary_if)
@@ -830,14 +830,14 @@ out:
*
* Returns the ARP type if the skb contains a valid ARP packet, 0 otherwise.
*/
-static uint16_t batadv_arp_get_type(struct batadv_priv *bat_priv,
- struct sk_buff *skb, int hdr_size)
+static u16 batadv_arp_get_type(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, int hdr_size)
{
struct arphdr *arphdr;
struct ethhdr *ethhdr;
__be32 ip_src, ip_dst;
- uint8_t *hw_src, *hw_dst;
- uint16_t type = 0;
+ u8 *hw_src, *hw_dst;
+ u16 type = 0;
/* pull the ethernet header */
if (unlikely(!pskb_may_pull(skb, hdr_size + ETH_HLEN)))
@@ -934,9 +934,9 @@ static unsigned short batadv_dat_get_vid(struct sk_buff *skb, int *hdr_size)
bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
struct sk_buff *skb)
{
- uint16_t type = 0;
+ u16 type = 0;
__be32 ip_dst, ip_src;
- uint8_t *hw_src;
+ u8 *hw_src;
bool ret = false;
struct batadv_dat_entry *dat_entry = NULL;
struct sk_buff *skb_new;
@@ -1022,9 +1022,9 @@ out:
bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
struct sk_buff *skb, int hdr_size)
{
- uint16_t type;
+ u16 type;
__be32 ip_src, ip_dst;
- uint8_t *hw_src;
+ u8 *hw_src;
struct sk_buff *skb_new;
struct batadv_dat_entry *dat_entry = NULL;
bool ret = false;
@@ -1100,9 +1100,9 @@ out:
void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
struct sk_buff *skb)
{
- uint16_t type;
+ u16 type;
__be32 ip_src, ip_dst;
- uint8_t *hw_src, *hw_dst;
+ u8 *hw_src, *hw_dst;
int hdr_size = 0;
unsigned short vid;
@@ -1146,9 +1146,9 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
struct sk_buff *skb, int hdr_size)
{
- uint16_t type;
+ u16 type;
__be32 ip_src, ip_dst;
- uint8_t *hw_src, *hw_dst;
+ u8 *hw_src, *hw_dst;
bool dropped = false;
unsigned short vid;
@@ -1202,7 +1202,7 @@ out:
bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
struct batadv_forw_packet *forw_packet)
{
- uint16_t type;
+ u16 type;
__be32 ip_dst;
struct batadv_dat_entry *dat_entry = NULL;
bool ret = false;
diff --git a/net/batman-adv/distributed-arp-table.h b/net/batman-adv/distributed-arp-table.h
index 3181507eb..26d4a525a 100644
--- a/net/batman-adv/distributed-arp-table.h
+++ b/net/batman-adv/distributed-arp-table.h
@@ -54,7 +54,7 @@ bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
static inline void
batadv_dat_init_orig_node_addr(struct batadv_orig_node *orig_node)
{
- uint32_t addr;
+ u32 addr;
addr = batadv_choose_orig(orig_node->orig, BATADV_DAT_ADDR_MAX);
orig_node->dat_addr = (batadv_dat_addr_t)addr;
@@ -69,7 +69,7 @@ static inline void
batadv_dat_init_own_addr(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if)
{
- uint32_t addr;
+ u32 addr;
addr = batadv_choose_orig(primary_if->net_dev->dev_addr,
BATADV_DAT_ADDR_MAX);
@@ -89,7 +89,7 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset);
* Updates the ethtool statistics for the received packet if it is a DAT subtype
*/
static inline void batadv_dat_inc_counter(struct batadv_priv *bat_priv,
- uint8_t subtype)
+ u8 subtype)
{
switch (subtype) {
case BATADV_P_DAT_DHT_GET:
@@ -169,7 +169,7 @@ static inline void batadv_dat_free(struct batadv_priv *bat_priv)
}
static inline void batadv_dat_inc_counter(struct batadv_priv *bat_priv,
- uint8_t subtype)
+ u8 subtype)
{
}
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index c0f0d01ab..700c96c82 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -25,6 +25,7 @@
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
+#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <linux/skbuff.h>
@@ -66,7 +67,7 @@ void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
bool (*check_cb)(struct batadv_frag_table_entry *))
{
struct batadv_frag_table_entry *chain;
- uint8_t i;
+ u8 i;
for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
chain = &orig_node->fragments[i];
@@ -110,8 +111,10 @@ static int batadv_frag_size_limit(void)
* without searching for the right position.
*/
static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
- uint16_t seqno)
+ u16 seqno)
{
+ lockdep_assert_held(&chain->lock);
+
if (chain->seqno == seqno)
return false;
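
The lockdep_assert_held() added here turns the "caller must hold chain->lock" convention into something CONFIG_PROVE_LOCKING can verify at runtime. A kernel-style sketch of the same annotation pattern, with illustrative names rather than batman-adv ones:

#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct toy_entry {
	spinlock_t lock;	/* protects seqno */
	u16 seqno;
};

/* must be called with entry->lock held */
static bool toy_reset_seqno(struct toy_entry *entry, u16 seqno)
{
	lockdep_assert_held(&entry->lock);

	if (entry->seqno == seqno)
		return false;

	entry->seqno = seqno;
	return true;
}

static void toy_caller(struct toy_entry *entry, u16 seqno)
{
	spin_lock_bh(&entry->lock);
	toy_reset_seqno(entry, seqno);	/* assertion is satisfied here */
	spin_unlock_bh(&entry->lock);
}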
@@ -145,8 +148,8 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
struct batadv_frag_list_entry *frag_entry_last = NULL;
struct batadv_frag_packet *frag_packet;
- uint8_t bucket;
- uint16_t seqno, hdr_size = sizeof(struct batadv_frag_packet);
+ u8 bucket;
+ u16 seqno, hdr_size = sizeof(struct batadv_frag_packet);
bool ret = false;
/* Linearize packet to avoid linearizing 16 packets in a row when doing
@@ -351,7 +354,7 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb,
struct batadv_orig_node *orig_node_dst = NULL;
struct batadv_neigh_node *neigh_node = NULL;
struct batadv_frag_packet *packet;
- uint16_t total_size;
+ u16 total_size;
bool ret = false;
packet = (struct batadv_frag_packet *)skb->data;
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index cffa92dd9..e6c8382c7 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -27,7 +27,6 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
-#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
@@ -153,20 +152,14 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
struct batadv_neigh_node *router;
struct batadv_neigh_ifinfo *router_ifinfo;
struct batadv_gw_node *gw_node, *curr_gw = NULL;
- uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
- uint32_t gw_divisor;
- uint8_t max_tq = 0;
- uint8_t tq_avg;
+ u64 max_gw_factor = 0;
+ u64 tmp_gw_factor = 0;
+ u8 max_tq = 0;
+ u8 tq_avg;
struct batadv_orig_node *orig_node;
- gw_divisor = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE;
- gw_divisor *= 64;
-
rcu_read_lock();
hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
- if (gw_node->deleted)
- continue;
-
orig_node = gw_node->orig_node;
router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
if (!router)
@@ -187,7 +180,7 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
tmp_gw_factor = tq_avg * tq_avg;
tmp_gw_factor *= gw_node->bandwidth_down;
tmp_gw_factor *= 100 * 100;
- tmp_gw_factor /= gw_divisor;
+ tmp_gw_factor >>= 18;
if ((tmp_gw_factor > max_gw_factor) ||
((tmp_gw_factor == max_gw_factor) &&
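
The switch to u64 matters because the old u32 product tq_avg² * bandwidth_down * 10000 overflows for fast gateways. With BATADV_TQ_LOCAL_WINDOW_SIZE at its usual value of 64, the removed gw_divisor is 64 * 64 * 64 = 2^18, which is exactly what the new right shift divides by. A standalone sketch of the arithmetic with made-up input values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t tq_avg = 250;			/* link quality, 0..255 */
	uint32_t bandwidth_down = 100000;	/* advertised in 100 kbit/s units */
	uint64_t factor;

	factor = (uint64_t)tq_avg * tq_avg;	/* up to 255^2 */
	factor *= bandwidth_down;		/* already past UINT32_MAX here */
	factor *= 100 * 100;
	factor >>= 18;				/* == / (64 * 64 * 64) */

	printf("gw factor: %" PRIu64 "\n", factor);
	return 0;
}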
@@ -267,7 +260,8 @@ void batadv_gw_check_client_stop(struct batadv_priv *bat_priv)
void batadv_gw_election(struct batadv_priv *bat_priv)
{
- struct batadv_gw_node *curr_gw = NULL, *next_gw = NULL;
+ struct batadv_gw_node *curr_gw = NULL;
+ struct batadv_gw_node *next_gw = NULL;
struct batadv_neigh_node *router = NULL;
struct batadv_neigh_ifinfo *router_ifinfo = NULL;
char gw_addr[18] = { '\0' };
@@ -351,8 +345,9 @@ void batadv_gw_check_election(struct batadv_priv *bat_priv,
struct batadv_neigh_ifinfo *router_orig_tq = NULL;
struct batadv_neigh_ifinfo *router_gw_tq = NULL;
struct batadv_orig_node *curr_gw_orig;
- struct batadv_neigh_node *router_gw = NULL, *router_orig = NULL;
- uint8_t gw_tq_avg, orig_tq_avg;
+ struct batadv_neigh_node *router_gw = NULL;
+ struct batadv_neigh_node *router_orig = NULL;
+ u8 gw_tq_avg, orig_tq_avg;
curr_gw_orig = batadv_gw_get_selected_orig(bat_priv);
if (!curr_gw_orig)
@@ -474,9 +469,6 @@ batadv_gw_node_get(struct batadv_priv *bat_priv,
if (gw_node_tmp->orig_node != orig_node)
continue;
- if (gw_node_tmp->deleted)
- continue;
-
if (!atomic_inc_not_zero(&gw_node_tmp->refcount))
continue;
@@ -526,9 +518,7 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
- gw_node->deleted = 0;
if (ntohl(gateway->bandwidth_down) == 0) {
- gw_node->deleted = jiffies;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Gateway %pM removed from gateway list\n",
orig_node->orig);
@@ -536,14 +526,21 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
/* Note: We don't need a NULL check here, since curr_gw never
* gets dereferenced.
*/
+ spin_lock_bh(&bat_priv->gw.list_lock);
+ hlist_del_init_rcu(&gw_node->list);
+ spin_unlock_bh(&bat_priv->gw.list_lock);
+
+ batadv_gw_node_free_ref(gw_node);
+
curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
if (gw_node == curr_gw)
batadv_gw_reselect(bat_priv);
+
+ if (curr_gw)
+ batadv_gw_node_free_ref(curr_gw);
}
out:
- if (curr_gw)
- batadv_gw_node_free_ref(curr_gw);
if (gw_node)
batadv_gw_node_free_ref(gw_node);
}
@@ -559,39 +556,18 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
batadv_gw_node_update(bat_priv, orig_node, &gateway);
}
-void batadv_gw_node_purge(struct batadv_priv *bat_priv)
+void batadv_gw_node_free(struct batadv_priv *bat_priv)
{
- struct batadv_gw_node *gw_node, *curr_gw;
+ struct batadv_gw_node *gw_node;
struct hlist_node *node_tmp;
- unsigned long timeout = msecs_to_jiffies(2 * BATADV_PURGE_TIMEOUT);
- int do_reselect = 0;
-
- curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
spin_lock_bh(&bat_priv->gw.list_lock);
-
hlist_for_each_entry_safe(gw_node, node_tmp,
&bat_priv->gw.list, list) {
- if (((!gw_node->deleted) ||
- (time_before(jiffies, gw_node->deleted + timeout))) &&
- atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE)
- continue;
-
- if (curr_gw == gw_node)
- do_reselect = 1;
-
- hlist_del_rcu(&gw_node->list);
+ hlist_del_init_rcu(&gw_node->list);
batadv_gw_node_free_ref(gw_node);
}
-
spin_unlock_bh(&bat_priv->gw.list_lock);
-
- /* gw_reselect() needs to acquire the gw_list_lock */
- if (do_reselect)
- batadv_gw_reselect(bat_priv);
-
- if (curr_gw)
- batadv_gw_node_free_ref(curr_gw);
}
/* fails if orig_node has no router */
@@ -655,9 +631,6 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
rcu_read_lock();
hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
- if (gw_node->deleted)
- continue;
-
/* fails if orig_node has no router */
if (batadv_write_buffer_text(bat_priv, seq, gw_node) < 0)
continue;
@@ -692,7 +665,7 @@ out:
*/
enum batadv_dhcp_recipient
batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
- uint8_t *chaddr)
+ u8 *chaddr)
{
enum batadv_dhcp_recipient ret = BATADV_DHCP_NO;
struct ethhdr *ethhdr;
@@ -702,7 +675,7 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
struct vlan_ethhdr *vhdr;
int chaddr_offset;
__be16 proto;
- uint8_t *p;
+ u8 *p;
/* check for ethernet header */
if (!pskb_may_pull(skb, *header_len + ETH_HLEN))
@@ -812,13 +785,15 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
struct sk_buff *skb)
{
- struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
+ struct batadv_neigh_node *neigh_curr = NULL;
+ struct batadv_neigh_node *neigh_old = NULL;
struct batadv_orig_node *orig_dst_node = NULL;
- struct batadv_gw_node *gw_node = NULL, *curr_gw = NULL;
+ struct batadv_gw_node *gw_node = NULL;
+ struct batadv_gw_node *curr_gw = NULL;
struct batadv_neigh_ifinfo *curr_ifinfo, *old_ifinfo;
struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
bool out_of_range = false;
- uint8_t curr_tq_avg;
+ u8 curr_tq_avg;
unsigned short vid;
vid = batadv_get_vid(skb, 0);
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 89565b451..fa9527785 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -38,11 +38,11 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
struct batadv_tvlv_gateway_data *gateway);
void batadv_gw_node_delete(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node);
-void batadv_gw_node_purge(struct batadv_priv *bat_priv);
+void batadv_gw_node_free(struct batadv_priv *bat_priv);
int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
enum batadv_dhcp_recipient
batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
- uint8_t *chaddr);
+ u8 *chaddr);
#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 39cf44cce..0cb5e6b6f 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -19,8 +19,10 @@
#include "main.h"
#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
+#include <linux/errno.h>
#include <linux/kernel.h>
+#include <linux/math64.h>
#include <linux/netdevice.h>
#include <linux/stddef.h>
#include <linux/string.h>
@@ -39,11 +41,11 @@
* Returns false on parse error and true otherwise.
*/
static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
- uint32_t *down, uint32_t *up)
+ u32 *down, u32 *up)
{
enum batadv_bandwidth_units bw_unit_type = BATADV_BW_UNIT_KBIT;
char *slash_ptr, *tmp_ptr;
- long ldown, lup;
+ u64 ldown, lup;
int ret;
slash_ptr = strchr(buff, '/');
@@ -61,7 +63,7 @@ static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
*tmp_ptr = '\0';
}
- ret = kstrtol(buff, 10, &ldown);
+ ret = kstrtou64(buff, 10, &ldown);
if (ret) {
batadv_err(net_dev,
"Download speed of gateway mode invalid: %s\n",
@@ -71,14 +73,31 @@ static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
switch (bw_unit_type) {
case BATADV_BW_UNIT_MBIT:
- *down = ldown * 10;
+ /* prevent overflow */
+ if (U64_MAX / 10 < ldown) {
+ batadv_err(net_dev,
+ "Download speed of gateway mode too large: %s\n",
+ buff);
+ return false;
+ }
+
+ ldown *= 10;
break;
case BATADV_BW_UNIT_KBIT:
default:
- *down = ldown / 100;
+ ldown = div_u64(ldown, 100);
break;
}
+ if (U32_MAX < ldown) {
+ batadv_err(net_dev,
+ "Download speed of gateway mode too large: %s\n",
+ buff);
+ return false;
+ }
+
+ *down = ldown;
+
/* we also got some upload info */
if (slash_ptr) {
bw_unit_type = BATADV_BW_UNIT_KBIT;
@@ -94,7 +113,7 @@ static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
*tmp_ptr = '\0';
}
- ret = kstrtol(slash_ptr + 1, 10, &lup);
+ ret = kstrtou64(slash_ptr + 1, 10, &lup);
if (ret) {
batadv_err(net_dev,
"Upload speed of gateway mode invalid: %s\n",
@@ -104,13 +123,30 @@ static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
switch (bw_unit_type) {
case BATADV_BW_UNIT_MBIT:
- *up = lup * 10;
+ /* prevent overflow */
+ if (U64_MAX / 10 < lup) {
+ batadv_err(net_dev,
+ "Upload speed of gateway mode too large: %s\n",
+ slash_ptr + 1);
+ return false;
+ }
+
+ lup *= 10;
break;
case BATADV_BW_UNIT_KBIT:
default:
- *up = lup / 100;
+ lup = div_u64(lup, 100);
break;
}
+
+ if (U32_MAX < lup) {
+ batadv_err(net_dev,
+ "Upload speed of gateway mode too large: %s\n",
+ slash_ptr + 1);
+ return false;
+ }
+
+ *up = lup;
}
return true;
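
The parsing changes above follow one discipline: guard the multiply against u64 overflow, keep dividing with div_u64 while the value is still wide, and only narrow to u32 after an explicit range check. A userspace sketch of the same discipline for the Mbit case, with a hypothetical helper name and the 100 kbit/s unit assumed from the code above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool scale_mbit_to_100kbit(uint64_t mbit, uint32_t *out)
{
	if (mbit > UINT64_MAX / 10)	/* the multiply would overflow */
		return false;

	mbit *= 10;			/* Mbit/s -> 100 kbit/s units */

	if (mbit > UINT32_MAX)		/* would not fit the u32 result */
		return false;

	*out = (uint32_t)mbit;
	return true;
}

int main(void)
{
	uint32_t down;

	if (scale_mbit_to_100kbit(10000, &down))	/* 10 Gbit/s */
		printf("down = %u (100 kbit/s units)\n", down);
	return 0;
}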
@@ -124,7 +160,7 @@ static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv)
{
struct batadv_tvlv_gateway_data gw;
- uint32_t down, up;
+ u32 down, up;
char gw_mode;
gw_mode = atomic_read(&bat_priv->gw_mode);
@@ -149,7 +185,10 @@ ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
size_t count)
{
struct batadv_priv *bat_priv = netdev_priv(net_dev);
- uint32_t down_curr, up_curr, down_new = 0, up_new = 0;
+ u32 down_curr;
+ u32 up_curr;
+ u32 down_new = 0;
+ u32 up_new = 0;
bool ret;
down_curr = (unsigned int)atomic_read(&bat_priv->gw.bandwidth_down);
@@ -157,7 +196,7 @@ ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
ret = batadv_parse_gw_bandwidth(net_dev, buff, &down_new, &up_new);
if (!ret)
- goto end;
+ return -EINVAL;
if (!down_new)
down_new = 1;
@@ -181,7 +220,6 @@ ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
atomic_set(&bat_priv->gw.bandwidth_up, up_new);
batadv_gw_tvlv_container_update(bat_priv);
-end:
return count;
}
@@ -195,9 +233,8 @@ end:
*/
static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
- uint8_t flags,
- void *tvlv_value,
- uint16_t tvlv_value_len)
+ u8 flags,
+ void *tvlv_value, u16 tvlv_value_len)
{
struct batadv_tvlv_gateway_data gateway, *gateway_ptr;
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index bd5c812ce..ab893e318 100644
--- a/net/batman-adv/gateway_common.h
+++ b/net/batman-adv/gateway_common.h
@@ -22,7 +22,6 @@
#include <linux/types.h>
-struct batadv_priv;
struct net_device;
enum batadv_gw_modes {
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index f4a15d2e5..f11345e16 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -252,6 +252,44 @@ static void batadv_check_known_mac_addr(const struct net_device *net_dev)
rcu_read_unlock();
}
+/**
+ * batadv_hardif_recalc_extra_skbroom - Recalculate skbuff extra head/tailroom
+ * @soft_iface: netdev struct of the mesh interface
+ */
+static void batadv_hardif_recalc_extra_skbroom(struct net_device *soft_iface)
+{
+ const struct batadv_hard_iface *hard_iface;
+ unsigned short lower_header_len = ETH_HLEN;
+ unsigned short lower_headroom = 0;
+ unsigned short lower_tailroom = 0;
+ unsigned short needed_headroom;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->if_status == BATADV_IF_NOT_IN_USE)
+ continue;
+
+ if (hard_iface->soft_iface != soft_iface)
+ continue;
+
+ lower_header_len = max_t(unsigned short, lower_header_len,
+ hard_iface->net_dev->hard_header_len);
+
+ lower_headroom = max_t(unsigned short, lower_headroom,
+ hard_iface->net_dev->needed_headroom);
+
+ lower_tailroom = max_t(unsigned short, lower_tailroom,
+ hard_iface->net_dev->needed_tailroom);
+ }
+ rcu_read_unlock();
+
+ needed_headroom = lower_headroom + (lower_header_len - ETH_HLEN);
+ needed_headroom += batadv_max_header_len();
+
+ soft_iface->needed_headroom = needed_headroom;
+ soft_iface->needed_tailroom = lower_tailroom;
+}
+
int batadv_hardif_min_mtu(struct net_device *soft_iface)
{
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
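
batadv_hardif_recalc_extra_skbroom() takes, across all slave hard interfaces, the worst-case hard_header_len, needed_headroom and needed_tailroom, so one skb allocation on the soft interface covers every lower device plus batman-adv's own encapsulation. A userspace model of that aggregation, with illustrative numbers and a stand-in constant for batadv_max_header_len():

#include <stdio.h>

#define ETH_HLEN	14
#define BATADV_HDR_MAX	18	/* stand-in for batadv_max_header_len() */

struct toy_lower {
	unsigned short hard_header_len;
	unsigned short needed_headroom;
	unsigned short needed_tailroom;
};

int main(void)
{
	struct toy_lower lowers[] = {
		{ 14, 0, 0 },	/* plain ethernet slave */
		{ 32, 64, 8 },	/* e.g. an encapsulating slave device */
	};
	unsigned short header_len = ETH_HLEN, headroom = 0, tailroom = 0;
	unsigned int i;

	for (i = 0; i < sizeof(lowers) / sizeof(lowers[0]); i++) {
		if (lowers[i].hard_header_len > header_len)
			header_len = lowers[i].hard_header_len;
		if (lowers[i].needed_headroom > headroom)
			headroom = lowers[i].needed_headroom;
		if (lowers[i].needed_tailroom > tailroom)
			tailroom = lowers[i].needed_tailroom;
	}

	printf("needed_headroom = %u, needed_tailroom = %u\n",
	       (unsigned int)(headroom + (header_len - ETH_HLEN) + BATADV_HDR_MAX),
	       (unsigned int)tailroom);
	return 0;
}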
@@ -474,6 +512,8 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
"Not using interface %s (retrying later): interface not active\n",
hard_iface->net_dev->name);
+ batadv_hardif_recalc_extra_skbroom(soft_iface);
+
/* begin scheduling originator messages on that interface */
batadv_schedule_bat_ogm(hard_iface);
@@ -528,6 +568,9 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
batadv_purge_outstanding_packets(bat_priv, hard_iface);
dev_put(hard_iface->soft_iface);
+ netdev_upper_dev_unlink(hard_iface->net_dev, hard_iface->soft_iface);
+ batadv_hardif_recalc_extra_skbroom(hard_iface->soft_iface);
+
/* nobody uses this interface anymore */
if (!bat_priv->num_ifaces) {
batadv_gw_check_client_stop(bat_priv);
@@ -536,7 +579,6 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
batadv_softif_destroy_sysfs(hard_iface->soft_iface);
}
- netdev_upper_dev_unlink(hard_iface->net_dev, hard_iface->soft_iface);
hard_iface->soft_iface = NULL;
batadv_hardif_free_ref(hard_iface);
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index e89f3146b..2ea6a18d7 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -25,7 +25,7 @@
/* clears the hash */
static void batadv_hash_init(struct batadv_hashtable *hash)
{
- uint32_t i;
+ u32 i;
for (i = 0; i < hash->size; i++) {
INIT_HLIST_HEAD(&hash->table[i]);
@@ -42,7 +42,7 @@ void batadv_hash_destroy(struct batadv_hashtable *hash)
}
/* allocates and clears the hash */
-struct batadv_hashtable *batadv_hash_new(uint32_t size)
+struct batadv_hashtable *batadv_hash_new(u32 size)
{
struct batadv_hashtable *hash;
@@ -73,7 +73,7 @@ free_hash:
void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
struct lock_class_key *key)
{
- uint32_t i;
+ u32 i;
for (i = 0; i < hash->size; i++)
lockdep_set_class(&hash->list_locks[i], key);
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 5065f50c9..377626250 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -39,17 +39,17 @@ typedef int (*batadv_hashdata_compare_cb)(const struct hlist_node *,
* based on the key in the data of the first
* argument and the size the second
*/
-typedef uint32_t (*batadv_hashdata_choose_cb)(const void *, uint32_t);
+typedef u32 (*batadv_hashdata_choose_cb)(const void *, u32);
typedef void (*batadv_hashdata_free_cb)(struct hlist_node *, void *);
struct batadv_hashtable {
struct hlist_head *table; /* the hashtable itself with the buckets */
spinlock_t *list_locks; /* spinlock for each hash list entry */
- uint32_t size; /* size of hashtable */
+ u32 size; /* size of hashtable */
};
/* allocates and clears the hash */
-struct batadv_hashtable *batadv_hash_new(uint32_t size);
+struct batadv_hashtable *batadv_hash_new(u32 size);
/* set class key for all locks */
void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
@@ -69,7 +69,7 @@ static inline void batadv_hash_delete(struct batadv_hashtable *hash,
struct hlist_head *head;
struct hlist_node *node, *node_tmp;
spinlock_t *list_lock; /* spinlock to protect write access */
- uint32_t i;
+ u32 i;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -105,7 +105,7 @@ static inline int batadv_hash_add(struct batadv_hashtable *hash,
const void *data,
struct hlist_node *data_node)
{
- uint32_t index;
+ u32 index;
int ret = -1;
struct hlist_head *head;
struct hlist_node *node;
@@ -149,7 +149,7 @@ static inline void *batadv_hash_remove(struct batadv_hashtable *hash,
batadv_hashdata_choose_cb choose,
void *data)
{
- uint32_t index;
+ u32 index;
struct hlist_node *node;
struct hlist_head *head;
void *data_save = NULL;
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 07061bcba..bcabb5e3f 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -183,7 +183,7 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
struct batadv_orig_node *orig_node = NULL;
struct batadv_neigh_node *neigh_node = NULL;
size_t packet_len = sizeof(struct batadv_icmp_packet);
- uint8_t *addr;
+ u8 *addr;
if (len < sizeof(struct batadv_icmp_header)) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -337,8 +337,8 @@ err:
}
/**
- * batadv_socket_receive_packet - schedule an icmp packet to be sent to userspace
- * on an icmp socket.
+ * batadv_socket_receive_packet - schedule an icmp packet to be sent to
+ * userspace on an icmp socket.
* @socket_client: the socket this packet belongs to
* @icmph: pointer to the header of the icmp packet
* @icmp_len: total length of the icmp packet
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index 7de7fce4b..e937143f0 100644
--- a/net/batman-adv/icmp_socket.h
+++ b/net/batman-adv/icmp_socket.h
@@ -23,7 +23,6 @@
#include <linux/types.h>
struct batadv_icmp_header;
-struct batadv_priv;
#define BATADV_ICMP_SOCKET "socket"
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 8457097f1..d7f17c1aa 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -30,6 +30,7 @@
#include <linux/ipv6.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
@@ -148,7 +149,7 @@ int batadv_mesh_init(struct net_device *soft_iface)
INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv6_list);
#endif
INIT_LIST_HEAD(&bat_priv->tt.changes_list);
- INIT_LIST_HEAD(&bat_priv->tt.req_list);
+ INIT_HLIST_HEAD(&bat_priv->tt.req_list);
INIT_LIST_HEAD(&bat_priv->tt.roam_list);
#ifdef CONFIG_BATMAN_ADV_MCAST
INIT_HLIST_HEAD(&bat_priv->mcast.mla_list);
@@ -198,7 +199,7 @@ void batadv_mesh_free(struct net_device *soft_iface)
batadv_purge_outstanding_packets(bat_priv, NULL);
- batadv_gw_node_purge(bat_priv);
+ batadv_gw_node_free(bat_priv);
batadv_nc_mesh_free(bat_priv);
batadv_dat_free(bat_priv);
batadv_bla_free(bat_priv);
@@ -234,7 +235,7 @@ void batadv_mesh_free(struct net_device *soft_iface)
*
* Returns 'true' if the mac address was found, false otherwise.
*/
-bool batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
+bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
{
const struct batadv_hard_iface *hard_iface;
bool is_my_mac = false;
@@ -387,7 +388,7 @@ int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
struct batadv_priv *bat_priv;
struct batadv_ogm_packet *batadv_ogm_packet;
struct batadv_hard_iface *hard_iface;
- uint8_t idx;
+ u8 idx;
int ret;
hard_iface = container_of(ptype, struct batadv_hard_iface,
@@ -496,7 +497,7 @@ static void batadv_recv_handler_init(void)
}
int
-batadv_recv_handler_register(uint8_t packet_type,
+batadv_recv_handler_register(u8 packet_type,
int (*recv_handler)(struct sk_buff *,
struct batadv_hard_iface *))
{
@@ -512,7 +513,7 @@ batadv_recv_handler_register(uint8_t packet_type,
return 0;
}
-void batadv_recv_handler_unregister(uint8_t packet_type)
+void batadv_recv_handler_unregister(u8 packet_type)
{
batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}
@@ -583,7 +584,7 @@ int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
seq_puts(seq, "Available routing algorithms:\n");
hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
- seq_printf(seq, "%s\n", bat_algo_ops->name);
+ seq_printf(seq, " * %s\n", bat_algo_ops->name);
}
return 0;
@@ -642,8 +643,7 @@ batadv_tvlv_handler_free_ref(struct batadv_tvlv_handler *tvlv_handler)
* Returns tvlv handler if found or NULL otherwise.
*/
static struct batadv_tvlv_handler
-*batadv_tvlv_handler_get(struct batadv_priv *bat_priv,
- uint8_t type, uint8_t version)
+*batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version)
{
struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;
@@ -691,8 +691,7 @@ static void batadv_tvlv_container_free_ref(struct batadv_tvlv_container *tvlv)
* Returns tvlv container if found or NULL otherwise.
*/
static struct batadv_tvlv_container
-*batadv_tvlv_container_get(struct batadv_priv *bat_priv,
- uint8_t type, uint8_t version)
+*batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version)
{
struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;
@@ -723,10 +722,10 @@ static struct batadv_tvlv_container
*
* Returns size of all currently registered tvlv containers in bytes.
*/
-static uint16_t batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
+static u16 batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
{
struct batadv_tvlv_container *tvlv;
- uint16_t tvlv_len = 0;
+ u16 tvlv_len = 0;
hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
tvlv_len += sizeof(struct batadv_tvlv_hdr);
@@ -739,13 +738,17 @@ static uint16_t batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
/**
* batadv_tvlv_container_remove - remove tvlv container from the tvlv container
* list
+ * @bat_priv: the bat priv with all the soft interface information
* @tvlv: the to be removed tvlv container
*
* Has to be called with the appropriate locks being acquired
* (tvlv.container_list_lock).
*/
-static void batadv_tvlv_container_remove(struct batadv_tvlv_container *tvlv)
+static void batadv_tvlv_container_remove(struct batadv_priv *bat_priv,
+ struct batadv_tvlv_container *tvlv)
{
+ lockdep_assert_held(&bat_priv->tvlv.container_list_lock);
+
if (!tvlv)
return;
@@ -764,13 +767,13 @@ static void batadv_tvlv_container_remove(struct batadv_tvlv_container *tvlv)
* @version: tvlv container type to unregister
*/
void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
- uint8_t type, uint8_t version)
+ u8 type, u8 version)
{
struct batadv_tvlv_container *tvlv;
spin_lock_bh(&bat_priv->tvlv.container_list_lock);
tvlv = batadv_tvlv_container_get(bat_priv, type, version);
- batadv_tvlv_container_remove(tvlv);
+ batadv_tvlv_container_remove(bat_priv, tvlv);
spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
}
@@ -787,8 +790,8 @@ void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
* content is going to replace the old one.
*/
void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
- uint8_t type, uint8_t version,
- void *tvlv_value, uint16_t tvlv_value_len)
+ u8 type, u8 version,
+ void *tvlv_value, u16 tvlv_value_len)
{
struct batadv_tvlv_container *tvlv_old, *tvlv_new;
@@ -809,7 +812,7 @@ void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
spin_lock_bh(&bat_priv->tvlv.container_list_lock);
tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
- batadv_tvlv_container_remove(tvlv_old);
+ batadv_tvlv_container_remove(bat_priv, tvlv_old);
hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
}
@@ -861,14 +864,13 @@ static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
*
* Returns size of all appended tvlv containers in bytes.
*/
-uint16_t batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
- unsigned char **packet_buff,
- int *packet_buff_len,
- int packet_min_len)
+u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
+ unsigned char **packet_buff,
+ int *packet_buff_len, int packet_min_len)
{
struct batadv_tvlv_container *tvlv;
struct batadv_tvlv_hdr *tvlv_hdr;
- uint16_t tvlv_value_len;
+ u16 tvlv_value_len;
void *tvlv_value;
bool ret;
@@ -893,7 +895,7 @@ uint16_t batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
tvlv_hdr->len = tvlv->tvlv_hdr.len;
tvlv_value = tvlv_hdr + 1;
memcpy(tvlv_value, tvlv + 1, ntohs(tvlv->tvlv_hdr.len));
- tvlv_value = (uint8_t *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
+ tvlv_value = (u8 *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
}
end:
@@ -920,8 +922,8 @@ static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
struct batadv_tvlv_handler *tvlv_handler,
bool ogm_source,
struct batadv_orig_node *orig_node,
- uint8_t *src, uint8_t *dst,
- void *tvlv_value, uint16_t tvlv_value_len)
+ u8 *src, u8 *dst,
+ void *tvlv_value, u16 tvlv_value_len)
{
if (!tvlv_handler)
return NET_RX_SUCCESS;
@@ -972,13 +974,13 @@ static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
bool ogm_source,
struct batadv_orig_node *orig_node,
- uint8_t *src, uint8_t *dst,
- void *tvlv_value, uint16_t tvlv_value_len)
+ u8 *src, u8 *dst,
+ void *tvlv_value, u16 tvlv_value_len)
{
struct batadv_tvlv_handler *tvlv_handler;
struct batadv_tvlv_hdr *tvlv_hdr;
- uint16_t tvlv_value_cont_len;
- uint8_t cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
+ u16 tvlv_value_cont_len;
+ u8 cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
int ret = NET_RX_SUCCESS;
while (tvlv_value_len >= sizeof(*tvlv_hdr)) {
@@ -1000,7 +1002,7 @@ int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
tvlv_value_cont_len);
if (tvlv_handler)
batadv_tvlv_handler_free_ref(tvlv_handler);
- tvlv_value = (uint8_t *)tvlv_value + tvlv_value_cont_len;
+ tvlv_value = (u8 *)tvlv_value + tvlv_value_cont_len;
tvlv_value_len -= tvlv_value_cont_len;
}
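
The loop in batadv_tvlv_containers_process() is a classic TLV walk: consume a fixed header, bounds-check the variable payload, dispatch, then advance the cursor through a byte-pointer cast, which the series now spells (u8 *). A standalone sketch of the same walk over a simplified record layout (host byte order and the field set are illustrative):

#include <stdint.h>
#include <stdio.h>

struct toy_tvlv_hdr {
	uint8_t type;
	uint8_t version;
	uint16_t len;	/* payload length; host order for simplicity */
};

static void walk_tvlv(void *buf, size_t buf_len)
{
	struct toy_tvlv_hdr *hdr;

	while (buf_len >= sizeof(*hdr)) {
		hdr = buf;
		buf_len -= sizeof(*hdr);

		if (hdr->len > buf_len)
			break;	/* truncated record: stop, as the kernel does */

		printf("tvlv type %d, version %d, %d payload bytes\n",
		       hdr->type, hdr->version, hdr->len);

		/* advance the cursor past header and payload */
		buf = (uint8_t *)(hdr + 1) + hdr->len;
		buf_len -= hdr->len;
	}
}

int main(void)
{
	struct toy_tvlv_hdr recs[2] = { { 1, 1, 0 }, { 2, 1, 0 } };

	walk_tvlv(recs, sizeof(recs));	/* two records, empty payloads */
	return 0;
}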
@@ -1034,7 +1036,7 @@ void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node)
{
void *tvlv_value;
- uint16_t tvlv_value_len;
+ u16 tvlv_value_len;
if (!batadv_ogm_packet)
return;
@@ -1066,14 +1068,14 @@ void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
void (*optr)(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
- uint8_t flags,
+ u8 flags,
void *tvlv_value,
- uint16_t tvlv_value_len),
+ u16 tvlv_value_len),
int (*uptr)(struct batadv_priv *bat_priv,
- uint8_t *src, uint8_t *dst,
+ u8 *src, u8 *dst,
void *tvlv_value,
- uint16_t tvlv_value_len),
- uint8_t type, uint8_t version, uint8_t flags)
+ u16 tvlv_value_len),
+ u8 type, u8 version, u8 flags)
{
struct batadv_tvlv_handler *tvlv_handler;
@@ -1108,7 +1110,7 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
* @version: tvlv handler version to be unregistered
*/
void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
- uint8_t type, uint8_t version)
+ u8 type, u8 version)
{
struct batadv_tvlv_handler *tvlv_handler;
@@ -1134,9 +1136,9 @@ void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
* @tvlv_value: tvlv content
* @tvlv_value_len: tvlv content length
*/
-void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
- uint8_t *dst, uint8_t type, uint8_t version,
- void *tvlv_value, uint16_t tvlv_value_len)
+void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src,
+ u8 *dst, u8 type, u8 version,
+ void *tvlv_value, u16 tvlv_value_len)
{
struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
struct batadv_tvlv_hdr *tvlv_hdr;
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 41d27c787..ebd8af0a1 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -24,7 +24,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2015.1"
+#define BATADV_SOURCE_VERSION "2015.2"
#endif
/* B.A.T.M.A.N. parameters */
@@ -193,7 +193,7 @@ extern struct workqueue_struct *batadv_event_workqueue;
int batadv_mesh_init(struct net_device *soft_iface);
void batadv_mesh_free(struct net_device *soft_iface);
-bool batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr);
+bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr);
struct batadv_hard_iface *
batadv_seq_print_text_primary_if_get(struct seq_file *seq);
int batadv_max_header_len(void);
@@ -202,10 +202,10 @@ int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype,
struct net_device *orig_dev);
int
-batadv_recv_handler_register(uint8_t packet_type,
+batadv_recv_handler_register(u8 packet_type,
int (*recv_handler)(struct sk_buff *,
struct batadv_hard_iface *));
-void batadv_recv_handler_unregister(uint8_t packet_type);
+void batadv_recv_handler_unregister(u8 packet_type);
int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops);
int batadv_algo_select(struct batadv_priv *bat_priv, char *name);
int batadv_algo_seq_print_text(struct seq_file *seq, void *offset);
@@ -304,7 +304,7 @@ static inline bool batadv_has_timed_out(unsigned long timestamp,
* they handle overflows/underflows and can correctly check for a
* predecessor/successor unless the variable sequence number has grown by
* more than 2**(bitwidth(x)-1)-1.
- * This means that for a uint8_t with the maximum value 255, it would think:
+ * This means that for a u8 with the maximum value 255, it would think:
* - when adding nothing - it is neither a predecessor nor a successor
* - before adding more than 127 to the starting value - it is a predecessor,
* - when adding 128 - it is neither a predecessor nor a successor,
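
The comment describes the wrap-around window trick: reduce the difference back to the counter's own width and compare it against half the range. A standalone demonstration for u8 counters, mirroring the cases the comment lists (this models the macro's behaviour, not its exact typeof-based implementation):

#include <stdint.h>
#include <stdio.h>

static int u8_seq_before(uint8_t x, uint8_t y)
{
	uint8_t diff = (uint8_t)(x - y);

	return diff > 128;	/* forward distance 129..255 => x is older */
}

int main(void)
{
	printf("%d\n", u8_seq_before(255, 0));	 /* 1: 255 is just before 0 */
	printf("%d\n", u8_seq_before(255, 126)); /* 1: 127 ahead, still before */
	printf("%d\n", u8_seq_before(255, 127)); /* 0: exactly 128 ahead, neither */
	printf("%d\n", u8_seq_before(255, 254)); /* 0: 255 ahead => successor */
	return 0;
}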
@@ -327,10 +327,9 @@ static inline void batadv_add_counter(struct batadv_priv *bat_priv, size_t idx,
#define batadv_inc_counter(b, i) batadv_add_counter(b, i, 1)
/* Sum and return the cpu-local counters for index 'idx' */
-static inline uint64_t batadv_sum_counter(struct batadv_priv *bat_priv,
- size_t idx)
+static inline u64 batadv_sum_counter(struct batadv_priv *bat_priv, size_t idx)
{
- uint64_t *counters, sum = 0;
+ u64 *counters, sum = 0;
int cpu;
for_each_possible_cpu(cpu) {
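
batadv_sum_counter() reads one lock-free slot per possible CPU and adds them up; writers only ever touch their own CPU's slot. A userspace model with a fixed CPU count standing in for for_each_possible_cpu() and per_cpu_ptr():

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS		4	/* illustrative; the kernel iterates possible CPUs */
#define NR_COUNTERS	8

static uint64_t counters[NR_CPUS][NR_COUNTERS];	/* one row per CPU */

static uint64_t sum_counter(size_t idx)
{
	uint64_t sum = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += counters[cpu][idx];

	return sum;
}

int main(void)
{
	counters[0][2] = 10;	/* as if CPU 0 and CPU 3 both counted */
	counters[3][2] = 5;

	printf("counter 2 = %llu\n", (unsigned long long)sum_counter(2));
	return 0;
}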
@@ -348,39 +347,38 @@ static inline uint64_t batadv_sum_counter(struct batadv_priv *bat_priv,
#define BATADV_SKB_CB(__skb) ((struct batadv_skb_cb *)&((__skb)->cb[0]))
void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
- uint8_t type, uint8_t version,
- void *tvlv_value, uint16_t tvlv_value_len);
-uint16_t batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
- unsigned char **packet_buff,
- int *packet_buff_len,
- int packet_min_len);
+ u8 type, u8 version,
+ void *tvlv_value, u16 tvlv_value_len);
+u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
+ unsigned char **packet_buff,
+ int *packet_buff_len, int packet_min_len);
void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
struct batadv_ogm_packet *batadv_ogm_packet,
struct batadv_orig_node *orig_node);
void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
- uint8_t type, uint8_t version);
+ u8 type, u8 version);
void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
void (*optr)(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
- uint8_t flags,
+ u8 flags,
void *tvlv_value,
- uint16_t tvlv_value_len),
+ u16 tvlv_value_len),
int (*uptr)(struct batadv_priv *bat_priv,
- uint8_t *src, uint8_t *dst,
+ u8 *src, u8 *dst,
void *tvlv_value,
- uint16_t tvlv_value_len),
- uint8_t type, uint8_t version, uint8_t flags);
+ u16 tvlv_value_len),
+ u8 type, u8 version, u8 flags);
void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
- uint8_t type, uint8_t version);
+ u8 type, u8 version);
int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
bool ogm_source,
struct batadv_orig_node *orig_node,
- uint8_t *src, uint8_t *dst,
- void *tvlv_buff, uint16_t tvlv_buff_len);
-void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
- uint8_t *dst, uint8_t type, uint8_t version,
- void *tvlv_value, uint16_t tvlv_value_len);
+ u8 *src, u8 *dst,
+ void *tvlv_buff, u16 tvlv_buff_len);
+void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src,
+ u8 *dst, u8 type, u8 version,
+ void *tvlv_value, u16 tvlv_value_len);
unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len);
bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid);
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 68a955496..eb76386f8 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -31,6 +31,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
@@ -89,7 +90,7 @@ static int batadv_mcast_mla_softif_get(struct net_device *dev,
* Returns true if the given address is already in the given list.
* Otherwise returns false.
*/
-static bool batadv_mcast_mla_is_duplicate(uint8_t *mcast_addr,
+static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
struct hlist_head *mcast_list)
{
struct batadv_hw_addr *mcast_entry;
@@ -103,15 +104,19 @@ static bool batadv_mcast_mla_is_duplicate(uint8_t *mcast_addr,
/**
* batadv_mcast_mla_list_free - free a list of multicast addresses
+ * @bat_priv: the bat priv with all the soft interface information
* @mcast_list: the list to free
*
* Removes and frees all items in the given mcast_list.
*/
-static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
+static void batadv_mcast_mla_list_free(struct batadv_priv *bat_priv,
+ struct hlist_head *mcast_list)
{
struct batadv_hw_addr *mcast_entry;
struct hlist_node *tmp;
+ lockdep_assert_held(&bat_priv->tt.commit_lock);
+
hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
hlist_del(&mcast_entry->list);
kfree(mcast_entry);
@@ -134,6 +139,8 @@ static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
struct batadv_hw_addr *mcast_entry;
struct hlist_node *tmp;
+ lockdep_assert_held(&bat_priv->tt.commit_lock);
+
hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
list) {
if (mcast_list &&
@@ -164,6 +171,8 @@ static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
struct batadv_hw_addr *mcast_entry;
struct hlist_node *tmp;
+ lockdep_assert_held(&bat_priv->tt.commit_lock);
+
if (!mcast_list)
return;
@@ -268,7 +277,7 @@ update:
batadv_mcast_mla_tt_add(bat_priv, &mcast_list);
out:
- batadv_mcast_mla_list_free(&mcast_list);
+ batadv_mcast_mla_list_free(bat_priv, &mcast_list);
}
/**
@@ -595,11 +604,13 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
*/
static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
- uint8_t mcast_flags)
+ u8 mcast_flags)
{
struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;
+ lockdep_assert_held(&orig->mcast_handler_lock);
+
/* switched from flag unset to set */
if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
!(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
@@ -638,11 +649,13 @@ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
*/
static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
- uint8_t mcast_flags)
+ u8 mcast_flags)
{
struct hlist_node *node = &orig->mcast_want_all_ipv4_node;
struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list;
+ lockdep_assert_held(&orig->mcast_handler_lock);
+
/* switched from flag unset to set */
if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 &&
!(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) {
@@ -681,11 +694,13 @@ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
*/
static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
- uint8_t mcast_flags)
+ u8 mcast_flags)
{
struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list;
+ lockdep_assert_held(&orig->mcast_handler_lock);
+
/* switched from flag unset to set */
if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
!(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
@@ -721,17 +736,17 @@ static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
*/
static void batadv_mcast_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
- uint8_t flags,
+ u8 flags,
void *tvlv_value,
- uint16_t tvlv_value_len)
+ u16 tvlv_value_len)
{
bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
- uint8_t mcast_flags = BATADV_NO_FLAGS;
+ u8 mcast_flags = BATADV_NO_FLAGS;
bool orig_initialized;
if (orig_mcast_enabled && tvlv_value &&
(tvlv_value_len >= sizeof(mcast_flags)))
- mcast_flags = *(uint8_t *)tvlv_value;
+ mcast_flags = *(u8 *)tvlv_value;
spin_lock_bh(&orig->mcast_handler_lock);
orig_initialized = test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h
index beb6e56c6..8f3cb04b9 100644
--- a/net/batman-adv/multicast.h
+++ b/net/batman-adv/multicast.h
@@ -20,8 +20,6 @@
#include "main.h"
-struct batadv_orig_node;
-struct batadv_priv;
struct sk_buff;
/**
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 46604010d..f5276be2c 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -130,9 +130,8 @@ void batadv_nc_status_update(struct net_device *net_dev)
*/
static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
- uint8_t flags,
- void *tvlv_value,
- uint16_t tvlv_value_len)
+ u8 flags,
+ void *tvlv_value, u16 tvlv_value_len)
{
if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
clear_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities);
@@ -382,7 +381,7 @@ static void batadv_nc_purge_orig_hash(struct batadv_priv *bat_priv)
struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_head *head;
struct batadv_orig_node *orig_node;
- uint32_t i;
+ u32 i;
if (!hash)
return;
@@ -418,7 +417,7 @@ static void batadv_nc_purge_paths(struct batadv_priv *bat_priv,
struct hlist_node *node_tmp;
struct batadv_nc_path *nc_path;
spinlock_t *lock; /* Protects lists in hash */
- uint32_t i;
+ u32 i;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -478,10 +477,10 @@ static void batadv_nc_hash_key_gen(struct batadv_nc_path *key, const char *src,
*
* Returns the selected index in the hash table for the given data.
*/
-static uint32_t batadv_nc_hash_choose(const void *data, uint32_t size)
+static u32 batadv_nc_hash_choose(const void *data, u32 size)
{
const struct batadv_nc_path *nc_path = data;
- uint32_t hash = 0;
+ u32 hash = 0;
hash = jhash(&nc_path->prev_hop, sizeof(nc_path->prev_hop), hash);
hash = jhash(&nc_path->next_hop, sizeof(nc_path->next_hop), hash);
@@ -587,6 +586,8 @@ static bool batadv_nc_sniffed_purge(struct batadv_priv *bat_priv,
unsigned long timeout = bat_priv->nc.max_buffer_time;
bool res = false;
+ lockdep_assert_held(&nc_path->packet_list_lock);
+
/* Packets are added to tail, so the remaining packets did not time
* out and we can stop processing the current queue
*/
@@ -623,6 +624,8 @@ static bool batadv_nc_fwd_flush(struct batadv_priv *bat_priv,
{
unsigned long timeout = bat_priv->nc.max_fwd_delay;
+ lockdep_assert_held(&nc_path->packet_list_lock);
+
/* Packets are added to tail, so the remaining packets did not time
* out and we can stop processing the current queue
*/
@@ -744,8 +747,8 @@ static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv,
struct batadv_ogm_packet *ogm_packet)
{
struct batadv_orig_ifinfo *orig_ifinfo;
- uint32_t last_real_seqno;
- uint8_t last_ttl;
+ u32 last_real_seqno;
+ u8 last_ttl;
orig_ifinfo = batadv_orig_ifinfo_get(orig_node, BATADV_IF_DEFAULT);
if (!orig_ifinfo)
@@ -873,8 +876,8 @@ free:
}
/**
- * batadv_nc_update_nc_node - updates stored incoming and outgoing nc node structs
- * (best called on incoming OGMs)
+ * batadv_nc_update_nc_node - updates stored incoming and outgoing nc node
+ * structs (best called on incoming OGMs)
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig node originating the ogm packet
* @orig_neigh_node: neighboring orig node from which we received the ogm packet
@@ -888,7 +891,8 @@ void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
struct batadv_ogm_packet *ogm_packet,
int is_single_hop_neigh)
{
- struct batadv_nc_node *in_nc_node = NULL, *out_nc_node = NULL;
+ struct batadv_nc_node *in_nc_node = NULL;
+ struct batadv_nc_node *out_nc_node = NULL;
/* Check if network coding is enabled */
if (!atomic_read(&bat_priv->network_coding))
@@ -938,8 +942,8 @@ out:
*/
static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
struct batadv_hashtable *hash,
- uint8_t *src,
- uint8_t *dst)
+ u8 *src,
+ u8 *dst)
{
int hash_added;
struct batadv_nc_path *nc_path, nc_path_key;
@@ -991,9 +995,9 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
* selection of a receiver with slightly lower TQ than the other
* @tq: to be weighted tq value
*/
-static uint8_t batadv_nc_random_weight_tq(uint8_t tq)
+static u8 batadv_nc_random_weight_tq(u8 tq)
{
- uint8_t rand_val, rand_tq;
+ u8 rand_val, rand_tq;
get_random_bytes(&rand_val, sizeof(rand_val));
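
The weighting helper randomizes the estimated packet loss (255 - tq) so that, between two otherwise similar receivers, the one with slightly lower TQ still gets picked occasionally. A userspace sketch of that idea only; the rest of the kernel function's arithmetic is not shown in this hunk, so the body below is a plausible model, not the exact code:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TQ_MAX_VALUE 255

static uint8_t random_weight_tq(uint8_t tq)
{
	uint8_t rand_val = rand() & 0xff;	/* stand-in for get_random_bytes() */
	uint16_t rand_loss;

	rand_loss = rand_val * (TQ_MAX_VALUE - tq);	/* randomized loss */
	rand_loss /= TQ_MAX_VALUE;			/* back to 0..loss */

	return TQ_MAX_VALUE - rand_loss;	/* randomized tq in [tq, 255] */
}

int main(void)
{
	printf("weighted tq: %u\n", (unsigned int)random_weight_tq(200));
	return 0;
}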
@@ -1038,7 +1042,7 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
struct batadv_nc_packet *nc_packet,
struct batadv_neigh_node *neigh_node)
{
- uint8_t tq_weighted_neigh, tq_weighted_coding, tq_tmp;
+ u8 tq_weighted_neigh, tq_weighted_coding, tq_tmp;
struct sk_buff *skb_dest, *skb_src;
struct batadv_unicast_packet *packet1;
struct batadv_unicast_packet *packet2;
@@ -1047,7 +1051,7 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
struct batadv_neigh_node *router_coding = NULL;
struct batadv_neigh_ifinfo *router_neigh_ifinfo = NULL;
struct batadv_neigh_ifinfo *router_coding_ifinfo = NULL;
- uint8_t *first_source, *first_dest, *second_source, *second_dest;
+ u8 *first_source, *first_dest, *second_source, *second_dest;
__be32 packet_id1, packet_id2;
size_t count;
bool res = false;
@@ -1231,8 +1235,7 @@ out:
*
* Returns true if coding of a decoded packet is allowed.
*/
-static bool batadv_nc_skb_coding_possible(struct sk_buff *skb,
- uint8_t *dst, uint8_t *src)
+static bool batadv_nc_skb_coding_possible(struct sk_buff *skb, u8 *dst, u8 *src)
{
if (BATADV_SKB_CB(skb)->decoded && !batadv_compare_eth(dst, src))
return false;
@@ -1255,7 +1258,7 @@ batadv_nc_path_search(struct batadv_priv *bat_priv,
struct batadv_nc_node *in_nc_node,
struct batadv_nc_node *out_nc_node,
struct sk_buff *skb,
- uint8_t *eth_dst)
+ u8 *eth_dst)
{
struct batadv_nc_path *nc_path, nc_path_key;
struct batadv_nc_packet *nc_packet_out = NULL;
@@ -1321,8 +1324,8 @@ batadv_nc_path_search(struct batadv_priv *bat_priv,
static struct batadv_nc_packet *
batadv_nc_skb_src_search(struct batadv_priv *bat_priv,
struct sk_buff *skb,
- uint8_t *eth_dst,
- uint8_t *eth_src,
+ u8 *eth_dst,
+ u8 *eth_src,
struct batadv_nc_node *in_nc_node)
{
struct batadv_orig_node *orig_node;
@@ -1362,7 +1365,7 @@ batadv_nc_skb_src_search(struct batadv_priv *bat_priv,
*/
static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv,
struct sk_buff *skb,
- uint8_t *eth_dst_new)
+ u8 *eth_dst_new)
{
struct ethhdr *ethhdr;
@@ -1638,7 +1641,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
struct batadv_unicast_packet *unicast_packet;
struct batadv_coded_packet coded_packet_tmp;
struct ethhdr *ethhdr, ethhdr_tmp;
- uint8_t *orig_dest, ttl, ttvn;
+ u8 *orig_dest, ttl, ttvn;
unsigned int coding_len;
int err;
@@ -1730,7 +1733,7 @@ batadv_nc_find_decoding_packet(struct batadv_priv *bat_priv,
struct batadv_hashtable *hash = bat_priv->nc.decoding_hash;
struct batadv_nc_packet *tmp_nc_packet, *nc_packet = NULL;
struct batadv_nc_path *nc_path, nc_path_key;
- uint8_t *dest, *source;
+ u8 *dest, *source;
__be32 packet_id;
int index;
diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h
index 5b79aa8c6..8f6d4ad87 100644
--- a/net/batman-adv/network-coding.h
+++ b/net/batman-adv/network-coding.h
@@ -22,11 +22,7 @@
#include <linux/types.h>
-struct batadv_nc_node;
-struct batadv_neigh_node;
struct batadv_ogm_packet;
-struct batadv_orig_node;
-struct batadv_priv;
struct net_device;
struct seq_file;
struct sk_buff;
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 32a0fcfab..7486df9ed 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -26,6 +26,7 @@
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
+#include <linux/rculist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -70,7 +71,7 @@ batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
struct batadv_orig_node_vlan *vlan = NULL, *tmp;
rcu_read_lock();
- list_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
+ hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
if (tmp->vid != vid)
continue;
@@ -118,7 +119,7 @@ batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
atomic_set(&vlan->refcount, 2);
vlan->vid = vid;
- list_add_rcu(&vlan->list, &orig_node->vlan_list);
+ hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);
out:
spin_unlock_bh(&orig_node->vlan_list_lock);
@@ -442,41 +443,6 @@ out:
}
/**
- * batadv_neigh_node_new - create and init a new neigh_node object
- * @hard_iface: the interface where the neighbour is connected to
- * @neigh_addr: the mac address of the neighbour interface
- * @orig_node: originator object representing the neighbour
- *
- * Allocates a new neigh_node object and initialises all the generic fields.
- * Returns the new object or NULL on failure.
- */
-struct batadv_neigh_node *
-batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
- const uint8_t *neigh_addr,
- struct batadv_orig_node *orig_node)
-{
- struct batadv_neigh_node *neigh_node;
-
- neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
- if (!neigh_node)
- goto out;
-
- INIT_HLIST_NODE(&neigh_node->list);
- INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
- spin_lock_init(&neigh_node->ifinfo_lock);
-
- ether_addr_copy(neigh_node->addr, neigh_addr);
- neigh_node->if_incoming = hard_iface;
- neigh_node->orig_node = orig_node;
-
- /* extra reference for return */
- atomic_set(&neigh_node->refcount, 2);
-
-out:
- return neigh_node;
-}
-
-/**
* batadv_neigh_node_get - retrieve a neighbour from the list
* @orig_node: originator which the neighbour belongs to
* @hard_iface: the interface where this neighbour is connected to
@@ -486,10 +452,10 @@ out:
* which is connected through the provided hard interface.
* Returns NULL if the neighbour is not found.
*/
-struct batadv_neigh_node *
+static struct batadv_neigh_node *
batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
const struct batadv_hard_iface *hard_iface,
- const uint8_t *addr)
+ const u8 *addr)
{
struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
@@ -513,6 +479,59 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
}
/**
+ * batadv_neigh_node_new - create and init a new neigh_node object
+ * @orig_node: originator object representing the neighbour
+ * @hard_iface: the interface where the neighbour is connected to
+ * @neigh_addr: the mac address of the neighbour interface
+ *
+ * Allocates a new neigh_node object and initialises all the generic fields.
+ * Returns the new object or NULL on failure.
+ */
+struct batadv_neigh_node *
+batadv_neigh_node_new(struct batadv_orig_node *orig_node,
+ struct batadv_hard_iface *hard_iface,
+ const u8 *neigh_addr)
+{
+ struct batadv_neigh_node *neigh_node;
+
+ neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
+ if (neigh_node)
+ goto out;
+
+ neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
+ if (!neigh_node)
+ goto out;
+
+ if (!atomic_inc_not_zero(&hard_iface->refcount)) {
+ kfree(neigh_node);
+ neigh_node = NULL;
+ goto out;
+ }
+
+ INIT_HLIST_NODE(&neigh_node->list);
+ INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
+ spin_lock_init(&neigh_node->ifinfo_lock);
+
+ ether_addr_copy(neigh_node->addr, neigh_addr);
+ neigh_node->if_incoming = hard_iface;
+ neigh_node->orig_node = orig_node;
+
+ /* extra reference for return */
+ atomic_set(&neigh_node->refcount, 2);
+
+ spin_lock_bh(&orig_node->neigh_list_lock);
+ hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
+ spin_unlock_bh(&orig_node->neigh_list_lock);
+
+ batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
+ "Creating new neighbor %pM for orig_node %pM on interface %s\n",
+ neigh_addr, orig_node->orig, hard_iface->net_dev->name);
+
+out:
+ return neigh_node;
+}
+
+/**
* batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
* @rcu: rcu pointer of the orig_ifinfo object
*/
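
The rewritten batadv_neigh_node_new() folds the old lookup and allocation into one get-or-create helper: reuse an existing entry if the RCU scan finds one, otherwise allocate, pin the incoming interface with atomic_inc_not_zero(), and publish under the list lock. A kernel-style sketch of that shape with illustrative toy_* names (the interface pinning step is left out here):

#include <linux/atomic.h>
#include <linux/etherdevice.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct toy_list {
	spinlock_t lock;		/* protects head */
	struct hlist_head head;
};

struct toy_node {
	struct hlist_node list;
	u8 addr[ETH_ALEN];
	atomic_t refcount;
};

static struct toy_node *toy_node_get(struct toy_list *tl, const u8 *addr)
{
	struct toy_node *node, *res = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &tl->head, list) {
		if (!ether_addr_equal(node->addr, addr))
			continue;

		if (!atomic_inc_not_zero(&node->refcount))
			continue;	/* found, but already being freed */

		res = node;
		break;
	}
	rcu_read_unlock();

	return res;
}

static struct toy_node *toy_node_new(struct toy_list *tl, const u8 *addr)
{
	struct toy_node *node;

	node = toy_node_get(tl, addr);	/* concurrent callers converge here */
	if (node)
		return node;

	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (!node)
		return NULL;

	ether_addr_copy(node->addr, addr);
	atomic_set(&node->refcount, 2);	/* one for the list, one returned */

	spin_lock_bh(&tl->lock);
	hlist_add_head_rcu(&node->list, &tl->head);
	spin_unlock_bh(&tl->lock);

	return node;
}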
@@ -624,7 +643,7 @@ void batadv_originator_free(struct batadv_priv *bat_priv)
struct hlist_head *head;
spinlock_t *list_lock; /* spinlock to protect write access */
struct batadv_orig_node *orig_node;
- uint32_t i;
+ u32 i;
if (!hash)
return;
@@ -659,7 +678,7 @@ void batadv_originator_free(struct batadv_priv *bat_priv)
* Returns the newly created object or NULL on failure.
*/
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
- const uint8_t *addr)
+ const u8 *addr)
{
struct batadv_orig_node *orig_node;
struct batadv_orig_node_vlan *vlan;
@@ -674,7 +693,7 @@ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
return NULL;
INIT_HLIST_HEAD(&orig_node->neigh_list);
- INIT_LIST_HEAD(&orig_node->vlan_list);
+ INIT_HLIST_HEAD(&orig_node->vlan_list);
INIT_HLIST_HEAD(&orig_node->ifinfo_list);
spin_lock_init(&orig_node->bcast_seqno_lock);
spin_lock_init(&orig_node->neigh_list_lock);
@@ -981,7 +1000,7 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv)
struct hlist_head *head;
spinlock_t *list_lock; /* spinlock to protect write access */
struct batadv_orig_node *orig_node;
- uint32_t i;
+ u32 i;
if (!hash)
return;
@@ -1010,7 +1029,6 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv)
spin_unlock_bh(list_lock);
}
- batadv_gw_node_purge(bat_priv);
batadv_gw_election(bat_priv);
}
@@ -1115,7 +1133,7 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_head *head;
struct batadv_orig_node *orig_node;
- uint32_t i;
+ u32 i;
int ret;
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
@@ -1152,7 +1170,7 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
struct batadv_hard_iface *hard_iface_tmp;
struct batadv_orig_node *orig_node;
struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
- uint32_t i;
+ u32 i;
int ret;
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 79734d302..fa18f9bf2 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -40,15 +40,11 @@ void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
- const uint8_t *addr);
+ const u8 *addr);
struct batadv_neigh_node *
-batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
- const struct batadv_hard_iface *hard_iface,
- const uint8_t *addr);
-struct batadv_neigh_node *
-batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
- const uint8_t *neigh_addr,
- struct batadv_orig_node *orig_node);
+batadv_neigh_node_new(struct batadv_orig_node *orig_node,
+ struct batadv_hard_iface *hard_iface,
+ const u8 *neigh_addr);
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node);
struct batadv_neigh_node *
batadv_orig_router_get(struct batadv_orig_node *orig_node,
@@ -86,9 +82,9 @@ void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan);
/* hashfunction to choose an entry in a hash table of given size
* hash algorithm from http://en.wikipedia.org/wiki/Hash_table
*/
-static inline uint32_t batadv_choose_orig(const void *data, uint32_t size)
+static inline u32 batadv_choose_orig(const void *data, u32 size)
{
- uint32_t hash = 0;
+ u32 hash = 0;
hash = jhash(data, ETH_ALEN, hash);
return hash % size;
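
Built on this helper, batadv_hash_add() picks a bucket, scans it for a duplicate, and inserts at the head only if none is found. A userspace model of that flow; locking is omitted here, whereas the kernel holds the per-bucket spinlock around the scan and the insertion:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TABLE_SIZE 16
#define KEY_LEN 6

struct toy_entry {
	uint8_t key[KEY_LEN];
	struct toy_entry *next;
};

static struct toy_entry *table[TABLE_SIZE];

static uint32_t choose(const uint8_t *key)	/* stand-in for jhash */
{
	uint32_t hash = 0;
	int i;

	for (i = 0; i < KEY_LEN; i++)
		hash = hash * 31 + key[i];

	return hash % TABLE_SIZE;
}

static int toy_hash_add(const uint8_t *key)
{
	uint32_t index = choose(key);
	struct toy_entry *cur, *new_entry;

	for (cur = table[index]; cur; cur = cur->next)
		if (!memcmp(cur->key, key, KEY_LEN))
			return 1;	/* already present */

	new_entry = calloc(1, sizeof(*new_entry));
	if (!new_entry)
		return -1;

	memcpy(new_entry->key, key, KEY_LEN);
	new_entry->next = table[index];
	table[index] = new_entry;	/* head insertion, like hlist_add_head */
	return 0;
}

int main(void)
{
	const uint8_t mac[KEY_LEN] = { 0x02, 0xaa, 0xbb, 0xcc, 0xdd, 0xee };

	printf("first add: %d, second add: %d\n",
	       toy_hash_add(mac), toy_hash_add(mac));
	return 0;
}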
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 9e747c08d..11f996b39 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -197,8 +197,8 @@ enum batadv_tvlv_type {
* transport the claim type and the group id
*/
struct batadv_bla_claim_dst {
- uint8_t magic[3]; /* FF:43:05 */
- uint8_t type; /* bla_claimframe */
+ u8 magic[3]; /* FF:43:05 */
+ u8 type; /* bla_claimframe */
__be16 group; /* group id */
};
@@ -213,16 +213,16 @@ struct batadv_bla_claim_dst {
* @tvlv_len: length of tvlv data following the ogm header
*/
struct batadv_ogm_packet {
- uint8_t packet_type;
- uint8_t version;
- uint8_t ttl;
- uint8_t flags;
- __be32 seqno;
- uint8_t orig[ETH_ALEN];
- uint8_t prev_sender[ETH_ALEN];
- uint8_t reserved;
- uint8_t tq;
- __be16 tvlv_len;
+ u8 packet_type;
+ u8 version;
+ u8 ttl;
+ u8 flags;
+ __be32 seqno;
+ u8 orig[ETH_ALEN];
+ u8 prev_sender[ETH_ALEN];
+ u8 reserved;
+ u8 tq;
+ __be16 tvlv_len;
/* __packed is not needed as the struct size is divisible by 4,
* and the largest data type in this struct has a size of 4.
*/
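
The comment's size claim can be pinned down at compile time: the fields add up to 4 + 4 + 6 + 6 + 1 + 1 + 2 = 24 bytes, so no __packed attribute is needed to keep the layout stable. A hypothetical kernel-style check, not part of this patch:

#include <linux/bug.h>
#include <linux/compiler.h>

#include "packet.h"	/* struct batadv_ogm_packet */

static void __maybe_unused toy_packet_size_checks(void)
{
	/* 4 x u8 + __be32 + 2 x 6-byte MAC + 2 x u8 + __be16 = 24 bytes */
	BUILD_BUG_ON(sizeof(struct batadv_ogm_packet) != 24);
	BUILD_BUG_ON(sizeof(struct batadv_ogm_packet) % 4 != 0);
}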
@@ -246,14 +246,14 @@ struct batadv_ogm_packet {
* members are padded the same way as they are in real packets.
*/
struct batadv_icmp_header {
- uint8_t packet_type;
- uint8_t version;
- uint8_t ttl;
- uint8_t msg_type; /* see ICMP message types above */
- uint8_t dst[ETH_ALEN];
- uint8_t orig[ETH_ALEN];
- uint8_t uid;
- uint8_t align[3];
+ u8 packet_type;
+ u8 version;
+ u8 ttl;
+ u8 msg_type; /* see ICMP message types above */
+ u8 dst[ETH_ALEN];
+ u8 orig[ETH_ALEN];
+ u8 uid;
+ u8 align[3];
};
/**
@@ -269,15 +269,15 @@ struct batadv_icmp_header {
* @seqno: ICMP sequence number
*/
struct batadv_icmp_packet {
- uint8_t packet_type;
- uint8_t version;
- uint8_t ttl;
- uint8_t msg_type; /* see ICMP message types above */
- uint8_t dst[ETH_ALEN];
- uint8_t orig[ETH_ALEN];
- uint8_t uid;
- uint8_t reserved;
- __be16 seqno;
+ u8 packet_type;
+ u8 version;
+ u8 ttl;
+ u8 msg_type; /* see ICMP message types above */
+ u8 dst[ETH_ALEN];
+ u8 orig[ETH_ALEN];
+ u8 uid;
+ u8 reserved;
+ __be16 seqno;
};
#define BATADV_RR_LEN 16
@@ -296,16 +296,16 @@ struct batadv_icmp_packet {
* @rr: route record array
*/
struct batadv_icmp_packet_rr {
- uint8_t packet_type;
- uint8_t version;
- uint8_t ttl;
- uint8_t msg_type; /* see ICMP message types above */
- uint8_t dst[ETH_ALEN];
- uint8_t orig[ETH_ALEN];
- uint8_t uid;
- uint8_t rr_cur;
- __be16 seqno;
- uint8_t rr[BATADV_RR_LEN][ETH_ALEN];
+ u8 packet_type;
+ u8 version;
+ u8 ttl;
+ u8 msg_type; /* see ICMP message types above */
+ u8 dst[ETH_ALEN];
+ u8 orig[ETH_ALEN];
+ u8 uid;
+ u8 rr_cur;
+ __be16 seqno;
+ u8 rr[BATADV_RR_LEN][ETH_ALEN];
};
#define BATADV_ICMP_MAX_PACKET_SIZE sizeof(struct batadv_icmp_packet_rr)
@@ -331,11 +331,11 @@ struct batadv_icmp_packet_rr {
* @dest: originator destination of the unicast packet
*/
struct batadv_unicast_packet {
- uint8_t packet_type;
- uint8_t version;
- uint8_t ttl;
- uint8_t ttvn; /* destination translation table version number */
- uint8_t dest[ETH_ALEN];
+ u8 packet_type;
+ u8 version;
+ u8 ttl;
+ u8 ttvn; /* destination translation table version number */
+ u8 dest[ETH_ALEN];
/* "4 bytes boundary + 2 bytes" long to make the payload after the
* following ethernet header again 4 bytes boundary aligned
*/
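
The recurring "4 bytes boundary + 2 bytes" rule is worth spelling out: the unicast header is 10 bytes, 2 past a 4-byte boundary, and the 14-byte inner Ethernet header that follows is also 2 past a boundary, so the encapsulated payload lands 4-byte aligned again. A userspace sketch of the arithmetic:

#include <stdint.h>

#define ETH_ALEN 6
#define ETH_HLEN 14     /* dst(6) + src(6) + ethertype(2) */

struct unicast_packet {         /* mirrors struct batadv_unicast_packet */
        uint8_t packet_type;
        uint8_t version;
        uint8_t ttl;
        uint8_t ttvn;
        uint8_t dest[ETH_ALEN];
};

/* 10 bytes == 4-byte boundary + 2; adding the 14-byte inner Ethernet
 * header (also boundary + 2) puts the payload back on a 4-byte boundary.
 */
_Static_assert(sizeof(struct unicast_packet) % 4 == 2, "layout rule broken");
_Static_assert((sizeof(struct unicast_packet) + ETH_HLEN) % 4 == 0,
               "payload not 4-byte aligned");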
@@ -349,9 +349,9 @@ struct batadv_unicast_packet {
*/
struct batadv_unicast_4addr_packet {
struct batadv_unicast_packet u;
- uint8_t src[ETH_ALEN];
- uint8_t subtype;
- uint8_t reserved;
+ u8 src[ETH_ALEN];
+ u8 subtype;
+ u8 reserved;
/* "4 bytes boundary + 2 bytes" long to make the payload after the
* following ethernet header again 4 bytes boundary aligned
*/
@@ -370,22 +370,22 @@ struct batadv_unicast_4addr_packet {
* @total_size: size of the merged packet
*/
struct batadv_frag_packet {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t ttl;
+ u8 packet_type;
+ u8 version; /* batman version field */
+ u8 ttl;
#if defined(__BIG_ENDIAN_BITFIELD)
- uint8_t no:4;
- uint8_t reserved:4;
+ u8 no:4;
+ u8 reserved:4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
- uint8_t reserved:4;
- uint8_t no:4;
+ u8 reserved:4;
+ u8 no:4;
#else
#error "unknown bitfield endianness"
#endif
- uint8_t dest[ETH_ALEN];
- uint8_t orig[ETH_ALEN];
- __be16 seqno;
- __be16 total_size;
+ u8 dest[ETH_ALEN];
+ u8 orig[ETH_ALEN];
+ __be16 seqno;
+ __be16 total_size;
};
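
The mirrored bitfield declarations keep the wire format identical on both endiannesses: the fragment number always occupies the high nibble of the fourth byte. An equivalent, endianness-independent read with plain shifts and masks, sketched for illustration:

#include <stdint.h>
#include <stdio.h>

/* The fragment number sits in the high nibble on the wire; reading it
 * with shifts and masks sidesteps bitfield-ordering concerns entirely.
 */
static uint8_t frag_no(uint8_t byte3)       { return byte3 >> 4; }
static uint8_t frag_reserved(uint8_t byte3) { return byte3 & 0x0f; }

int main(void)
{
        uint8_t byte3 = 0x50;   /* no = 5, reserved = 0 */

        printf("no=%u reserved=%u\n", frag_no(byte3), frag_reserved(byte3));
        return 0;
}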
/**
@@ -398,12 +398,12 @@ struct batadv_frag_packet {
* @orig: originator of the broadcast packet
*/
struct batadv_bcast_packet {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t ttl;
- uint8_t reserved;
- __be32 seqno;
- uint8_t orig[ETH_ALEN];
+ u8 packet_type;
+ u8 version; /* batman version field */
+ u8 ttl;
+ u8 reserved;
+ __be32 seqno;
+ u8 orig[ETH_ALEN];
/* "4 bytes boundary + 2 bytes" long to make the payload after the
* following ethernet header again 4 bytes boundary aligned
*/
@@ -428,21 +428,21 @@ struct batadv_bcast_packet {
* @coded_len: length of network coded part of the payload
*/
struct batadv_coded_packet {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t ttl;
- uint8_t first_ttvn;
- /* uint8_t first_dest[ETH_ALEN]; - saved in mac header destination */
- uint8_t first_source[ETH_ALEN];
- uint8_t first_orig_dest[ETH_ALEN];
- __be32 first_crc;
- uint8_t second_ttl;
- uint8_t second_ttvn;
- uint8_t second_dest[ETH_ALEN];
- uint8_t second_source[ETH_ALEN];
- uint8_t second_orig_dest[ETH_ALEN];
- __be32 second_crc;
- __be16 coded_len;
+ u8 packet_type;
+ u8 version; /* batman version field */
+ u8 ttl;
+ u8 first_ttvn;
+ /* u8 first_dest[ETH_ALEN]; - saved in mac header destination */
+ u8 first_source[ETH_ALEN];
+ u8 first_orig_dest[ETH_ALEN];
+ __be32 first_crc;
+ u8 second_ttl;
+ u8 second_ttvn;
+ u8 second_dest[ETH_ALEN];
+ u8 second_source[ETH_ALEN];
+ u8 second_orig_dest[ETH_ALEN];
+ __be32 second_crc;
+ __be16 coded_len;
};
#pragma pack()
@@ -459,14 +459,14 @@ struct batadv_coded_packet {
* @align: 2 bytes to align the header to a 4 byte boundary
*/
struct batadv_unicast_tvlv_packet {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t ttl;
- uint8_t reserved;
- uint8_t dst[ETH_ALEN];
- uint8_t src[ETH_ALEN];
- __be16 tvlv_len;
- uint16_t align;
+ u8 packet_type;
+ u8 version; /* batman version field */
+ u8 ttl;
+ u8 reserved;
+ u8 dst[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ __be16 tvlv_len;
+ u16 align;
};
/**
@@ -476,9 +476,9 @@ struct batadv_unicast_tvlv_packet {
* @len: tvlv container length
*/
struct batadv_tvlv_hdr {
- uint8_t type;
- uint8_t version;
- __be16 len;
+ u8 type;
+ u8 version;
+ __be16 len;
};
/**
@@ -500,9 +500,9 @@ struct batadv_tvlv_gateway_data {
* one batadv_tvlv_tt_vlan_data object per announced vlan
*/
struct batadv_tvlv_tt_data {
- uint8_t flags;
- uint8_t ttvn;
- __be16 num_vlan;
+ u8 flags;
+ u8 ttvn;
+ __be16 num_vlan;
};
/**
@@ -513,9 +513,9 @@ struct batadv_tvlv_tt_data {
* @reserved: unused, useful for alignment purposes
*/
struct batadv_tvlv_tt_vlan_data {
- __be32 crc;
- __be16 vid;
- uint16_t reserved;
+ __be32 crc;
+ __be16 vid;
+ u16 reserved;
};
/**
@@ -527,9 +527,9 @@ struct batadv_tvlv_tt_vlan_data {
* @vid: VLAN identifier
*/
struct batadv_tvlv_tt_change {
- uint8_t flags;
- uint8_t reserved[3];
- uint8_t addr[ETH_ALEN];
+ u8 flags;
+ u8 reserved[3];
+ u8 addr[ETH_ALEN];
__be16 vid;
};
@@ -539,7 +539,7 @@ struct batadv_tvlv_tt_change {
* @vid: VLAN identifier
*/
struct batadv_tvlv_roam_adv {
- uint8_t client[ETH_ALEN];
+ u8 client[ETH_ALEN];
__be16 vid;
};
@@ -549,8 +549,8 @@ struct batadv_tvlv_roam_adv {
* @reserved: reserved field
*/
struct batadv_tvlv_mcast_data {
- uint8_t flags;
- uint8_t reserved[3];
+ u8 flags;
+ u8 reserved[3];
};
#endif /* _NET_BATMAN_ADV_PACKET_H_ */
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index c360c0cd1..8d990b070 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -145,7 +145,7 @@ out:
* 0 if the packet is to be accepted
* 1 if the packet is to be ignored.
*/
-int batadv_window_protected(struct batadv_priv *bat_priv, int32_t seq_num_diff,
+int batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
unsigned long *last_reset)
{
if (seq_num_diff <= -BATADV_TQ_LOCAL_WINDOW_SIZE ||
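
For context, batadv_window_protected() ignores a packet whose sequence number jumps out of the local window while the last window reset is still inside the protection period. A simplified userspace sketch of that guard, treating the window as symmetric for brevity and using stand-in constants:

#include <stdbool.h>
#include <stdint.h>

#define WINDOW_SIZE 64          /* stands in for BATADV_TQ_LOCAL_WINDOW_SIZE */
#define RESET_PROTECT_MS 30000  /* stands in for the protection period */

/* Returns true if the packet must be ignored: the seqno jump leaves the
 * window and the last reset is still protected; otherwise records a
 * reset and lets the packet through.
 */
static bool window_protected(int32_t seq_num_diff, uint64_t now_ms,
                             uint64_t *last_reset_ms)
{
        if (seq_num_diff <= -WINDOW_SIZE || seq_num_diff >= WINDOW_SIZE) {
                if (now_ms - *last_reset_ms < RESET_PROTECT_MS)
                        return true;     /* ignore: still protected */
                *last_reset_ms = now_ms; /* window reset, accept this one */
        }
        return false;
}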
@@ -653,19 +653,19 @@ out:
static bool
batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
struct batadv_unicast_packet *unicast_packet,
- uint8_t *dst_addr, unsigned short vid)
+ u8 *dst_addr, unsigned short vid)
{
struct batadv_orig_node *orig_node = NULL;
struct batadv_hard_iface *primary_if = NULL;
bool ret = false;
- uint8_t *orig_addr, orig_ttvn;
+ u8 *orig_addr, orig_ttvn;
if (batadv_is_my_client(bat_priv, dst_addr, vid)) {
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
orig_addr = primary_if->net_dev->dev_addr;
- orig_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
+ orig_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
} else {
orig_node = batadv_transtable_search(bat_priv, NULL, dst_addr,
vid);
@@ -676,7 +676,7 @@ batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
goto out;
orig_addr = orig_node->orig;
- orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+ orig_ttvn = (u8)atomic_read(&orig_node->last_ttvn);
}
/* update the packet header */
@@ -698,7 +698,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
struct batadv_unicast_packet *unicast_packet;
struct batadv_hard_iface *primary_if;
struct batadv_orig_node *orig_node;
- uint8_t curr_ttvn, old_ttvn;
+ u8 curr_ttvn, old_ttvn;
struct ethhdr *ethhdr;
unsigned short vid;
int is_old_ttvn;
@@ -740,7 +740,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
* value is used later to check if the node which sent (or re-routed
 * last time) the packet had updated information or not
*/
- curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
+ curr_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
if (!batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
orig_node = batadv_orig_hash_find(bat_priv,
unicast_packet->dest);
@@ -751,7 +751,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
if (!orig_node)
return 0;
- curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+ curr_ttvn = (u8)atomic_read(&orig_node->last_ttvn);
batadv_orig_node_free_ref(orig_node);
}
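
The ttvn casts above feed a staleness test: the packet is re-routed when the destination's translation-table version has moved past the one recorded in the packet header. Since TTVN is an 8-bit counter the comparison must survive wraparound; a sketch of the idea (the kernel wraps this in its own seqno-before helper):

#include <stdbool.h>
#include <stdint.h>

/* A packet is stale when the current TTVN is "after" the packet's TTVN.
 * The signed difference of the two 8-bit counters handles wraparound.
 */
static bool ttvn_is_old(uint8_t packet_ttvn, uint8_t curr_ttvn)
{
        return (int8_t)(curr_ttvn - packet_ttvn) > 0;
}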
@@ -833,7 +833,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct batadv_unicast_packet *unicast_packet;
struct batadv_unicast_4addr_packet *unicast_4addr_packet;
- uint8_t *orig_addr;
+ u8 *orig_addr;
struct batadv_orig_node *orig_node = NULL;
int check, hdr_size = sizeof(*unicast_packet);
bool is4addr;
@@ -904,7 +904,7 @@ int batadv_recv_unicast_tvlv(struct sk_buff *skb,
struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
unsigned char *tvlv_buff;
- uint16_t tvlv_buff_len;
+ u16 tvlv_buff_len;
int hdr_size = sizeof(*unicast_tvlv_packet);
int ret = NET_RX_DROP;
@@ -1007,8 +1007,8 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
struct ethhdr *ethhdr;
int hdr_size = sizeof(*bcast_packet);
int ret = NET_RX_DROP;
- int32_t seq_diff;
- uint32_t seqno;
+ s32 seq_diff;
+ u32 seqno;
/* drop packet if it does not have the necessary minimum size */
if (unlikely(!pskb_may_pull(skb, hdr_size)))
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index 6bc29d33a..204bbe495 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -22,10 +22,6 @@
#include <linux/types.h>
-struct batadv_hard_iface;
-struct batadv_neigh_node;
-struct batadv_orig_node;
-struct batadv_priv;
struct sk_buff;
bool batadv_check_management_packet(struct sk_buff *skb,
@@ -55,7 +51,7 @@ struct batadv_neigh_node *
batadv_find_router(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
struct batadv_hard_iface *recv_if);
-int batadv_window_protected(struct batadv_priv *bat_priv, int32_t seq_num_diff,
+int batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
unsigned long *last_reset);
#endif /* _NET_BATMAN_ADV_ROUTING_H_ */
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 0a01992e6..f66432480 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -54,7 +54,7 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
*/
int batadv_send_skb_packet(struct sk_buff *skb,
struct batadv_hard_iface *hard_iface,
- const uint8_t *dst_addr)
+ const u8 *dst_addr)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct ethhdr *ethhdr;
@@ -172,7 +172,7 @@ batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
struct batadv_orig_node *orig_node)
{
struct batadv_unicast_packet *unicast_packet;
- uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+ u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);
if (batadv_skb_head_push(skb, hdr_size) < 0)
return false;
@@ -343,12 +343,12 @@ out:
*/
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
struct sk_buff *skb, int packet_type,
- int packet_subtype, uint8_t *dst_hint,
+ int packet_subtype, u8 *dst_hint,
unsigned short vid)
{
struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
struct batadv_orig_node *orig_node;
- uint8_t *src, *dst;
+ u8 *src, *dst;
src = ethhdr->h_source;
dst = ethhdr->h_dest;
@@ -616,7 +616,8 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
* we delete only packets belonging to the given interface
*/
if ((hard_iface) &&
- (forw_packet->if_incoming != hard_iface))
+ (forw_packet->if_incoming != hard_iface) &&
+ (forw_packet->if_outgoing != hard_iface))
continue;
spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
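
The added condition closes a purge leak: a queued packet must also be cancelled when the interface being removed is its outgoing interface, not only its incoming one. The fixed predicate, restated as a sketch with an opaque stand-in type:

#include <stdbool.h>

struct iface;   /* opaque stand-in for struct batadv_hard_iface */

/* Skip (keep queued) only when the packet belongs to neither side of
 * the interface being purged; before the patch only if_incoming was
 * checked, leaking packets whose if_outgoing matched.
 */
static bool skip_purge(const struct iface *purged,
                       const struct iface *if_incoming,
                       const struct iface *if_outgoing)
{
        return purged && if_incoming != purged && if_outgoing != purged;
}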
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 0536835fe..82059f259 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -25,15 +25,12 @@
#include "packet.h"
-struct batadv_hard_iface;
-struct batadv_orig_node;
-struct batadv_priv;
struct sk_buff;
struct work_struct;
int batadv_send_skb_packet(struct sk_buff *skb,
struct batadv_hard_iface *hard_iface,
- const uint8_t *dst_addr);
+ const u8 *dst_addr);
int batadv_send_skb_to_orig(struct sk_buff *skb,
struct batadv_orig_node *orig_node,
struct batadv_hard_iface *recv_if);
@@ -56,7 +53,7 @@ int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
unsigned short vid);
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
struct sk_buff *skb, int packet_type,
- int packet_subtype, uint8_t *dst_hint,
+ int packet_subtype, u8 *dst_hint,
unsigned short vid);
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid);
@@ -75,7 +72,7 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
* Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
*/
static inline int batadv_send_skb_via_tt(struct batadv_priv *bat_priv,
- struct sk_buff *skb, uint8_t *dst_hint,
+ struct sk_buff *skb, u8 *dst_hint,
unsigned short vid)
{
return batadv_send_skb_via_tt_generic(bat_priv, skb, BATADV_UNICAST, 0,
@@ -100,7 +97,7 @@ static inline int batadv_send_skb_via_tt(struct batadv_priv *bat_priv,
static inline int batadv_send_skb_via_tt_4addr(struct batadv_priv *bat_priv,
struct sk_buff *skb,
int packet_subtype,
- uint8_t *dst_hint,
+ u8 *dst_hint,
unsigned short vid)
{
return batadv_send_skb_via_tt_generic(bat_priv, skb,
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 51cda3a7c..ac4d08de5 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -131,7 +131,7 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
struct batadv_priv *bat_priv = netdev_priv(dev);
struct batadv_softif_vlan *vlan;
struct sockaddr *addr = p;
- uint8_t old_addr[ETH_ALEN];
+ u8 old_addr[ETH_ALEN];
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -186,19 +186,19 @@ static int batadv_interface_tx(struct sk_buff *skb,
struct batadv_hard_iface *primary_if = NULL;
struct batadv_bcast_packet *bcast_packet;
__be16 ethertype = htons(ETH_P_BATMAN);
- static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
- 0x00, 0x00};
- static const uint8_t ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
- 0x00, 0x00};
+ static const u8 stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
+ 0x00, 0x00};
+ static const u8 ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
+ 0x00, 0x00};
enum batadv_dhcp_recipient dhcp_rcp = BATADV_DHCP_NO;
- uint8_t *dst_hint = NULL, chaddr[ETH_ALEN];
+ u8 *dst_hint = NULL, chaddr[ETH_ALEN];
struct vlan_ethhdr *vhdr;
unsigned int header_len = 0;
int data_len = skb->len, ret;
unsigned long brd_delay = 1;
bool do_bcast = false, client_added;
unsigned short vid;
- uint32_t seqno;
+ u32 seqno;
int gw_mode;
enum batadv_forw_mode forw_mode;
struct batadv_orig_node *mcast_single_orig = NULL;
@@ -750,9 +750,9 @@ static void batadv_softif_destroy_finish(struct work_struct *work)
static int batadv_softif_init_late(struct net_device *dev)
{
struct batadv_priv *bat_priv;
- uint32_t random_seqno;
+ u32 random_seqno;
int ret;
- size_t cnt_len = sizeof(uint64_t) * BATADV_CNT_NUM;
+ size_t cnt_len = sizeof(u64) * BATADV_CNT_NUM;
batadv_set_lockdep_class(dev);
@@ -763,7 +763,7 @@ static int batadv_softif_init_late(struct net_device *dev)
/* batadv_interface_stats() needs to be available as soon as
* register_netdevice() has been called
*/
- bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
+ bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(u64));
if (!bat_priv->bat_counters)
return -ENOMEM;
@@ -941,14 +941,12 @@ static void batadv_softif_init_early(struct net_device *dev)
dev->netdev_ops = &batadv_netdev_ops;
dev->destructor = batadv_softif_free;
dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
/* can't call min_mtu, because the needed variables
* have not been initialized yet
*/
dev->mtu = ETH_DATA_LEN;
- /* reserve more space in the skbuff for our header */
- dev->hard_header_len = batadv_max_header_len();
/* generate random address */
eth_hw_addr_random(dev);
@@ -1117,8 +1115,7 @@ static const struct {
#endif
};
-static void batadv_get_strings(struct net_device *dev, uint32_t stringset,
- uint8_t *data)
+static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
if (stringset == ETH_SS_STATS)
memcpy(data, batadv_counters_strings,
@@ -1126,8 +1123,7 @@ static void batadv_get_strings(struct net_device *dev, uint32_t stringset,
}
static void batadv_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats,
- uint64_t *data)
+ struct ethtool_stats *stats, u64 *data)
{
struct batadv_priv *bat_priv = netdev_priv(dev);
int i;
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 578e8a663..8e82176f4 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -22,10 +22,6 @@
#include <net/rtnetlink.h>
-struct batadv_hard_iface;
-struct batadv_orig_node;
-struct batadv_priv;
-struct batadv_softif_vlan;
struct net_device;
struct sk_buff;
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index d6a312a82..9de3c8804 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -457,7 +457,7 @@ static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
struct attribute *attr, char *buff)
{
struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
- uint32_t down, up;
+ u32 down, up;
down = atomic_read(&bat_priv->gw.bandwidth_down);
up = atomic_read(&bat_priv->gw.bandwidth_up);
@@ -512,7 +512,7 @@ static ssize_t batadv_store_isolation_mark(struct kobject *kobj,
{
struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
struct batadv_priv *bat_priv = netdev_priv(net_dev);
- uint32_t mark, mask;
+ u32 mark, mask;
char *mask_ptr;
/* parse the mask if it has been specified, otherwise assume the mask is
diff --git a/net/batman-adv/sysfs.h b/net/batman-adv/sysfs.h
index 2294583f7..61974428a 100644
--- a/net/batman-adv/sysfs.h
+++ b/net/batman-adv/sysfs.h
@@ -23,8 +23,6 @@
#include <linux/sysfs.h>
#include <linux/types.h>
-struct batadv_priv;
-struct batadv_softif_vlan;
struct kobject;
struct net_device;
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index c9b26291a..4228b10c4 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -56,7 +56,7 @@
static struct lock_class_key batadv_tt_local_hash_lock_class_key;
static struct lock_class_key batadv_tt_global_hash_lock_class_key;
-static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
+static void batadv_send_roam_adv(struct batadv_priv *bat_priv, u8 *client,
unsigned short vid,
struct batadv_orig_node *orig_node);
static void batadv_tt_purge(struct work_struct *work);
@@ -85,10 +85,10 @@ static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
* Returns the hash index where the object represented by 'data' should be
* stored at.
*/
-static inline uint32_t batadv_choose_tt(const void *data, uint32_t size)
+static inline u32 batadv_choose_tt(const void *data, u32 size)
{
struct batadv_tt_common_entry *tt;
- uint32_t hash = 0;
+ u32 hash = 0;
tt = (struct batadv_tt_common_entry *)data;
hash = jhash(&tt->addr, ETH_ALEN, hash);
@@ -107,12 +107,12 @@ static inline uint32_t batadv_choose_tt(const void *data, uint32_t size)
* found, NULL otherwise.
*/
static struct batadv_tt_common_entry *
-batadv_tt_hash_find(struct batadv_hashtable *hash, const uint8_t *addr,
+batadv_tt_hash_find(struct batadv_hashtable *hash, const u8 *addr,
unsigned short vid)
{
struct hlist_head *head;
struct batadv_tt_common_entry to_search, *tt, *tt_tmp = NULL;
- uint32_t index;
+ u32 index;
if (!hash)
return NULL;
@@ -152,7 +152,7 @@ batadv_tt_hash_find(struct batadv_hashtable *hash, const uint8_t *addr,
* found, NULL otherwise.
*/
static struct batadv_tt_local_entry *
-batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const uint8_t *addr,
+batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
unsigned short vid)
{
struct batadv_tt_common_entry *tt_common_entry;
@@ -177,7 +177,7 @@ batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const uint8_t *addr,
* is found, NULL otherwise.
*/
static struct batadv_tt_global_entry *
-batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const uint8_t *addr,
+batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
unsigned short vid)
{
struct batadv_tt_common_entry *tt_common_entry;
@@ -223,7 +223,7 @@ batadv_tt_global_entry_free_ref(struct batadv_tt_global_entry *tt_global_entry)
* (excluding ourself).
*/
int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
- const uint8_t *addr, unsigned short vid)
+ const u8 *addr, unsigned short vid)
{
struct batadv_tt_global_entry *tt_global_entry;
int count;
@@ -315,7 +315,7 @@ static void batadv_tt_global_size_mod(struct batadv_orig_node *orig_node,
if (atomic_add_return(v, &vlan->tt.num_entries) == 0) {
spin_lock_bh(&orig_node->vlan_list_lock);
- list_del_rcu(&vlan->list);
+ hlist_del_init_rcu(&vlan->list);
spin_unlock_bh(&orig_node->vlan_list_lock);
batadv_orig_node_vlan_free_ref(vlan);
}
@@ -364,11 +364,11 @@ batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
*/
static void batadv_tt_local_event(struct batadv_priv *bat_priv,
struct batadv_tt_local_entry *tt_local_entry,
- uint8_t event_flags)
+ u8 event_flags)
{
struct batadv_tt_change_node *tt_change_node, *entry, *safe;
struct batadv_tt_common_entry *common = &tt_local_entry->common;
- uint8_t flags = common->flags | event_flags;
+ u8 flags = common->flags | event_flags;
bool event_removed = false;
bool del_op_requested, del_op_entry;
@@ -448,7 +448,7 @@ static int batadv_tt_len(int changes_num)
*
* Returns the number of entries.
*/
-static uint16_t batadv_tt_entries(uint16_t tt_len)
+static u16 batadv_tt_entries(u16 tt_len)
{
return tt_len / batadv_tt_len(1);
}
@@ -462,7 +462,8 @@ static uint16_t batadv_tt_entries(uint16_t tt_len)
*/
static int batadv_tt_local_table_transmit_size(struct batadv_priv *bat_priv)
{
- uint16_t num_vlan = 0, tt_local_entries = 0;
+ u16 num_vlan = 0;
+ u16 tt_local_entries = 0;
struct batadv_softif_vlan *vlan;
int hdr_size;
@@ -525,8 +526,8 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
*
* Returns true if the client was successfully added, false otherwise.
*/
-bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
- unsigned short vid, int ifindex, uint32_t mark)
+bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
+ unsigned short vid, int ifindex, u32 mark)
{
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
struct batadv_tt_local_entry *tt_local;
@@ -536,9 +537,10 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
struct hlist_head *head;
struct batadv_tt_orig_list_entry *orig_entry;
int hash_added, table_size, packet_size_max;
- bool ret = false, roamed_back = false;
- uint8_t remote_flags;
- uint32_t match_mark;
+ bool ret = false;
+ bool roamed_back = false;
+ u8 remote_flags;
+ u32 match_mark;
if (ifindex != BATADV_NULL_IFINDEX)
in_dev = dev_get_by_index(&init_net, ifindex);
@@ -605,7 +607,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
addr, BATADV_PRINT_VID(vid),
- (uint8_t)atomic_read(&bat_priv->tt.vn));
+ (u8)atomic_read(&bat_priv->tt.vn));
ether_addr_copy(tt_local->common.addr, addr);
/* The local entry has to be marked as NEW to avoid sending it in
@@ -724,19 +726,22 @@ out:
*
* Return the size of the allocated buffer or 0 in case of failure.
*/
-static uint16_t
+static u16
batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
struct batadv_tvlv_tt_data **tt_data,
struct batadv_tvlv_tt_change **tt_change,
- int32_t *tt_len)
+ s32 *tt_len)
{
- uint16_t num_vlan = 0, num_entries = 0, change_offset, tvlv_len;
+ u16 num_vlan = 0;
+ u16 num_entries = 0;
+ u16 change_offset;
+ u16 tvlv_len;
struct batadv_tvlv_tt_vlan_data *tt_vlan;
struct batadv_orig_node_vlan *vlan;
- uint8_t *tt_change_ptr;
+ u8 *tt_change_ptr;
rcu_read_lock();
- list_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+ hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
num_vlan++;
num_entries += atomic_read(&vlan->tt.num_entries);
}
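
Here vlan_list moves from a doubly linked list_head to an hlist_head, shrinking the head to one pointer, and every reader switches to hlist_for_each_entry_rcu(). A minimal userspace model of the hlist walk (without the RCU barriers, which need kernel infrastructure):

#include <stddef.h>
#include <stdio.h>

/* Minimal model of the kernel hlist: singly linked nodes hanging off a
 * one-pointer head, which is why the embedded list head shrinks when
 * converting from list_head (two pointers) to hlist_head.
 */
struct hlist_node { struct hlist_node *next; };
struct hlist_head { struct hlist_node *first; };

struct vlan_entry {
        unsigned short vid;
        int num_entries;
        struct hlist_node list;
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
        struct vlan_entry a = { .vid = 1, .num_entries = 3 };
        struct vlan_entry b = { .vid = 2, .num_entries = 5 };
        struct hlist_head head = { &a.list };
        struct hlist_node *pos;
        int total = 0;

        a.list.next = &b.list;
        b.list.next = NULL;

        /* the same walk hlist_for_each_entry_rcu() performs, minus the
         * RCU dereference barriers
         */
        for (pos = head.first; pos; pos = pos->next)
                total += container_of(pos, struct vlan_entry, list)->num_entries;

        printf("entries across vlans: %d\n", total);
        return 0;
}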
@@ -762,14 +767,14 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
(*tt_data)->num_vlan = htons(num_vlan);
tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
- list_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+ hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
tt_vlan->vid = htons(vlan->vid);
tt_vlan->crc = htonl(vlan->tt.crc);
tt_vlan++;
}
- tt_change_ptr = (uint8_t *)*tt_data + change_offset;
+ tt_change_ptr = (u8 *)*tt_data + change_offset;
*tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
out:
@@ -795,16 +800,18 @@ out:
*
* Return the size of the allocated buffer or 0 in case of failure.
*/
-static uint16_t
+static u16
batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
struct batadv_tvlv_tt_data **tt_data,
struct batadv_tvlv_tt_change **tt_change,
- int32_t *tt_len)
+ s32 *tt_len)
{
struct batadv_tvlv_tt_vlan_data *tt_vlan;
struct batadv_softif_vlan *vlan;
- uint16_t num_vlan = 0, num_entries = 0, tvlv_len;
- uint8_t *tt_change_ptr;
+ u16 num_vlan = 0;
+ u16 num_entries = 0;
+ u16 tvlv_len;
+ u8 *tt_change_ptr;
int change_offset;
rcu_read_lock();
@@ -841,7 +848,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
tt_vlan++;
}
- tt_change_ptr = (uint8_t *)*tt_data + change_offset;
+ tt_change_ptr = (u8 *)*tt_data + change_offset;
*tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
out:
@@ -860,8 +867,9 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
struct batadv_tvlv_tt_data *tt_data;
struct batadv_tvlv_tt_change *tt_change;
int tt_diff_len, tt_change_len = 0;
- int tt_diff_entries_num = 0, tt_diff_entries_count = 0;
- uint16_t tvlv_len;
+ int tt_diff_entries_num = 0;
+ int tt_diff_entries_count = 0;
+ u16 tvlv_len;
tt_diff_entries_num = atomic_read(&bat_priv->tt.local_changes);
tt_diff_len = batadv_tt_len(tt_diff_entries_num);
@@ -935,12 +943,12 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
struct batadv_softif_vlan *vlan;
struct hlist_head *head;
unsigned short vid;
- uint32_t i;
+ u32 i;
int last_seen_secs;
int last_seen_msecs;
unsigned long last_seen_jiffies;
bool no_purge;
- uint16_t np_flag = BATADV_TT_CLIENT_NOPURGE;
+ u16 np_flag = BATADV_TT_CLIENT_NOPURGE;
primary_if = batadv_seq_print_text_primary_if_get(seq);
if (!primary_if)
@@ -948,7 +956,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
seq_printf(seq,
"Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
- net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn));
+ net_dev->name, (u8)atomic_read(&bat_priv->tt.vn));
seq_printf(seq, " %-13s %s %-8s %-9s (%-10s)\n", "Client", "VID",
"Flags", "Last seen", "CRC");
@@ -1008,7 +1016,7 @@ out:
static void
batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
struct batadv_tt_local_entry *tt_local_entry,
- uint16_t flags, const char *message)
+ u16 flags, const char *message)
{
batadv_tt_local_event(bat_priv, tt_local_entry, flags);
@@ -1034,12 +1042,12 @@ batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
*
* Returns the flags assigned to the local entry before being deleted
*/
-uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
- const uint8_t *addr, unsigned short vid,
- const char *message, bool roaming)
+u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
+ unsigned short vid, const char *message,
+ bool roaming)
{
struct batadv_tt_local_entry *tt_local_entry;
- uint16_t flags, curr_flags = BATADV_NO_FLAGS;
+ u16 flags, curr_flags = BATADV_NO_FLAGS;
struct batadv_softif_vlan *vlan;
void *tt_entry_exists;
@@ -1142,7 +1150,7 @@ static void batadv_tt_local_purge(struct batadv_priv *bat_priv,
struct batadv_hashtable *hash = bat_priv->tt.local_hash;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
- uint32_t i;
+ u32 i;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -1163,7 +1171,7 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
struct batadv_softif_vlan *vlan;
struct hlist_node *node_tmp;
struct hlist_head *head;
- uint32_t i;
+ u32 i;
if (!bat_priv->tt.local_hash)
return;
@@ -1338,15 +1346,14 @@ out:
static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
const unsigned char *tt_addr,
- unsigned short vid, uint16_t flags,
- uint8_t ttvn)
+ unsigned short vid, u16 flags, u8 ttvn)
{
struct batadv_tt_global_entry *tt_global_entry;
struct batadv_tt_local_entry *tt_local_entry;
bool ret = false;
int hash_added;
struct batadv_tt_common_entry *common;
- uint16_t local_flags;
+ u16 local_flags;
/* ignore global entries from backbone nodes */
if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid))
@@ -1543,8 +1550,8 @@ batadv_tt_global_print_entry(struct batadv_priv *bat_priv,
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_orig_node_vlan *vlan;
struct hlist_head *head;
- uint8_t last_ttvn;
- uint16_t flags;
+ u8 last_ttvn;
+ u16 flags;
tt_common_entry = &tt_global_entry->common;
flags = tt_common_entry->flags;
@@ -1618,7 +1625,7 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
struct batadv_tt_global_entry *tt_global;
struct batadv_hard_iface *primary_if;
struct hlist_head *head;
- uint32_t i;
+ u32 i;
primary_if = batadv_seq_print_text_primary_if_get(seq);
if (!primary_if)
@@ -1651,20 +1658,28 @@ out:
}
/**
- * batadv_tt_global_del_orig_entry - remove and free an orig_entry
+ * _batadv_tt_global_del_orig_entry - remove and free an orig_entry
* @tt_global_entry: the global entry to remove the orig_entry from
* @orig_entry: the orig entry to remove and free
*
* Remove an orig_entry from its list in the given tt_global_entry and
* free this orig_entry afterwards.
+ *
+ * Caller must hold tt_global_entry->list_lock and ensure orig_entry->list is
+ * part of a list.
*/
static void
-batadv_tt_global_del_orig_entry(struct batadv_tt_global_entry *tt_global_entry,
- struct batadv_tt_orig_list_entry *orig_entry)
+_batadv_tt_global_del_orig_entry(struct batadv_tt_global_entry *tt_global_entry,
+ struct batadv_tt_orig_list_entry *orig_entry)
{
+ lockdep_assert_held(&tt_global_entry->list_lock);
+
batadv_tt_global_size_dec(orig_entry->orig_node,
tt_global_entry->common.vid);
atomic_dec(&tt_global_entry->orig_list_count);
+ /* requires holding tt_global_entry->list_lock and orig_entry->list
+ * being part of a list
+ */
hlist_del_rcu(&orig_entry->list);
batadv_tt_orig_list_entry_free_ref(orig_entry);
}
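
The underscore-prefixed helper now states its locking contract and enforces it with lockdep_assert_held(). A loose userspace analogue of the pattern, using a pthread mutex and a trylock-based assertion in place of lockdep:

#include <assert.h>
#include <pthread.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* The leading underscore marks the unlocked helper; the caller must
 * hold list_lock, mirroring lockdep_assert_held() in the kernel code.
 */
static void _del_entry(void)
{
        /* rough stand-in for lockdep_assert_held(): trylock must fail */
        assert(pthread_mutex_trylock(&list_lock) != 0);
        /* ... unlink the node from the list and drop its reference ... */
}

static void del_all_entries(void)
{
        pthread_mutex_lock(&list_lock);
        _del_entry();
        pthread_mutex_unlock(&list_lock);
}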
@@ -1680,7 +1695,7 @@ batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
spin_lock_bh(&tt_global_entry->list_lock);
head = &tt_global_entry->orig_list;
hlist_for_each_entry_safe(orig_entry, safe, head, list)
- batadv_tt_global_del_orig_entry(tt_global_entry, orig_entry);
+ _batadv_tt_global_del_orig_entry(tt_global_entry, orig_entry);
spin_unlock_bh(&tt_global_entry->list_lock);
}
@@ -1715,8 +1730,8 @@ batadv_tt_global_del_orig_node(struct batadv_priv *bat_priv,
orig_node->orig,
tt_global_entry->common.addr,
BATADV_PRINT_VID(vid), message);
- batadv_tt_global_del_orig_entry(tt_global_entry,
- orig_entry);
+ _batadv_tt_global_del_orig_entry(tt_global_entry,
+ orig_entry);
}
}
spin_unlock_bh(&tt_global_entry->list_lock);
@@ -1838,12 +1853,12 @@ out:
*/
void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
- int32_t match_vid,
+ s32 match_vid,
const char *message)
{
struct batadv_tt_global_entry *tt_global;
struct batadv_tt_common_entry *tt_common_entry;
- uint32_t i;
+ u32 i;
struct batadv_hashtable *hash = bat_priv->tt.global_hash;
struct hlist_node *safe;
struct hlist_head *head;
@@ -1914,7 +1929,7 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
struct hlist_head *head;
struct hlist_node *node_tmp;
spinlock_t *list_lock; /* protects write access to the hash lists */
- uint32_t i;
+ u32 i;
char *msg = NULL;
struct batadv_tt_common_entry *tt_common;
struct batadv_tt_global_entry *tt_global;
@@ -1955,7 +1970,7 @@ static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
struct batadv_tt_global_entry *tt_global;
struct hlist_node *node_tmp;
struct hlist_head *head;
- uint32_t i;
+ u32 i;
if (!bat_priv->tt.global_hash)
return;
@@ -2016,8 +2031,8 @@ _batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry,
* If the two clients are AP isolated the function returns NULL.
*/
struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
- const uint8_t *src,
- const uint8_t *addr,
+ const u8 *src,
+ const u8 *addr,
unsigned short vid)
{
struct batadv_tt_local_entry *tt_local_entry = NULL;
@@ -2085,16 +2100,16 @@ out:
*
* Returns the checksum of the global table of a given originator.
*/
-static uint32_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
- struct batadv_orig_node *orig_node,
- unsigned short vid)
+static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ unsigned short vid)
{
struct batadv_hashtable *hash = bat_priv->tt.global_hash;
struct batadv_tt_common_entry *tt_common;
struct batadv_tt_global_entry *tt_global;
struct hlist_head *head;
- uint32_t i, crc_tmp, crc = 0;
- uint8_t flags;
+ u32 i, crc_tmp, crc = 0;
+ u8 flags;
__be16 tmp_vid;
for (i = 0; i < hash->size; i++) {
@@ -2162,14 +2177,14 @@ static uint32_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
*
* Returns the checksum of the local table
*/
-static uint32_t batadv_tt_local_crc(struct batadv_priv *bat_priv,
- unsigned short vid)
+static u32 batadv_tt_local_crc(struct batadv_priv *bat_priv,
+ unsigned short vid)
{
struct batadv_hashtable *hash = bat_priv->tt.local_hash;
struct batadv_tt_common_entry *tt_common;
struct hlist_head *head;
- uint32_t i, crc_tmp, crc = 0;
- uint8_t flags;
+ u32 i, crc_tmp, crc = 0;
+ u8 flags;
__be16 tmp_vid;
for (i = 0; i < hash->size; i++) {
@@ -2211,12 +2226,13 @@ static uint32_t batadv_tt_local_crc(struct batadv_priv *bat_priv,
static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
{
- struct batadv_tt_req_node *node, *safe;
+ struct batadv_tt_req_node *node;
+ struct hlist_node *safe;
spin_lock_bh(&bat_priv->tt.req_list_lock);
- list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
- list_del(&node->list);
+ hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
+ hlist_del_init(&node->list);
kfree(node);
}
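
The request list becomes an hlist as well, and every walk that deletes uses the _safe iterator plus hlist_del_init(), which leaves the node self-consistent so a later delete on the error path stays harmless. The core delete-while-iterating pattern, as a plain C sketch:

#include <stdlib.h>

struct req_node {
        struct req_node *next;
        /* ... payload ... */
};

/* Free every node: cache 'next' before freeing the current node, the
 * same reason the kernel loop needs hlist_for_each_entry_safe().
 */
static void req_list_free(struct req_node **head)
{
        struct req_node *node = *head, *safe;

        while (node) {
                safe = node->next;      /* must be read before free() */
                free(node);
                node = safe;
        }
        *head = NULL;
}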
@@ -2226,7 +2242,7 @@ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
const void *tt_buff,
- uint16_t tt_buff_len)
+ u16 tt_buff_len)
{
/* Replace the old buffer only if I received something in the
* last OGM (the OGM could carry no changes)
@@ -2246,30 +2262,36 @@ static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
{
- struct batadv_tt_req_node *node, *safe;
+ struct batadv_tt_req_node *node;
+ struct hlist_node *safe;
spin_lock_bh(&bat_priv->tt.req_list_lock);
- list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
+ hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
if (batadv_has_timed_out(node->issued_at,
BATADV_TT_REQUEST_TIMEOUT)) {
- list_del(&node->list);
+ hlist_del_init(&node->list);
kfree(node);
}
}
spin_unlock_bh(&bat_priv->tt.req_list_lock);
}
-/* returns the pointer to the new tt_req_node struct if no request
- * has already been issued for this orig_node, NULL otherwise
+/**
+ * batadv_tt_req_node_new - search and possibly create a tt_req_node object
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node this request is being issued for
+ *
+ * Returns the pointer to the new tt_req_node struct if no request
+ * has already been issued for this orig_node, NULL otherwise.
*/
static struct batadv_tt_req_node *
-batadv_new_tt_req_node(struct batadv_priv *bat_priv,
+batadv_tt_req_node_new(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node)
{
struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
spin_lock_bh(&bat_priv->tt.req_list_lock);
- list_for_each_entry(tt_req_node_tmp, &bat_priv->tt.req_list, list) {
+ hlist_for_each_entry(tt_req_node_tmp, &bat_priv->tt.req_list, list) {
if (batadv_compare_eth(tt_req_node_tmp, orig_node) &&
!batadv_has_timed_out(tt_req_node_tmp->issued_at,
BATADV_TT_REQUEST_TIMEOUT))
@@ -2283,7 +2305,7 @@ batadv_new_tt_req_node(struct batadv_priv *bat_priv,
ether_addr_copy(tt_req_node->addr, orig_node->orig);
tt_req_node->issued_at = jiffies;
- list_add(&tt_req_node->list, &bat_priv->tt.req_list);
+ hlist_add_head(&tt_req_node->list, &bat_priv->tt.req_list);
unlock:
spin_unlock_bh(&bat_priv->tt.req_list_lock);
return tt_req_node;
@@ -2335,15 +2357,15 @@ static int batadv_tt_global_valid(const void *entry_ptr,
*/
static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
struct batadv_hashtable *hash,
- void *tvlv_buff, uint16_t tt_len,
+ void *tvlv_buff, u16 tt_len,
int (*valid_cb)(const void *, const void *),
void *cb_data)
{
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tvlv_tt_change *tt_change;
struct hlist_head *head;
- uint16_t tt_tot, tt_num_entries = 0;
- uint32_t i;
+ u16 tt_tot, tt_num_entries = 0;
+ u32 i;
tt_tot = batadv_tt_entries(tt_len);
tt_change = (struct batadv_tvlv_tt_change *)tvlv_buff;
@@ -2385,11 +2407,11 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
*/
static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
struct batadv_tvlv_tt_vlan_data *tt_vlan,
- uint16_t num_vlan)
+ u16 num_vlan)
{
struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp;
struct batadv_orig_node_vlan *vlan;
- uint32_t crc;
+ u32 crc;
int i;
/* check if each received CRC matches the locally stored one */
@@ -2444,11 +2466,11 @@ static void batadv_tt_global_update_crc(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node)
{
struct batadv_orig_node_vlan *vlan;
- uint32_t crc;
+ u32 crc;
/* recompute the global CRC for each VLAN */
rcu_read_lock();
- list_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+ hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
/* if orig_node is a backbone node for this VLAN, don't compute
* the CRC as we ignore all the global entries over it
*/
@@ -2474,9 +2496,9 @@ static void batadv_tt_global_update_crc(struct batadv_priv *bat_priv,
*/
static int batadv_send_tt_request(struct batadv_priv *bat_priv,
struct batadv_orig_node *dst_orig_node,
- uint8_t ttvn,
+ u8 ttvn,
struct batadv_tvlv_tt_vlan_data *tt_vlan,
- uint16_t num_vlan, bool full_table)
+ u16 num_vlan, bool full_table)
{
struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
struct batadv_tt_req_node *tt_req_node = NULL;
@@ -2492,7 +2514,7 @@ static int batadv_send_tt_request(struct batadv_priv *bat_priv,
/* The new tt_req will be issued only if I'm not waiting for a
* reply from the same orig_node yet
*/
- tt_req_node = batadv_new_tt_req_node(bat_priv, dst_orig_node);
+ tt_req_node = batadv_tt_req_node_new(bat_priv, dst_orig_node);
if (!tt_req_node)
goto out;
@@ -2534,7 +2556,8 @@ out:
batadv_hardif_free_ref(primary_if);
if (ret && tt_req_node) {
spin_lock_bh(&bat_priv->tt.req_list_lock);
- list_del(&tt_req_node->list);
+ /* hlist_del_init() verifies tt_req_node still is in the list */
+ hlist_del_init(&tt_req_node->list);
spin_unlock_bh(&bat_priv->tt.req_list_lock);
kfree(tt_req_node);
}
@@ -2554,7 +2577,7 @@ out:
*/
static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv,
struct batadv_tvlv_tt_data *tt_data,
- uint8_t *req_src, uint8_t *req_dst)
+ u8 *req_src, u8 *req_dst)
{
struct batadv_orig_node *req_dst_orig_node;
struct batadv_orig_node *res_dst_orig_node = NULL;
@@ -2562,9 +2585,9 @@ static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv,
struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
struct batadv_tvlv_tt_vlan_data *tt_vlan;
bool ret = false, full_table;
- uint8_t orig_ttvn, req_ttvn;
- uint16_t tvlv_len;
- int32_t tt_len;
+ u8 orig_ttvn, req_ttvn;
+ u16 tvlv_len;
+ s32 tt_len;
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
@@ -2580,7 +2603,7 @@ static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv,
if (!res_dst_orig_node)
goto out;
- orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
+ orig_ttvn = (u8)atomic_read(&req_dst_orig_node->last_ttvn);
req_ttvn = tt_data->ttvn;
tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(tt_data + 1);
@@ -2686,16 +2709,16 @@ out:
*/
static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
struct batadv_tvlv_tt_data *tt_data,
- uint8_t *req_src)
+ u8 *req_src)
{
struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
struct batadv_hard_iface *primary_if = NULL;
struct batadv_tvlv_tt_change *tt_change;
struct batadv_orig_node *orig_node;
- uint8_t my_ttvn, req_ttvn;
- uint16_t tvlv_len;
+ u8 my_ttvn, req_ttvn;
+ u16 tvlv_len;
bool full_table;
- int32_t tt_len;
+ s32 tt_len;
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
@@ -2704,7 +2727,7 @@ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
spin_lock_bh(&bat_priv->tt.commit_lock);
- my_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
+ my_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
req_ttvn = tt_data->ttvn;
orig_node = batadv_orig_hash_find(bat_priv, req_src);
@@ -2743,7 +2766,7 @@ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
bat_priv->tt.last_changeset_len);
spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
} else {
- req_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
+ req_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
/* allocate the tvlv, put the tt_data and all the tt_vlan_data
* in the initial part
@@ -2804,7 +2827,7 @@ out:
*/
static bool batadv_send_tt_response(struct batadv_priv *bat_priv,
struct batadv_tvlv_tt_data *tt_data,
- uint8_t *req_src, uint8_t *req_dst)
+ u8 *req_src, u8 *req_dst)
{
if (batadv_is_my_mac(bat_priv, req_dst))
return batadv_send_my_tt_response(bat_priv, tt_data, req_src);
@@ -2815,7 +2838,7 @@ static bool batadv_send_tt_response(struct batadv_priv *bat_priv,
static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
struct batadv_tvlv_tt_change *tt_change,
- uint16_t tt_num_changes, uint8_t ttvn)
+ u16 tt_num_changes, u8 ttvn)
{
int i;
int roams;
@@ -2847,8 +2870,8 @@ static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
struct batadv_tvlv_tt_change *tt_change,
- uint8_t ttvn, uint8_t *resp_src,
- uint16_t num_entries)
+ u8 ttvn, u8 *resp_src,
+ u16 num_entries)
{
struct batadv_orig_node *orig_node;
@@ -2878,7 +2901,7 @@ out:
static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
- uint16_t tt_num_changes, uint8_t ttvn,
+ u16 tt_num_changes, u8 ttvn,
struct batadv_tvlv_tt_change *tt_change)
{
_batadv_tt_update_changes(bat_priv, orig_node, tt_change,
@@ -2897,7 +2920,7 @@ static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
*
* Returns true if the client is served by this node, false otherwise.
*/
-bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr,
+bool batadv_is_my_client(struct batadv_priv *bat_priv, const u8 *addr,
unsigned short vid)
{
struct batadv_tt_local_entry *tt_local_entry;
@@ -2928,13 +2951,14 @@ out:
*/
static void batadv_handle_tt_response(struct batadv_priv *bat_priv,
struct batadv_tvlv_tt_data *tt_data,
- uint8_t *resp_src, uint16_t num_entries)
+ u8 *resp_src, u16 num_entries)
{
- struct batadv_tt_req_node *node, *safe;
+ struct batadv_tt_req_node *node;
+ struct hlist_node *safe;
struct batadv_orig_node *orig_node = NULL;
struct batadv_tvlv_tt_change *tt_change;
- uint8_t *tvlv_ptr = (uint8_t *)tt_data;
- uint16_t change_offset;
+ u8 *tvlv_ptr = (u8 *)tt_data;
+ u16 change_offset;
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
@@ -2968,10 +2992,10 @@ static void batadv_handle_tt_response(struct batadv_priv *bat_priv,
/* Delete the tt_req_node from pending tt_requests list */
spin_lock_bh(&bat_priv->tt.req_list_lock);
- list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
+ hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
if (!batadv_compare_eth(node->addr, resp_src))
continue;
- list_del(&node->list);
+ hlist_del_init(&node->list);
kfree(node);
}
@@ -3017,8 +3041,7 @@ static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
*
* returns true if the ROAMING_ADV can be sent, false otherwise
*/
-static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
- uint8_t *client)
+static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv, u8 *client)
{
struct batadv_tt_roam_node *tt_roam_node;
bool ret = false;
@@ -3073,7 +3096,7 @@ unlock:
* for this particular roamed client has to be forwarded to the sender of the
* roaming message.
*/
-static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
+static void batadv_send_roam_adv(struct batadv_priv *bat_priv, u8 *client,
unsigned short vid,
struct batadv_orig_node *orig_node)
{
@@ -3151,14 +3174,14 @@ void batadv_tt_free(struct batadv_priv *bat_priv)
* @enable: whether to set or unset the flag
* @count: whether to increase the TT size by the number of changed entries
*/
-static void batadv_tt_local_set_flags(struct batadv_priv *bat_priv,
- uint16_t flags, bool enable, bool count)
+static void batadv_tt_local_set_flags(struct batadv_priv *bat_priv, u16 flags,
+ bool enable, bool count)
{
struct batadv_hashtable *hash = bat_priv->tt.local_hash;
struct batadv_tt_common_entry *tt_common_entry;
- uint16_t changed_num = 0;
+ u16 changed_num = 0;
struct hlist_head *head;
- uint32_t i;
+ u32 i;
if (!hash)
return;
@@ -3200,7 +3223,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
struct hlist_node *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
- uint32_t i;
+ u32 i;
if (!hash)
return;
@@ -3248,6 +3271,8 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
*/
static void batadv_tt_local_commit_changes_nolock(struct batadv_priv *bat_priv)
{
+ lockdep_assert_held(&bat_priv->tt.commit_lock);
+
/* Update multicast addresses in local translation table */
batadv_mcast_mla_update(bat_priv);
@@ -3266,7 +3291,7 @@ static void batadv_tt_local_commit_changes_nolock(struct batadv_priv *bat_priv)
atomic_inc(&bat_priv->tt.vn);
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Local changes committed, updating to ttvn %u\n",
- (uint8_t)atomic_read(&bat_priv->tt.vn));
+ (u8)atomic_read(&bat_priv->tt.vn));
/* reset the sending counter */
atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
@@ -3285,8 +3310,8 @@ void batadv_tt_local_commit_changes(struct batadv_priv *bat_priv)
spin_unlock_bh(&bat_priv->tt.commit_lock);
}
-bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
- uint8_t *dst, unsigned short vid)
+bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, u8 *src, u8 *dst,
+ unsigned short vid)
{
struct batadv_tt_local_entry *tt_local_entry = NULL;
struct batadv_tt_global_entry *tt_global_entry = NULL;
@@ -3334,11 +3359,11 @@ out:
*/
static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
- const void *tt_buff, uint16_t tt_num_vlan,
+ const void *tt_buff, u16 tt_num_vlan,
struct batadv_tvlv_tt_change *tt_change,
- uint16_t tt_num_changes, uint8_t ttvn)
+ u16 tt_num_changes, u8 ttvn)
{
- uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+ u8 orig_ttvn = (u8)atomic_read(&orig_node->last_ttvn);
struct batadv_tvlv_tt_vlan_data *tt_vlan;
bool full_table = true;
bool has_tt_init;
@@ -3417,7 +3442,7 @@ request_table:
* deleted later by a DEL or because of timeout
*/
bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
- uint8_t *addr, unsigned short vid)
+ u8 *addr, unsigned short vid)
{
struct batadv_tt_global_entry *tt_global_entry;
bool ret = false;
@@ -3443,7 +3468,7 @@ out:
* to keep the latter consistent with the node TTVN
*/
bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv,
- uint8_t *addr, unsigned short vid)
+ u8 *addr, unsigned short vid)
{
struct batadv_tt_local_entry *tt_local_entry;
bool ret = false;
@@ -3529,13 +3554,13 @@ void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface)
*/
static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
- uint8_t flags, void *tvlv_value,
- uint16_t tvlv_value_len)
+ u8 flags, void *tvlv_value,
+ u16 tvlv_value_len)
{
struct batadv_tvlv_tt_vlan_data *tt_vlan;
struct batadv_tvlv_tt_change *tt_change;
struct batadv_tvlv_tt_data *tt_data;
- uint16_t num_entries, num_vlan;
+ u16 num_entries, num_vlan;
if (tvlv_value_len < sizeof(*tt_data))
return;
@@ -3571,12 +3596,12 @@ static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
* otherwise.
*/
static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
- uint8_t *src, uint8_t *dst,
+ u8 *src, u8 *dst,
void *tvlv_value,
- uint16_t tvlv_value_len)
+ u16 tvlv_value_len)
{
struct batadv_tvlv_tt_data *tt_data;
- uint16_t tt_vlan_len, tt_num_entries;
+ u16 tt_vlan_len, tt_num_entries;
char tt_flag;
bool ret;
@@ -3652,9 +3677,9 @@ static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
* otherwise.
*/
static int batadv_roam_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
- uint8_t *src, uint8_t *dst,
+ u8 *src, u8 *dst,
void *tvlv_value,
- uint16_t tvlv_value_len)
+ u16 tvlv_value_len)
{
struct batadv_tvlv_roam_adv *roaming_adv;
struct batadv_orig_node *orig_node = NULL;
@@ -3736,7 +3761,7 @@ int batadv_tt_init(struct batadv_priv *bat_priv)
* otherwise
*/
bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv,
- const uint8_t *addr, unsigned short vid)
+ const u8 *addr, unsigned short vid)
{
struct batadv_tt_global_entry *tt;
bool ret;
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index 6acc25d3a..abd8e116e 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -22,44 +22,41 @@
#include <linux/types.h>
-struct batadv_orig_node;
-struct batadv_priv;
struct net_device;
struct seq_file;
int batadv_tt_init(struct batadv_priv *bat_priv);
-bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
- unsigned short vid, int ifindex, uint32_t mark);
-uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
- const uint8_t *addr, unsigned short vid,
- const char *message, bool roaming);
+bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
+ unsigned short vid, int ifindex, u32 mark);
+u16 batadv_tt_local_remove(struct batadv_priv *bat_priv,
+ const u8 *addr, unsigned short vid,
+ const char *message, bool roaming);
int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset);
int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset);
void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
- int32_t match_vid, const char *message);
+ s32 match_vid, const char *message);
int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
- const uint8_t *addr, unsigned short vid);
+ const u8 *addr, unsigned short vid);
struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
- const uint8_t *src,
- const uint8_t *addr,
+ const u8 *src, const u8 *addr,
unsigned short vid);
void batadv_tt_free(struct batadv_priv *bat_priv);
-bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr,
+bool batadv_is_my_client(struct batadv_priv *bat_priv, const u8 *addr,
unsigned short vid);
-bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
- uint8_t *dst, unsigned short vid);
+bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, u8 *src, u8 *dst,
+ unsigned short vid);
void batadv_tt_local_commit_changes(struct batadv_priv *bat_priv);
bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
- uint8_t *addr, unsigned short vid);
+ u8 *addr, unsigned short vid);
bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv,
- uint8_t *addr, unsigned short vid);
+ u8 *addr, unsigned short vid);
void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface);
bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
const unsigned char *addr,
unsigned short vid);
bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv,
- const uint8_t *addr, unsigned short vid);
+ const u8 *addr, unsigned short vid);
#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 55610a805..d260efd70 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -44,7 +44,7 @@ struct seq_file;
*
* *Please be careful: batadv_dat_addr_t must be UNSIGNED*
*/
-#define batadv_dat_addr_t uint16_t
+#define batadv_dat_addr_t u16
#endif /* CONFIG_BATMAN_ADV_DAT */
@@ -103,10 +103,10 @@ struct batadv_hard_iface_bat_iv {
*/
struct batadv_hard_iface {
struct list_head list;
- int16_t if_num;
+ s16 if_num;
char if_status;
struct net_device *net_dev;
- uint8_t num_bcasts;
+ u8 num_bcasts;
struct kobject *hardif_obj;
atomic_t refcount;
struct packet_type batman_adv_ptype;
@@ -132,8 +132,8 @@ struct batadv_orig_ifinfo {
struct hlist_node list;
struct batadv_hard_iface *if_outgoing;
struct batadv_neigh_node __rcu *router; /* rcu protected pointer */
- uint32_t last_real_seqno;
- uint8_t last_ttl;
+ u32 last_real_seqno;
+ u8 last_ttl;
unsigned long batman_seqno_reset;
atomic_t refcount;
struct rcu_head rcu;
@@ -152,9 +152,9 @@ struct batadv_frag_table_entry {
struct hlist_head head;
spinlock_t lock; /* protects head */
unsigned long timestamp;
- uint16_t seqno;
- uint16_t size;
- uint16_t total_size;
+ u16 seqno;
+ u16 size;
+ u16 total_size;
};
/**
@@ -166,7 +166,7 @@ struct batadv_frag_table_entry {
struct batadv_frag_list_entry {
struct hlist_node list;
struct sk_buff *skb;
- uint8_t no;
+ u8 no;
};
/**
@@ -175,7 +175,7 @@ struct batadv_frag_list_entry {
* @num_entries: number of TT entries for this VLAN
*/
struct batadv_vlan_tt {
- uint32_t crc;
+ u32 crc;
atomic_t num_entries;
};
@@ -190,7 +190,7 @@ struct batadv_vlan_tt {
struct batadv_orig_node_vlan {
unsigned short vid;
struct batadv_vlan_tt tt;
- struct list_head list;
+ struct hlist_node list;
atomic_t refcount;
struct rcu_head rcu;
};
@@ -206,7 +206,7 @@ struct batadv_orig_node_vlan {
*/
struct batadv_orig_bat_iv {
unsigned long *bcast_own;
- uint8_t *bcast_own_sum;
+ u8 *bcast_own_sum;
/* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
* neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
*/
@@ -260,7 +260,7 @@ struct batadv_orig_bat_iv {
* @bat_iv: B.A.T.M.A.N. IV private structure
*/
struct batadv_orig_node {
- uint8_t orig[ETH_ALEN];
+ u8 orig[ETH_ALEN];
struct hlist_head ifinfo_list;
struct batadv_orig_ifinfo *last_bonding_candidate;
#ifdef CONFIG_BATMAN_ADV_DAT
@@ -271,7 +271,7 @@ struct batadv_orig_node {
#ifdef CONFIG_BATMAN_ADV_MCAST
/* synchronizes mcast tvlv specific orig changes */
spinlock_t mcast_handler_lock;
- uint8_t mcast_flags;
+ u8 mcast_flags;
struct hlist_node mcast_want_all_unsnoopables_node;
struct hlist_node mcast_want_all_ipv4_node;
struct hlist_node mcast_want_all_ipv6_node;
@@ -280,12 +280,12 @@ struct batadv_orig_node {
unsigned long capa_initialized;
atomic_t last_ttvn;
unsigned char *tt_buff;
- int16_t tt_buff_len;
+ s16 tt_buff_len;
spinlock_t tt_buff_lock; /* protects tt_buff & tt_buff_len */
/* prevents from changing the table while reading it */
spinlock_t tt_lock;
DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
- uint32_t last_bcast_seqno;
+ u32 last_bcast_seqno;
struct hlist_head neigh_list;
/* neigh_list_lock protects: neigh_list and router */
spinlock_t neigh_list_lock;
@@ -302,7 +302,7 @@ struct batadv_orig_node {
spinlock_t out_coding_list_lock; /* Protects out_coding_list */
#endif
struct batadv_frag_table_entry fragments[BATADV_FRAG_BUFFER_COUNT];
- struct list_head vlan_list;
+ struct hlist_head vlan_list;
spinlock_t vlan_list_lock; /* protects vlan_list */
struct batadv_orig_bat_iv bat_iv;
};
@@ -328,16 +328,14 @@ enum batadv_orig_capabilities {
* @orig_node: pointer to corresponding orig node
* @bandwidth_down: advertised uplink download bandwidth
* @bandwidth_up: advertised uplink upload bandwidth
- * @deleted: this struct is scheduled for deletion
* @refcount: number of contexts the object is used
* @rcu: struct used for freeing in an RCU-safe manner
*/
struct batadv_gw_node {
struct hlist_node list;
struct batadv_orig_node *orig_node;
- uint32_t bandwidth_down;
- uint32_t bandwidth_up;
- unsigned long deleted;
+ u32 bandwidth_down;
+ u32 bandwidth_up;
atomic_t refcount;
struct rcu_head rcu;
};
@@ -358,7 +356,7 @@ struct batadv_gw_node {
struct batadv_neigh_node {
struct hlist_node list;
struct batadv_orig_node *orig_node;
- uint8_t addr[ETH_ALEN];
+ u8 addr[ETH_ALEN];
struct hlist_head ifinfo_list;
spinlock_t ifinfo_lock; /* protects ifinfo_list and its members */
struct batadv_hard_iface *if_incoming;
@@ -378,11 +376,11 @@ struct batadv_neigh_node {
* @real_packet_count: counted result of real_bits
*/
struct batadv_neigh_ifinfo_bat_iv {
- uint8_t tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
- uint8_t tq_index;
- uint8_t tq_avg;
+ u8 tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
+ u8 tq_index;
+ u8 tq_avg;
DECLARE_BITMAP(real_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
- uint8_t real_packet_count;
+ u8 real_packet_count;
};
/**
@@ -398,7 +396,7 @@ struct batadv_neigh_ifinfo {
struct hlist_node list;
struct batadv_hard_iface *if_outgoing;
struct batadv_neigh_ifinfo_bat_iv bat_iv;
- uint8_t last_ttl;
+ u8 last_ttl;
atomic_t refcount;
struct rcu_head rcu;
};
@@ -411,7 +409,7 @@ struct batadv_neigh_ifinfo {
*/
#ifdef CONFIG_BATMAN_ADV_BLA
struct batadv_bcast_duplist_entry {
- uint8_t orig[ETH_ALEN];
+ u8 orig[ETH_ALEN];
__be32 crc;
unsigned long entrytime;
};
@@ -537,13 +535,13 @@ struct batadv_priv_tt {
struct list_head changes_list;
struct batadv_hashtable *local_hash;
struct batadv_hashtable *global_hash;
- struct list_head req_list;
+ struct hlist_head req_list;
struct list_head roam_list;
spinlock_t changes_list_lock; /* protects changes */
spinlock_t req_list_lock; /* protects req_list */
spinlock_t roam_list_lock; /* protects roam_list */
unsigned char *last_changeset;
- int16_t last_changeset_len;
+ s16 last_changeset_len;
/* protects last_changeset & last_changeset_len */
spinlock_t last_changeset_lock;
/* prevents from executing a commit while reading the table */
@@ -663,7 +661,7 @@ struct batadv_priv_mcast {
struct hlist_head want_all_unsnoopables_list;
struct hlist_head want_all_ipv4_list;
struct hlist_head want_all_ipv6_list;
- uint8_t flags;
+ u8 flags;
bool enabled;
atomic_t num_disabled;
atomic_t num_want_all_unsnoopables;
@@ -781,7 +779,7 @@ struct batadv_priv {
atomic_t mesh_state;
struct net_device *soft_iface;
struct net_device_stats stats;
- uint64_t __percpu *bat_counters; /* Per cpu counters */
+ u64 __percpu *bat_counters; /* Per cpu counters */
atomic_t aggregated_ogms;
atomic_t bonding;
atomic_t fragmentation;
@@ -803,8 +801,8 @@ struct batadv_priv {
#ifdef CONFIG_BATMAN_ADV_DEBUG
atomic_t log_level;
#endif
- uint32_t isolation_mark;
- uint32_t isolation_mark_mask;
+ u32 isolation_mark;
+ u32 isolation_mark_mask;
atomic_t bcast_seqno;
atomic_t bcast_queue_left;
atomic_t batman_queue_left;
@@ -870,7 +868,7 @@ struct batadv_socket_client {
struct batadv_socket_packet {
struct list_head list;
size_t icmp_len;
- uint8_t icmp_packet[BATADV_ICMP_MAX_PACKET_SIZE];
+ u8 icmp_packet[BATADV_ICMP_MAX_PACKET_SIZE];
};
/**
@@ -891,14 +889,14 @@ struct batadv_socket_packet {
*/
#ifdef CONFIG_BATMAN_ADV_BLA
struct batadv_bla_backbone_gw {
- uint8_t orig[ETH_ALEN];
+ u8 orig[ETH_ALEN];
unsigned short vid;
struct hlist_node hash_entry;
struct batadv_priv *bat_priv;
unsigned long lasttime;
atomic_t wait_periods;
atomic_t request_sent;
- uint16_t crc;
+ u16 crc;
atomic_t refcount;
struct rcu_head rcu;
};
@@ -914,7 +912,7 @@ struct batadv_bla_backbone_gw {
* @rcu: struct used for freeing in an RCU-safe manner
*/
struct batadv_bla_claim {
- uint8_t addr[ETH_ALEN];
+ u8 addr[ETH_ALEN];
unsigned short vid;
struct batadv_bla_backbone_gw *backbone_gw;
unsigned long lasttime;
@@ -936,10 +934,10 @@ struct batadv_bla_claim {
* @rcu: struct used for freeing in an RCU-safe manner
*/
struct batadv_tt_common_entry {
- uint8_t addr[ETH_ALEN];
+ u8 addr[ETH_ALEN];
unsigned short vid;
struct hlist_node hash_entry;
- uint16_t flags;
+ u16 flags;
unsigned long added_at;
atomic_t refcount;
struct rcu_head rcu;
@@ -981,7 +979,7 @@ struct batadv_tt_global_entry {
*/
struct batadv_tt_orig_list_entry {
struct batadv_orig_node *orig_node;
- uint8_t ttvn;
+ u8 ttvn;
struct hlist_node list;
atomic_t refcount;
struct rcu_head rcu;
@@ -1004,9 +1002,9 @@ struct batadv_tt_change_node {
* @list: list node for batadv_priv_tt::req_list
*/
struct batadv_tt_req_node {
- uint8_t addr[ETH_ALEN];
+ u8 addr[ETH_ALEN];
unsigned long issued_at;
- struct list_head list;
+ struct hlist_node list;
};
/**
@@ -1018,7 +1016,7 @@ struct batadv_tt_req_node {
* @list: list node for batadv_priv_tt::roam_list
*/
struct batadv_tt_roam_node {
- uint8_t addr[ETH_ALEN];
+ u8 addr[ETH_ALEN];
atomic_t counter;
unsigned long first_time;
struct list_head list;
@@ -1035,7 +1033,7 @@ struct batadv_tt_roam_node {
*/
struct batadv_nc_node {
struct list_head list;
- uint8_t addr[ETH_ALEN];
+ u8 addr[ETH_ALEN];
atomic_t refcount;
struct rcu_head rcu;
struct batadv_orig_node *orig_node;
@@ -1059,8 +1057,8 @@ struct batadv_nc_path {
atomic_t refcount;
struct list_head packet_list;
spinlock_t packet_list_lock; /* Protects packet_list */
- uint8_t next_hop[ETH_ALEN];
- uint8_t prev_hop[ETH_ALEN];
+ u8 next_hop[ETH_ALEN];
+ u8 prev_hop[ETH_ALEN];
unsigned long last_valid;
};
@@ -1112,11 +1110,11 @@ struct batadv_skb_cb {
struct batadv_forw_packet {
struct hlist_node list;
unsigned long send_time;
- uint8_t own;
+ u8 own;
struct sk_buff *skb;
- uint16_t packet_len;
- uint32_t direct_link_flags;
- uint8_t num_packets;
+ u16 packet_len;
+ u32 direct_link_flags;
+ u8 num_packets;
struct delayed_work delayed_work;
struct batadv_hard_iface *if_incoming;
struct batadv_hard_iface *if_outgoing;
@@ -1191,7 +1189,7 @@ struct batadv_algo_ops {
*/
struct batadv_dat_entry {
__be32 ip;
- uint8_t mac_addr[ETH_ALEN];
+ u8 mac_addr[ETH_ALEN];
unsigned short vid;
unsigned long last_update;
struct hlist_node hash_entry;
@@ -1253,14 +1251,13 @@ struct batadv_tvlv_handler {
struct hlist_node list;
void (*ogm_handler)(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
- uint8_t flags,
- void *tvlv_value, uint16_t tvlv_value_len);
+ u8 flags, void *tvlv_value, u16 tvlv_value_len);
int (*unicast_handler)(struct batadv_priv *bat_priv,
- uint8_t *src, uint8_t *dst,
- void *tvlv_value, uint16_t tvlv_value_len);
- uint8_t type;
- uint8_t version;
- uint8_t flags;
+ u8 *src, u8 *dst,
+ void *tvlv_value, u16 tvlv_value_len);
+ u8 type;
+ u8 version;
+ u8 flags;
atomic_t refcount;
struct rcu_head rcu;
};
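Aside from the uint8_t/uint16_t/uint32_t -> u8/u16/u32 spelling cleanup, the batman-adv hunks above also convert several members (vlan_list, req_list, the list fields of batadv_orig_node_vlan and batadv_tt_req_node) from list_head to hlist_head/hlist_node. A minimal userspace sketch of the hlist idea, with hypothetical names rather than the kernel's <linux/list.h>, shows why: the head shrinks to a single pointer, so every struct embedding a head gets smaller, while nodes keep O(1) unlinking.

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical re-creation of the kernel hlist shape: one-pointer
     * head, two-pointer nodes, O(1) unlink via pprev. */
    struct hnode { struct hnode *next, **pprev; };
    struct hhead { struct hnode *first; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void hlist_add_head(struct hnode *n, struct hhead *h)
    {
        n->next = h->first;
        if (h->first)
            h->first->pprev = &n->next;
        h->first = n;
        n->pprev = &h->first;
    }

    static void hlist_del(struct hnode *n)
    {
        *n->pprev = n->next;
        if (n->next)
            n->next->pprev = n->pprev;
    }

    struct item { int id; struct hnode node; };

    int main(void)
    {
        struct hhead head = { NULL };
        struct item a = { 1 }, b = { 2 };

        hlist_add_head(&a.node, &head);
        hlist_add_head(&b.node, &head);
        hlist_del(&a.node);

        for (struct hnode *n = head.first; n; n = n->next)
            printf("id=%d\n", container_of(n, struct item, node)->id);
        return 0;
    }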
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 2fb7b3064..131e79cde 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -85,7 +85,7 @@ struct lowpan_dev {
static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
{
- return netdev_priv(netdev);
+ return (struct lowpan_dev *)lowpan_priv(netdev)->priv;
}
static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
@@ -848,8 +848,9 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
struct net_device *netdev;
int err = 0;
- netdev = alloc_netdev(sizeof(struct lowpan_dev), IFACE_NAME_TEMPLATE,
- NET_NAME_UNKNOWN, netdev_setup);
+ netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev)),
+ IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN,
+ netdev_setup);
if (!netdev)
return -ENOMEM;
@@ -859,9 +860,24 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
SET_NETDEV_DEVTYPE(netdev, &bt_type);
+ *dev = lowpan_dev(netdev);
+ (*dev)->netdev = netdev;
+ (*dev)->hdev = chan->conn->hcon->hdev;
+ INIT_LIST_HEAD(&(*dev)->peers);
+
+ spin_lock(&devices_lock);
+ INIT_LIST_HEAD(&(*dev)->list);
+ list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
+ spin_unlock(&devices_lock);
+
+ lowpan_netdev_setup(netdev, LOWPAN_LLTYPE_BTLE);
+
err = register_netdev(netdev);
if (err < 0) {
BT_INFO("register_netdev failed %d", err);
+ spin_lock(&devices_lock);
+ list_del_rcu(&(*dev)->list);
+ spin_unlock(&devices_lock);
free_netdev(netdev);
goto out;
}
@@ -871,16 +887,6 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
&chan->src, chan->src_type);
set_bit(__LINK_STATE_PRESENT, &netdev->state);
- *dev = netdev_priv(netdev);
- (*dev)->netdev = netdev;
- (*dev)->hdev = chan->conn->hcon->hdev;
- INIT_LIST_HEAD(&(*dev)->peers);
-
- spin_lock(&devices_lock);
- INIT_LIST_HEAD(&(*dev)->list);
- list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
- spin_unlock(&devices_lock);
-
return 0;
out:
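The 6lowpan hunk above reorders setup_netdev(): the private area is now initialized, the device put on bt_6lowpan_devices and the generic 6lowpan setup run before register_netdev(), and the list insertion is unwound if registration fails. A minimal sketch of that init-before-publish ordering, with invented names standing in for the kernel APIs:

    #include <stdlib.h>

    /* Toy registry standing in for register_netdev() and the global
     * device list: the object must be fully initialized and published
     * before registration exposes it, and a failed registration must
     * unwind the publication before freeing. */
    struct dev {
        int ready;
        struct dev *next;
    };

    static struct dev *devices;        /* global device list */

    static int register_core(struct dev *d)
    {
        /* once this returns 0, other contexts may look d up */
        return d->ready ? 0 : -1;
    }

    static int setup_dev(struct dev **out)
    {
        struct dev *d = calloc(1, sizeof(*d));

        if (!d)
            return -1;

        d->ready = 1;                  /* init private state first */
        d->next = devices;             /* then publish on the list */
        devices = d;

        if (register_core(d) < 0) {
            devices = d->next;         /* unwind the publication */
            free(d);
            return -1;
        }
        *out = d;
        return 0;
    }

    int main(void)
    {
        struct dev *d;
        return setup_dev(&d);
    }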
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index b8c794b87..95d1a66ba 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -53,6 +53,11 @@ source "net/bluetooth/cmtp/Kconfig"
source "net/bluetooth/hidp/Kconfig"
+config BT_HS
+ bool "Bluetooth High Speed (HS) features"
+ depends on BT_BREDR
+ default y
+
config BT_LE
bool "Bluetooth Low Energy (LE) features"
depends on BT
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 29c12ae72..2b15ae8c1 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -13,9 +13,10 @@ bluetooth_6lowpan-y := 6lowpan.o
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \
- a2mp.o amp.o ecc.o hci_request.o mgmt_util.o
+ ecc.o hci_request.o mgmt_util.o
bluetooth-$(CONFIG_BT_BREDR) += sco.o
+bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o
bluetooth-$(CONFIG_BT_DEBUGFS) += hci_debugfs.o
bluetooth-$(CONFIG_BT_SELFTEST) += selftest.o
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index 5a04eb1a7..5f123c332 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -16,6 +16,7 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
+#include "hci_request.h"
#include "a2mp.h"
#include "amp.h"
@@ -286,11 +287,21 @@ static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb,
return 0;
}
+static void read_local_amp_info_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode)
+{
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ a2mp_send_getinfo_rsp(hdev);
+}
+
static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
struct a2mp_cmd *hdr)
{
struct a2mp_info_req *req = (void *) skb->data;
struct hci_dev *hdev;
+ struct hci_request hreq;
+ int err = 0;
if (le16_to_cpu(hdr->len) < sizeof(*req))
return -EINVAL;
@@ -311,7 +322,11 @@ static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
}
set_bit(READ_LOC_AMP_INFO, &mgr->state);
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
+ hci_req_init(&hreq, hdev);
+ hci_req_add(&hreq, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
+ err = hci_req_run(&hreq, read_local_amp_info_complete);
+ if (err < 0)
+ a2mp_send_getinfo_rsp(hdev);
done:
if (hdev)
diff --git a/net/bluetooth/a2mp.h b/net/bluetooth/a2mp.h
index 296f665ad..a4ff3ea9b 100644
--- a/net/bluetooth/a2mp.h
+++ b/net/bluetooth/a2mp.h
@@ -130,10 +130,29 @@ struct a2mp_physlink_rsp {
#define A2MP_STATUS_SECURITY_VIOLATION 0x06
struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr);
+
+#if IS_ENABLED(CONFIG_BT_HS)
int amp_mgr_put(struct amp_mgr *mgr);
struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
struct sk_buff *skb);
void a2mp_discover_amp(struct l2cap_chan *chan);
+#else
+static inline int amp_mgr_put(struct amp_mgr *mgr)
+{
+ return 0;
+}
+
+static inline struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
+ struct sk_buff *skb)
+{
+ return NULL;
+}
+
+static inline void a2mp_discover_amp(struct l2cap_chan *chan)
+{
+}
+#endif
+
void a2mp_send_getinfo_rsp(struct hci_dev *hdev);
void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status);
void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status);
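The a2mp.h hunk above uses the standard kernel trick for optional features: real prototypes when CONFIG_BT_HS is enabled, empty static inlines otherwise, so call sites build unmodified either way and the compiler drops the dead calls. A generic, self-contained sketch of the same pattern (CONFIG_DEMO is a made-up symbol, not a real Kconfig option):

    #include <stdio.h>

    #ifdef CONFIG_DEMO
    int feature_query(void);           /* real implementation linked in */
    void feature_run(void);
    #else
    static inline int feature_query(void) { return 0; }
    static inline void feature_run(void) { }
    #endif

    int main(void)
    {
        if (feature_query())
            feature_run();             /* dead code when compiled out */
        printf("feature enabled: %d\n", feature_query());
        return 0;
    }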
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
index ee016f039..e32f34189 100644
--- a/net/bluetooth/amp.c
+++ b/net/bluetooth/amp.c
@@ -16,6 +16,7 @@
#include <net/bluetooth/hci_core.h>
#include <crypto/hash.h>
+#include "hci_request.h"
#include "a2mp.h"
#include "amp.h"
@@ -220,10 +221,49 @@ int phylink_gen_key(struct hci_conn *conn, u8 *data, u8 *len, u8 *type)
return hmac_sha256(gamp_key, HCI_AMP_LINK_KEY_SIZE, "802b", 4, data);
}
+static void read_local_amp_assoc_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode, struct sk_buff *skb)
+{
+ struct hci_rp_read_local_amp_assoc *rp = (void *)skb->data;
+ struct amp_assoc *assoc = &hdev->loc_assoc;
+ size_t rem_len, frag_len;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (rp->status)
+ goto send_rsp;
+
+ frag_len = skb->len - sizeof(*rp);
+ rem_len = __le16_to_cpu(rp->rem_len);
+
+ if (rem_len > frag_len) {
+ BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
+
+ memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
+ assoc->offset += frag_len;
+
+ /* Read other fragments */
+ amp_read_loc_assoc_frag(hdev, rp->phy_handle);
+
+ return;
+ }
+
+ memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
+ assoc->len = assoc->offset + rem_len;
+ assoc->offset = 0;
+
+send_rsp:
+ /* Send A2MP Rsp when all fragments are received */
+ a2mp_send_getampassoc_rsp(hdev, rp->status);
+ a2mp_send_create_phy_link_req(hdev, rp->status);
+}
+
void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle)
{
struct hci_cp_read_local_amp_assoc cp;
struct amp_assoc *loc_assoc = &hdev->loc_assoc;
+ struct hci_request req;
+ int err = 0;
BT_DBG("%s handle %d", hdev->name, phy_handle);
@@ -231,12 +271,18 @@ void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle)
cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
cp.len_so_far = cpu_to_le16(loc_assoc->offset);
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+ hci_req_init(&req, hdev);
+ hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+ err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
+ if (err < 0)
+ a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
}
void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr)
{
struct hci_cp_read_local_amp_assoc cp;
+ struct hci_request req;
+ int err = 0;
memset(&hdev->loc_assoc, 0, sizeof(struct amp_assoc));
memset(&cp, 0, sizeof(cp));
@@ -244,7 +290,11 @@ void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr)
cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
set_bit(READ_LOC_AMP_ASSOC, &mgr->state);
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+ hci_req_init(&req, hdev);
+ hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+ err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
+ if (err < 0)
+ a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
}
void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
@@ -252,6 +302,8 @@ void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
{
struct hci_cp_read_local_amp_assoc cp;
struct amp_mgr *mgr = hcon->amp_mgr;
+ struct hci_request req;
+ int err = 0;
cp.phy_handle = hcon->handle;
cp.len_so_far = cpu_to_le16(0);
@@ -260,7 +312,25 @@ void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
set_bit(READ_LOC_AMP_ASSOC_FINAL, &mgr->state);
/* Read Local AMP Assoc final link information data */
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+ hci_req_init(&req, hdev);
+ hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+ err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
+ if (err < 0)
+ a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
+}
+
+static void write_remote_amp_assoc_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode, struct sk_buff *skb)
+{
+ struct hci_rp_write_remote_amp_assoc *rp = (void *)skb->data;
+
+ BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
+ hdev->name, rp->status, rp->phy_handle);
+
+ if (rp->status)
+ return;
+
+ amp_write_rem_assoc_continue(hdev, rp->phy_handle);
}
/* Write AMP Assoc data fragments, returns true with last fragment written */
@@ -270,6 +340,7 @@ static bool amp_write_rem_assoc_frag(struct hci_dev *hdev,
struct hci_cp_write_remote_amp_assoc *cp;
struct amp_mgr *mgr = hcon->amp_mgr;
struct amp_ctrl *ctrl;
+ struct hci_request req;
u16 frag_len, len;
ctrl = amp_ctrl_lookup(mgr, hcon->remote_id);
@@ -307,7 +378,9 @@ static bool amp_write_rem_assoc_frag(struct hci_dev *hdev,
amp_ctrl_put(ctrl);
- hci_send_cmd(hdev, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
+ hci_req_init(&req, hdev);
+ hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
+ hci_req_run_skb(&req, write_remote_amp_assoc_complete);
kfree(cp);
@@ -344,10 +417,37 @@ void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle)
amp_write_rem_assoc_frag(hdev, hcon);
}
+static void create_phylink_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode)
+{
+ struct hci_cp_create_phy_link *cp;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+
+ if (status) {
+ struct hci_conn *hcon;
+
+ hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
+ if (hcon)
+ hci_conn_del(hcon);
+ } else {
+ amp_write_remote_assoc(hdev, cp->phy_handle);
+ }
+
+ hci_dev_unlock(hdev);
+}
+
void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
struct hci_conn *hcon)
{
struct hci_cp_create_phy_link cp;
+ struct hci_request req;
cp.phy_handle = hcon->handle;
@@ -360,13 +460,33 @@ void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
return;
}
- hci_send_cmd(hdev, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp);
+ hci_req_init(&req, hdev);
+ hci_req_add(&req, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp);
+ hci_req_run(&req, create_phylink_complete);
+}
+
+static void accept_phylink_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode)
+{
+ struct hci_cp_accept_phy_link *cp;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status)
+ return;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
+ if (!cp)
+ return;
+
+ amp_write_remote_assoc(hdev, cp->phy_handle);
}
void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
struct hci_conn *hcon)
{
struct hci_cp_accept_phy_link cp;
+ struct hci_request req;
cp.phy_handle = hcon->handle;
@@ -379,7 +499,9 @@ void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
return;
}
- hci_send_cmd(hdev, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp);
+ hci_req_init(&req, hdev);
+ hci_req_add(&req, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp);
+ hci_req_run(&req, accept_phylink_complete);
}
void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon)
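Most of the amp.c churn above is a mechanical conversion from the fire-and-forget hci_send_cmd() to the hci_req_init()/hci_req_add()/hci_req_run_skb() triple, which lets each sender attach its own completion callback instead of routing every command-complete event through hci_event.c. A toy model of that shape, with invented names rather than the real hci_request API:

    #include <stdio.h>

    typedef void (*complete_fn)(int status);

    struct request {
        unsigned short opcode;         /* zero means nothing queued */
        complete_fn complete;
    };

    static void req_init(struct request *r)
    {
        r->opcode = 0;
        r->complete = NULL;
    }

    static void req_add(struct request *r, unsigned short op)
    {
        r->opcode = op;
    }

    static int req_run(struct request *r, complete_fn fn)
    {
        if (!r->opcode)
            return -1;                 /* -ENODATA in the real code */
        r->complete = fn;
        r->complete(0);                /* pretend the controller answered */
        return 0;
    }

    static void my_complete(int status)
    {
        printf("request completed, status %d\n", status);
    }

    int main(void)
    {
        struct request req;

        req_init(&req);
        req_add(&req, 0x0a09);         /* arbitrary opcode */
        if (req_run(&req, my_complete) < 0)
            printf("error path, as in the hunks above\n");
        return 0;
    }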
diff --git a/net/bluetooth/amp.h b/net/bluetooth/amp.h
index 7ea3db77b..8848f8158 100644
--- a/net/bluetooth/amp.h
+++ b/net/bluetooth/amp.h
@@ -44,6 +44,20 @@ void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
struct hci_conn *hcon);
void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
struct hci_conn *hcon);
+
+#if IS_ENABLED(CONFIG_BT_HS)
+void amp_create_logical_link(struct l2cap_chan *chan);
+void amp_disconnect_logical_link(struct hci_chan *hchan);
+#else
+static inline void amp_create_logical_link(struct l2cap_chan *chan)
+{
+}
+
+static inline void amp_disconnect_logical_link(struct hci_chan *hchan)
+{
+}
+#endif
+
void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle);
void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle);
void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon);
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index b0c6c6af7..9a5033877 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -100,9 +100,9 @@ static void cmtp_application_del(struct cmtp_session *session, struct cmtp_appli
static struct cmtp_application *cmtp_application_get(struct cmtp_session *session, int pattern, __u16 value)
{
struct cmtp_application *app;
- struct list_head *p, *n;
+ struct list_head *p;
- list_for_each_safe(p, n, &session->applications) {
+ list_for_each(p, &session->applications) {
app = list_entry(p, struct cmtp_application, list);
switch (pattern) {
case CMTP_MSGNUM:
@@ -511,13 +511,13 @@ static int cmtp_proc_show(struct seq_file *m, void *v)
struct capi_ctr *ctrl = m->private;
struct cmtp_session *session = ctrl->driverdata;
struct cmtp_application *app;
- struct list_head *p, *n;
+ struct list_head *p;
seq_printf(m, "%s\n\n", cmtp_procinfo(ctrl));
seq_printf(m, "addr %s\n", session->name);
seq_printf(m, "ctrl %d\n", session->num);
- list_for_each_safe(p, n, &session->applications) {
+ list_for_each(p, &session->applications) {
app = list_entry(p, struct cmtp_application, list);
seq_printf(m, "appl %d -> %d\n", app->appl, app->mapping);
}
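The cmtp hunks above can switch from list_for_each_safe() to list_for_each() because neither loop deletes entries while walking; the _safe variant exists only to cache the next pointer so the current node may be freed mid-iteration. A userspace illustration with simplified single-linked macros (not the kernel's doubly-linked ones):

    #include <stdio.h>

    struct node { int v; struct node *next; };

    #define list_for_each(p, head) \
        for ((p) = (head); (p); (p) = (p)->next)

    /* caches next in n so the body may free p */
    #define list_for_each_safe(p, n, head) \
        for ((p) = (head); (p) && ((n) = (p)->next, 1); (p) = (n))

    int main(void)
    {
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct node *p, *n;

        list_for_each(p, &a)           /* read-only walk: plain form */
            printf("%d ", p->v);
        printf("\n");

        list_for_each_safe(p, n, &a)   /* p could be freed in here */
            printf("%d ", p->v);
        printf("\n");
        return 0;
    }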
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 2c48bf0b5..ec4836f24 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -64,6 +64,126 @@ static void hci_le_create_connection_cancel(struct hci_conn *conn)
hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}
+/* This function requires the caller holds hdev->lock */
+static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
+{
+ struct hci_conn_params *params;
+ struct smp_irk *irk;
+ bdaddr_t *bdaddr;
+ u8 bdaddr_type;
+
+ bdaddr = &conn->dst;
+ bdaddr_type = conn->dst_type;
+
+ /* Check if we need to convert to identity address */
+ irk = hci_get_irk(conn->hdev, bdaddr, bdaddr_type);
+ if (irk) {
+ bdaddr = &irk->bdaddr;
+ bdaddr_type = irk->addr_type;
+ }
+
+ params = hci_explicit_connect_lookup(conn->hdev, bdaddr, bdaddr_type);
+ if (!params)
+ return;
+
+ /* The connection attempt was scanning for a new RPA and is still
+ * in the scan phase. If the params are not associated with any other
+ * autoconnect action, remove them completely; if they are, just unmark
+ * them as waiting for connection by clearing the explicit_connect field.
+ */
+ params->explicit_connect = false;
+
+ list_del_init(&params->action);
+
+ switch (params->auto_connect) {
+ case HCI_AUTO_CONN_EXPLICIT:
+ hci_conn_params_del(conn->hdev, bdaddr, bdaddr_type);
+ /* return instead of break to avoid duplicate scan update */
+ return;
+ case HCI_AUTO_CONN_DIRECT:
+ case HCI_AUTO_CONN_ALWAYS:
+ list_add(&params->action, &conn->hdev->pend_le_conns);
+ break;
+ case HCI_AUTO_CONN_REPORT:
+ list_add(&params->action, &conn->hdev->pend_le_reports);
+ break;
+ default:
+ break;
+ }
+
+ hci_update_background_scan(conn->hdev);
+}
+
+static void hci_conn_cleanup(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+
+ if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
+ hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
+
+ hci_chan_list_flush(conn);
+
+ hci_conn_hash_del(hdev, conn);
+
+ if (hdev->notify)
+ hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
+
+ hci_conn_del_sysfs(conn);
+
+ debugfs_remove_recursive(conn->debugfs);
+
+ hci_dev_put(hdev);
+
+ hci_conn_put(conn);
+}
+
+static void le_scan_cleanup(struct work_struct *work)
+{
+ struct hci_conn *conn = container_of(work, struct hci_conn,
+ le_scan_cleanup);
+ struct hci_dev *hdev = conn->hdev;
+ struct hci_conn *c = NULL;
+
+ BT_DBG("%s hcon %p", hdev->name, conn);
+
+ hci_dev_lock(hdev);
+
+ /* Check that the hci_conn is still around */
+ rcu_read_lock();
+ list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
+ if (c == conn)
+ break;
+ }
+ rcu_read_unlock();
+
+ if (c == conn) {
+ hci_connect_le_scan_cleanup(conn);
+ hci_conn_cleanup(conn);
+ }
+
+ hci_dev_unlock(hdev);
+ hci_dev_put(hdev);
+ hci_conn_put(conn);
+}
+
+static void hci_connect_le_scan_remove(struct hci_conn *conn)
+{
+ BT_DBG("%s hcon %p", conn->hdev->name, conn);
+
+ /* We can't call hci_conn_del/hci_conn_cleanup here since that
+ * could deadlock with another hci_conn_del() call that's holding
+ * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
+ * Instead, grab temporary extra references to the hci_dev and
+ * hci_conn and perform the necessary cleanup in a separate work
+ * callback.
+ */
+
+ hci_dev_hold(conn->hdev);
+ hci_conn_get(conn);
+
+ schedule_work(&conn->le_scan_cleanup);
+}
+
static void hci_acl_create_connection(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
@@ -340,8 +460,12 @@ static void hci_conn_timeout(struct work_struct *work)
if (conn->out) {
if (conn->type == ACL_LINK)
hci_acl_create_connection_cancel(conn);
- else if (conn->type == LE_LINK)
- hci_le_create_connection_cancel(conn);
+ else if (conn->type == LE_LINK) {
+ if (test_bit(HCI_CONN_SCANNING, &conn->flags))
+ hci_connect_le_scan_remove(conn);
+ else
+ hci_le_create_connection_cancel(conn);
+ }
} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
hci_reject_sco(conn);
}
@@ -489,6 +613,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
+ INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);
atomic_set(&conn->refcnt, 0);
@@ -535,27 +660,17 @@ int hci_conn_del(struct hci_conn *conn)
}
}
- hci_chan_list_flush(conn);
-
if (conn->amp_mgr)
amp_mgr_put(conn->amp_mgr);
- hci_conn_hash_del(hdev, conn);
- if (hdev->notify)
- hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
-
skb_queue_purge(&conn->data_q);
- hci_conn_del_sysfs(conn);
-
- debugfs_remove_recursive(conn->debugfs);
-
- if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
- hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
-
- hci_dev_put(hdev);
-
- hci_conn_put(conn);
+ /* Remove the connection from the list and cleanup its remaining
+ * state. This is a separate function since for some cases like
+ * BT_CONNECT_SCAN we *only* want the cleanup part without the
+ * rest of hci_conn_del.
+ */
+ hci_conn_cleanup(conn);
return 0;
}
@@ -637,15 +752,18 @@ static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
struct hci_conn *conn;
- if (status == 0)
- return;
+ hci_dev_lock(hdev);
+
+ conn = hci_lookup_le_connect(hdev);
+
+ if (!status) {
+ hci_connect_le_scan_cleanup(conn);
+ goto done;
+ }
BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
status);
- hci_dev_lock(hdev);
-
- conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
if (!conn)
goto done;
@@ -685,6 +803,7 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
conn->state = BT_CONNECT;
+ clear_bit(HCI_CONN_SCANNING, &conn->flags);
}
static void hci_req_directed_advertising(struct hci_request *req,
@@ -728,7 +847,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 role)
{
struct hci_conn_params *params;
- struct hci_conn *conn;
+ struct hci_conn *conn, *conn_unfinished;
struct smp_irk *irk;
struct hci_request req;
int err;
@@ -751,26 +870,29 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
* and return the object found.
*/
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+ conn_unfinished = NULL;
if (conn) {
- conn->pending_sec_level = sec_level;
- goto done;
+ if (conn->state == BT_CONNECT &&
+ test_bit(HCI_CONN_SCANNING, &conn->flags)) {
+ BT_DBG("will continue unfinished conn %pMR", dst);
+ conn_unfinished = conn;
+ } else {
+ if (conn->pending_sec_level < sec_level)
+ conn->pending_sec_level = sec_level;
+ goto done;
+ }
}
/* Since the controller supports only one LE connection attempt at a
* time, we return -EBUSY if there is any connection attempt running.
*/
- conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
- if (conn)
+ if (hci_lookup_le_connect(hdev))
return ERR_PTR(-EBUSY);
/* When given an identity address with existing identity
* resolving key, the connection needs to be established
* to a resolvable random address.
*
- * This uses the cached random resolvable address from
- * a previous scan. When no cached address is available,
- * try connecting to the identity address instead.
- *
* Storing the resolvable random address is required here
* to handle connection failures. The address will later
* be resolved back into the original identity address
@@ -782,15 +904,23 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
dst_type = ADDR_LE_DEV_RANDOM;
}
- conn = hci_conn_add(hdev, LE_LINK, dst, role);
+ if (conn_unfinished) {
+ conn = conn_unfinished;
+ bacpy(&conn->dst, dst);
+ } else {
+ conn = hci_conn_add(hdev, LE_LINK, dst, role);
+ }
+
if (!conn)
return ERR_PTR(-ENOMEM);
conn->dst_type = dst_type;
conn->sec_level = BT_SECURITY_LOW;
- conn->pending_sec_level = sec_level;
conn->conn_timeout = conn_timeout;
+ if (!conn_unfinished)
+ conn->pending_sec_level = sec_level;
+
hci_req_init(&req, hdev);
/* Disable advertising if we're active. For master role
@@ -855,6 +985,152 @@ create_conn:
}
done:
+ /* If this is continuation of connect started by hci_connect_le_scan,
+ * it already called hci_conn_hold and calling it again would mess the
+ * counter.
+ */
+ if (!conn_unfinished)
+ hci_conn_hold(conn);
+
+ return conn;
+}
+
+static void hci_connect_le_scan_complete(struct hci_dev *hdev, u8 status,
+ u16 opcode)
+{
+ struct hci_conn *conn;
+
+ if (!status)
+ return;
+
+ BT_ERR("Failed to add device to auto conn whitelist: status 0x%2.2x",
+ status);
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+ if (conn)
+ hci_le_conn_failed(conn, status);
+
+ hci_dev_unlock(hdev);
+}
+
+static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
+{
+ struct hci_conn *conn;
+
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
+ if (!conn)
+ return false;
+
+ if (conn->dst_type != type)
+ return false;
+
+ if (conn->state != BT_CONNECTED)
+ return false;
+
+ return true;
+}
+
+/* This function requires the caller holds hdev->lock */
+static int hci_explicit_conn_params_set(struct hci_request *req,
+ bdaddr_t *addr, u8 addr_type)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_conn_params *params;
+
+ if (is_connected(hdev, addr, addr_type))
+ return -EISCONN;
+
+ params = hci_conn_params_lookup(hdev, addr, addr_type);
+ if (!params) {
+ params = hci_conn_params_add(hdev, addr, addr_type);
+ if (!params)
+ return -ENOMEM;
+
+ /* If we created new params, mark them to be deleted in
+ * hci_connect_le_scan_cleanup. It's different case than
+ * existing disabled params, those will stay after cleanup.
+ */
+ params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
+ }
+
+ /* We're trying to connect, so make sure params are at pend_le_conns */
+ if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
+ params->auto_connect == HCI_AUTO_CONN_REPORT ||
+ params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
+ list_del_init(&params->action);
+ list_add(&params->action, &hdev->pend_le_conns);
+ }
+
+ params->explicit_connect = true;
+ __hci_update_background_scan(req);
+
+ BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
+ params->auto_connect);
+
+ return 0;
+}
+
+/* This function requires the caller holds hdev->lock */
+struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
+ u8 dst_type, u8 sec_level,
+ u16 conn_timeout, u8 role)
+{
+ struct hci_conn *conn;
+ struct hci_request req;
+ int err;
+
+ /* Let's make sure that LE is enabled. */
+ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
+ if (lmp_le_capable(hdev))
+ return ERR_PTR(-ECONNREFUSED);
+
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ /* Some devices send ATT messages as soon as the physical link is
+ * established. To be able to handle these ATT messages, the user-
+ * space first establishes the connection and then starts the pairing
+ * process.
+ *
+ * So if a hci_conn object already exists for the following connection
+ * attempt, we simply update pending_sec_level and auth_type fields
+ * and return the object found.
+ */
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+ if (conn) {
+ if (conn->pending_sec_level < sec_level)
+ conn->pending_sec_level = sec_level;
+ goto done;
+ }
+
+ BT_DBG("requesting refresh of dst_addr");
+
+ conn = hci_conn_add(hdev, LE_LINK, dst, role);
+ if (!conn)
+ return ERR_PTR(-ENOMEM);
+
+ hci_req_init(&req, hdev);
+
+ if (hci_explicit_conn_params_set(&req, dst, dst_type) < 0) {
+ hci_conn_del(conn);
+ return ERR_PTR(-EBUSY);
+ }
+
+ conn->state = BT_CONNECT;
+ set_bit(HCI_CONN_SCANNING, &conn->flags);
+
+ err = hci_req_run(&req, hci_connect_le_scan_complete);
+ if (err && err != -ENODATA) {
+ hci_conn_del(conn);
+ return ERR_PTR(err);
+ }
+
+ conn->dst_type = dst_type;
+ conn->sec_level = BT_SECURITY_LOW;
+ conn->pending_sec_level = sec_level;
+ conn->conn_timeout = conn_timeout;
+
+done:
hci_conn_hold(conn);
return conn;
}
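hci_connect_le_scan_remove() above cannot tear the connection down inline: another hci_conn_del() may hold hdev->lock while waiting in cancel_delayed_work_sync(), so it pins the hci_dev and hci_conn with extra references and defers the teardown to a work item that revalidates the object and drops those references itself. A compact model of that shape, with a toy refcount and a direct call standing in for schedule_work():

    #include <stdio.h>

    struct conn {
        int refs;
        int alive;
    };

    static void conn_put(struct conn *c)
    {
        if (--c->refs == 0)
            printf("conn freed\n");
    }

    static void cleanup_work(struct conn *c)
    {
        if (c->alive) {                /* revalidate: still around? */
            c->alive = 0;
            printf("cleanup done\n");
        }
        conn_put(c);                   /* drop the ref taken below */
    }

    static void scan_remove(struct conn *c)
    {
        c->refs++;                     /* keep c valid until work runs */
        cleanup_work(c);               /* stand-in for schedule_work() */
    }

    int main(void)
    {
        struct conn c = { .refs = 1, .alive = 1 };

        scan_remove(&c);
        conn_put(&c);                  /* original owner drops its ref */
        return 0;
    }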
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 2f8fb3306..e83753945 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -2822,10 +2822,6 @@ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
{
struct hci_conn_params *params;
- /* The conn params list only contains identity addresses */
- if (!hci_is_identity_address(addr, addr_type))
- return NULL;
-
list_for_each_entry(params, &hdev->le_conn_params, list) {
if (bacmp(&params->addr, addr) == 0 &&
params->addr_type == addr_type) {
@@ -2842,10 +2838,6 @@ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
{
struct hci_conn_params *param;
- /* The list only contains identity addresses */
- if (!hci_is_identity_address(addr, addr_type))
- return NULL;
-
list_for_each_entry(param, list, action) {
if (bacmp(&param->addr, addr) == 0 &&
param->addr_type == addr_type)
@@ -2856,14 +2848,28 @@ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
}
/* This function requires the caller holds hdev->lock */
+struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
+ bdaddr_t *addr,
+ u8 addr_type)
+{
+ struct hci_conn_params *param;
+
+ list_for_each_entry(param, &hdev->pend_le_conns, action) {
+ if (bacmp(&param->addr, addr) == 0 &&
+ param->addr_type == addr_type &&
+ param->explicit_connect)
+ return param;
+ }
+
+ return NULL;
+}
+
+/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
bdaddr_t *addr, u8 addr_type)
{
struct hci_conn_params *params;
- if (!hci_is_identity_address(addr, addr_type))
- return NULL;
-
params = hci_conn_params_lookup(hdev, addr, addr_type);
if (params)
return params;
@@ -2927,6 +2933,15 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev)
list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
continue;
+
+ /* If trying to establish a one-time connection to a disabled
+ * device, leave the params but mark them as connect-just-once.
+ */
+ if (params->explicit_connect) {
+ params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
+ continue;
+ }
+
list_del(&params->list);
kfree(params);
}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 32363c2b7..bc31099d3 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -55,7 +55,12 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
wake_up_bit(&hdev->flags, HCI_INQUIRY);
hci_dev_lock(hdev);
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ /* Set discovery state to stopped if we're not doing LE active
+ * scanning.
+ */
+ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
+ hdev->le_scan_type != LE_SCAN_ACTIVE)
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
hci_dev_unlock(hdev);
hci_conn_check_pending(hdev);
@@ -823,7 +828,7 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
- goto a2mp_rsp;
+ return;
hdev->amp_status = rp->amp_status;
hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
@@ -835,46 +840,6 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
-
-a2mp_rsp:
- a2mp_send_getinfo_rsp(hdev);
-}
-
-static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
- struct sk_buff *skb)
-{
- struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
- struct amp_assoc *assoc = &hdev->loc_assoc;
- size_t rem_len, frag_len;
-
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
-
- if (rp->status)
- goto a2mp_rsp;
-
- frag_len = skb->len - sizeof(*rp);
- rem_len = __le16_to_cpu(rp->rem_len);
-
- if (rem_len > frag_len) {
- BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
-
- memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
- assoc->offset += frag_len;
-
- /* Read other fragments */
- amp_read_loc_assoc_frag(hdev, rp->phy_handle);
-
- return;
- }
-
- memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
- assoc->len = assoc->offset + rem_len;
- assoc->offset = 0;
-
-a2mp_rsp:
- /* Send A2MP Rsp when all fragments are received */
- a2mp_send_getampassoc_rsp(hdev, rp->status);
- a2mp_send_create_phy_link_req(hdev, rp->status);
}
static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
@@ -1099,7 +1064,7 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_set_flag(hdev, HCI_LE_ADV);
- conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+ conn = hci_lookup_le_connect(hdev);
if (conn)
queue_delayed_work(hdev->workqueue,
&conn->le_conn_timeout,
@@ -1409,20 +1374,6 @@ static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev);
}
-static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
- struct sk_buff *skb)
-{
- struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
-
- BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
- hdev->name, rp->status, rp->phy_handle);
-
- if (rp->status)
- return;
-
- amp_write_rem_assoc_continue(hdev, rp->phy_handle);
-}
-
static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_rssi *rp = (void *) skb->data;
@@ -1944,47 +1895,6 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
hci_dev_unlock(hdev);
}
-static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
-{
- struct hci_cp_create_phy_link *cp;
-
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
- cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
- if (!cp)
- return;
-
- hci_dev_lock(hdev);
-
- if (status) {
- struct hci_conn *hcon;
-
- hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
- if (hcon)
- hci_conn_del(hcon);
- } else {
- amp_write_remote_assoc(hdev, cp->phy_handle);
- }
-
- hci_dev_unlock(hdev);
-}
-
-static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
-{
- struct hci_cp_accept_phy_link *cp;
-
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
- if (status)
- return;
-
- cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
- if (!cp)
- return;
-
- amp_write_remote_assoc(hdev, cp->phy_handle);
-}
-
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
struct hci_cp_le_create_conn *cp;
@@ -2998,10 +2908,6 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
hci_cc_read_clock(hdev, skb);
break;
- case HCI_OP_READ_LOCAL_AMP_ASSOC:
- hci_cc_read_local_amp_assoc(hdev, skb);
- break;
-
case HCI_OP_READ_INQ_RSP_TX_POWER:
hci_cc_read_inq_rsp_tx_power(hdev, skb);
break;
@@ -3106,10 +3012,6 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
hci_cc_set_adv_param(hdev, skb);
break;
- case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
- hci_cc_write_remote_amp_assoc(hdev, skb);
- break;
-
case HCI_OP_READ_RSSI:
hci_cc_read_rssi(hdev, skb);
break;
@@ -3193,14 +3095,6 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
hci_cs_setup_sync_conn(hdev, ev->status);
break;
- case HCI_OP_CREATE_PHY_LINK:
- hci_cs_create_phylink(hdev, ev->status);
- break;
-
- case HCI_OP_ACCEPT_PHY_LINK:
- hci_cs_accept_phylink(hdev, ev->status);
- break;
-
case HCI_OP_SNIFF_MODE:
hci_cs_sniff_mode(hdev, ev->status);
break;
@@ -3837,17 +3731,25 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
if (ev->link_type == ESCO_LINK)
goto unlock;
+ /* When the link type in the event indicates SCO connection
+ * and lookup of the connection object fails, then check
+ * if an eSCO connection object exists.
+ *
+ * The core limits the synchronous connections to either
+ * SCO or eSCO. The eSCO connection is preferred and is
+ * attempted first; until it is successfully established,
+ * the link type will be hinted as eSCO.
+ */
conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
if (!conn)
goto unlock;
-
- conn->type = SCO_LINK;
}
switch (ev->status) {
case 0x00:
conn->handle = __le16_to_cpu(ev->handle);
conn->state = BT_CONNECTED;
+ conn->type = ev->link_type;
hci_debugfs_create_conn(conn);
hci_conn_add_sysfs(conn);
@@ -4399,6 +4301,23 @@ unlock:
hci_dev_unlock(hdev);
}
+#if IS_ENABLED(CONFIG_BT_HS)
+static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_ev_channel_selected *ev = (void *)skb->data;
+ struct hci_conn *hcon;
+
+ BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
+
+ skb_pull(skb, sizeof(*ev));
+
+ hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+ if (!hcon)
+ return;
+
+ amp_read_loc_assoc_final_data(hdev, hcon);
+}
+
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
struct sk_buff *skb)
{
@@ -4522,6 +4441,7 @@ static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
hci_dev_unlock(hdev);
}
+#endif
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
@@ -4540,7 +4460,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
*/
hci_dev_clear_flag(hdev, HCI_LE_ADV);
- conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+ conn = hci_lookup_le_connect(hdev);
if (!conn) {
conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
if (!conn) {
@@ -4733,42 +4653,49 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
/* If we're not connectable only connect devices that we have in
* our pend_le_conns list.
*/
- params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
- addr, addr_type);
+ params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
+ addr_type);
if (!params)
return NULL;
- switch (params->auto_connect) {
- case HCI_AUTO_CONN_DIRECT:
- /* Only devices advertising with ADV_DIRECT_IND are
- * triggering a connection attempt. This is allowing
- * incoming connections from slave devices.
- */
- if (adv_type != LE_ADV_DIRECT_IND)
+ if (!params->explicit_connect) {
+ switch (params->auto_connect) {
+ case HCI_AUTO_CONN_DIRECT:
+ /* Only devices advertising with ADV_DIRECT_IND are
+ * triggering a connection attempt. This is allowing
+ * incoming connections from slave devices.
+ */
+ if (adv_type != LE_ADV_DIRECT_IND)
+ return NULL;
+ break;
+ case HCI_AUTO_CONN_ALWAYS:
+ /* Devices advertising with ADV_IND or ADV_DIRECT_IND
+ * are triggering a connection attempt. This means
+ * that incoming connections from slave devices are
+ * accepted and also outgoing connections to slave
+ * devices are established when found.
+ */
+ break;
+ default:
return NULL;
- break;
- case HCI_AUTO_CONN_ALWAYS:
- /* Devices advertising with ADV_IND or ADV_DIRECT_IND
- * are triggering a connection attempt. This means
- * that incoming connectioms from slave device are
- * accepted and also outgoing connections to slave
- * devices are established when found.
- */
- break;
- default:
- return NULL;
+ }
}
conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
if (!IS_ERR(conn)) {
- /* Store the pointer since we don't really have any
+ /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
+ * by the higher layer that tried to connect; if not, then
+ * store the pointer since we don't really have any
* other owner of the object besides the params that
* triggered it. This way we can abort the connection if
* the parameters get removed and keep the reference
* count consistent once the connection is established.
*/
- params->conn = hci_conn_get(conn);
+
+ if (!params->explicit_connect)
+ params->conn = hci_conn_get(conn);
+
return conn;
}
@@ -5206,22 +5133,6 @@ static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
}
}
-static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
-{
- struct hci_ev_channel_selected *ev = (void *) skb->data;
- struct hci_conn *hcon;
-
- BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
-
- skb_pull(skb, sizeof(*ev));
-
- hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
- if (!hcon)
- return;
-
- amp_read_loc_assoc_final_data(hdev, hcon);
-}
-
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
u8 event, struct sk_buff *skb)
{
@@ -5442,14 +5353,15 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
hci_le_meta_evt(hdev, skb);
break;
- case HCI_EV_CHANNEL_SELECTED:
- hci_chan_selected_evt(hdev, skb);
- break;
-
case HCI_EV_REMOTE_OOB_DATA_REQUEST:
hci_remote_oob_data_request_evt(hdev, skb);
break;
+#if IS_ENABLED(CONFIG_BT_HS)
+ case HCI_EV_CHANNEL_SELECTED:
+ hci_chan_selected_evt(hdev, skb);
+ break;
+
case HCI_EV_PHY_LINK_COMPLETE:
hci_phy_link_complete_evt(hdev, skb);
break;
@@ -5465,6 +5377,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
hci_disconn_phylink_complete_evt(hdev, skb);
break;
+#endif
case HCI_EV_NUM_COMP_BLOCKS:
hci_num_comp_blocks_evt(hdev, skb);
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index d6025d6e6..b7369220c 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -317,7 +317,7 @@ static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
* address be updated at the next cycle.
*/
if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
- hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
+ hci_lookup_le_connect(hdev)) {
BT_DBG("Deferring random address update");
hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
return;
@@ -479,7 +479,6 @@ void hci_update_page_scan(struct hci_dev *hdev)
void __hci_update_background_scan(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
- struct hci_conn *conn;
if (!test_bit(HCI_UP, &hdev->flags) ||
test_bit(HCI_INIT, &hdev->flags) ||
@@ -529,8 +528,7 @@ void __hci_update_background_scan(struct hci_request *req)
* since some controllers are not able to scan and connect at
* the same time.
*/
- conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
- if (conn)
+ if (hci_lookup_le_connect(hdev))
return;
/* If controller is currently scanning, we stop it to ensure we
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index f1a117f8c..0bec4588c 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -401,6 +401,20 @@ static void hidp_idle_timeout(unsigned long arg)
{
struct hidp_session *session = (struct hidp_session *) arg;
+ /* The HIDP user-space API only contains calls to add and remove
+ * devices. There is no way to forward events of any kind. Therefore,
+ * we have to forcefully disconnect a device on idle-timeouts. This is
+ * unfortunate and weird API design, but it is spec-compliant and
+ * required for backwards-compatibility. Hence, on idle-timeout, we
+ * signal driver-detach events, so poll() will be woken up with an
+ * error-condition on both sockets.
+ */
+
+ session->intr_sock->sk->sk_err = EUNATCH;
+ session->ctrl_sock->sk->sk_err = EUNATCH;
+ wake_up_interruptible(sk_sleep(session->intr_sock->sk));
+ wake_up_interruptible(sk_sleep(session->ctrl_sock->sk));
+
hidp_session_terminate(session);
}
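The hidp hunk above forces sk_err on both sockets and wakes their wait queues, so a user-space poll() blocked on either socket returns with an error condition and the daemon can detach. A rough userspace analogue, using socketpair() plus shutdown() as a stand-in for the in-kernel sk_err assignment:

    #include <poll.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int sv[2];
        struct pollfd pfd;

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
            return 1;

        shutdown(sv[0], SHUT_RDWR);    /* ~ sk_err = EUNATCH + wakeup */

        pfd.fd = sv[1];
        pfd.events = POLLIN;
        if (poll(&pfd, 1, 1000) > 0)   /* poller wakes immediately */
            printf("revents: 0x%x\n", pfd.revents);

        close(sv[0]);
        close(sv[1]);
        return 0;
    }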
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 45fffa413..7c65ee200 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -7113,8 +7113,10 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
else
role = HCI_ROLE_MASTER;
- hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
- HCI_LE_CONN_TIMEOUT, role);
+ hcon = hci_connect_le_scan(hdev, dst, dst_type,
+ chan->sec_level,
+ HCI_LE_CONN_TIMEOUT,
+ role);
} else {
u8 auth_type = l2cap_get_auth_type(chan);
hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 244287706..586b3d580 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1054,18 +1054,23 @@ static void l2cap_sock_kill(struct sock *sk)
sock_put(sk);
}
-static int __l2cap_wait_ack(struct sock *sk)
+static int __l2cap_wait_ack(struct sock *sk, struct l2cap_chan *chan)
{
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
DECLARE_WAITQUEUE(wait, current);
int err = 0;
- int timeo = HZ/5;
+ int timeo = L2CAP_WAIT_ACK_POLL_PERIOD;
+ /* Timeout to prevent infinite loop */
+ unsigned long timeout = jiffies + L2CAP_WAIT_ACK_TIMEOUT;
add_wait_queue(sk_sleep(sk), &wait);
set_current_state(TASK_INTERRUPTIBLE);
- while (chan->unacked_frames > 0 && chan->conn) {
+ do {
+ BT_DBG("Waiting for %d ACKs, timeout %04d ms",
+ chan->unacked_frames, time_after(jiffies, timeout) ? 0 :
+ jiffies_to_msecs(timeout - jiffies));
+
if (!timeo)
- timeo = HZ/5;
+ timeo = L2CAP_WAIT_ACK_POLL_PERIOD;
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
@@ -1080,7 +1085,15 @@ static int __l2cap_wait_ack(struct sock *sk)
err = sock_error(sk);
if (err)
break;
- }
+
+ if (time_after(jiffies, timeout)) {
+ err = -ENOLINK;
+ break;
+ }
+
+ } while (chan->unacked_frames > 0 &&
+ chan->state == BT_CONNECTED);
+
set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
return err;
@@ -1098,7 +1111,12 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
if (!sk)
return 0;
+ /* prevent sk structure from being freed whilst unlocked */
+ sock_hold(sk);
+
chan = l2cap_pi(sk)->chan;
+ /* prevent chan structure from being freed whilst unlocked */
+ l2cap_chan_hold(chan);
conn = chan->conn;
BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
@@ -1110,8 +1128,10 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
lock_sock(sk);
if (!sk->sk_shutdown) {
- if (chan->mode == L2CAP_MODE_ERTM)
- err = __l2cap_wait_ack(sk);
+ if (chan->mode == L2CAP_MODE_ERTM &&
+ chan->unacked_frames > 0 &&
+ chan->state == BT_CONNECTED)
+ err = __l2cap_wait_ack(sk, chan);
sk->sk_shutdown = SHUTDOWN_MASK;
@@ -1134,6 +1154,11 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
if (conn)
mutex_unlock(&conn->chan_lock);
+ l2cap_chan_put(chan);
+ sock_put(sk);
+
+ BT_DBG("err: %d", err);
+
return err;
}
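__l2cap_wait_ack() above gains an absolute deadline: each pass still sleeps one poll period, but the loop now bails out with -ENOLINK once jiffies passes the deadline instead of potentially spinning forever on a link that never acks. A self-contained model of the pattern, re-creating the wraparound-safe time_after() for a plain counter:

    #include <stdio.h>

    /* same signed-difference trick the kernel macro uses */
    #define time_after(a, b) ((long)((b) - (a)) < 0)

    static unsigned long jiffies;      /* fake tick counter */
    static int unacked = 3;

    static int wait_for_acks(unsigned long timeout_ticks)
    {
        unsigned long timeout = jiffies + timeout_ticks;

        do {
            jiffies++;                 /* one poll period passes */
            if (unacked > 0)
                unacked--;             /* an ack trickles in */

            if (time_after(jiffies, timeout))
                return -1;             /* -ENOLINK in the real code */
        } while (unacked > 0);

        return 0;
    }

    int main(void)
    {
        printf("result: %d (unacked left: %d)\n",
               wait_for_acks(10), unacked);
        return 0;
    }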
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 92720f3fe..72c9376ec 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -3090,6 +3090,11 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
} else {
u8 addr_type;
+ if (cp->addr.type == BDADDR_LE_PUBLIC)
+ addr_type = ADDR_LE_DEV_PUBLIC;
+ else
+ addr_type = ADDR_LE_DEV_RANDOM;
+
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
&cp->addr.bdaddr);
if (conn) {
@@ -3105,13 +3110,10 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
*/
if (!cp->disconnect)
conn = NULL;
+ } else {
+ hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
}
- if (cp->addr.type == BDADDR_LE_PUBLIC)
- addr_type = ADDR_LE_DEV_PUBLIC;
- else
- addr_type = ADDR_LE_DEV_RANDOM;
-
hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
@@ -3545,6 +3547,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
auth_type);
} else {
u8 addr_type;
+ struct hci_conn_params *p;
/* Convert from L2CAP channel address type to HCI address type
*/
@@ -3562,11 +3565,15 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
* If connection parameters already exist, then they
* will be kept and this function does nothing.
*/
- hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
+ p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
+
+ if (p && p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
+ p->auto_connect = HCI_AUTO_CONN_DISABLED;
- conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
- sec_level, HCI_LE_CONN_TIMEOUT,
- HCI_ROLE_MASTER);
+ conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
+ addr_type, sec_level,
+ HCI_LE_CONN_TIMEOUT,
+ HCI_ROLE_MASTER);
}
if (IS_ERR(conn)) {
@@ -4210,7 +4217,7 @@ static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
/* Don't let discovery abort an outgoing connection attempt
* that's using directed advertising.
*/
- if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
+ if (hci_lookup_le_connect(hdev)) {
*status = MGMT_STATUS_REJECTED;
return false;
}
@@ -6107,17 +6114,30 @@ static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
switch (auto_connect) {
case HCI_AUTO_CONN_DISABLED:
case HCI_AUTO_CONN_LINK_LOSS:
+ /* If auto connect is being disabled when we're trying to
+ * connect to the device, keep connecting.
+ */
+ if (params->explicit_connect)
+ list_add(&params->action, &hdev->pend_le_conns);
+
__hci_update_background_scan(req);
break;
case HCI_AUTO_CONN_REPORT:
- list_add(&params->action, &hdev->pend_le_reports);
+ if (params->explicit_connect)
+ list_add(&params->action, &hdev->pend_le_conns);
+ else
+ list_add(&params->action, &hdev->pend_le_reports);
__hci_update_background_scan(req);
break;
case HCI_AUTO_CONN_DIRECT:
case HCI_AUTO_CONN_ALWAYS:
if (!is_connected(hdev, addr, addr_type)) {
list_add(&params->action, &hdev->pend_le_conns);
- __hci_update_background_scan(req);
+ /* If we are in the scan phase of connecting, we were
+ * already added to pend_le_conns and are scanning.
+ */
+ if (params->auto_connect != HCI_AUTO_CONN_EXPLICIT)
+ __hci_update_background_scan(req);
}
break;
}
@@ -6226,6 +6246,17 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
else
auto_conn = HCI_AUTO_CONN_REPORT;
+ /* Kernel internally uses conn_params with resolvable private
+ * address, but Add Device allows only identity addresses.
+ * Make sure it is enforced before calling
+ * hci_conn_params_lookup.
+ */
+ if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
+ err = cmd->cmd_complete(cmd, MGMT_STATUS_INVALID_PARAMS);
+ mgmt_pending_remove(cmd);
+ goto unlock;
+ }
+
/* If the connection parameters don't exist for this device,
* they will be created and configured with defaults.
*/
@@ -6340,6 +6371,18 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
else
addr_type = ADDR_LE_DEV_RANDOM;
+ /* Kernel internally uses conn_params with resolvable private
+ * address, but Remove Device allows only identity addresses.
+ * Make sure it is enforced before calling
+ * hci_conn_params_lookup.
+ */
+ if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
+ err = cmd->cmd_complete(cmd,
+ MGMT_STATUS_INVALID_PARAMS);
+ mgmt_pending_remove(cmd);
+ goto unlock;
+ }
+
params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
addr_type);
if (!params) {
@@ -6349,7 +6392,8 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
- if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
+ if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
+ params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
err = cmd->cmd_complete(cmd,
MGMT_STATUS_INVALID_PARAMS);
mgmt_pending_remove(cmd);
@@ -6385,6 +6429,10 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
continue;
device_removed(sk, hdev, &p->addr, p->addr_type);
+ if (p->explicit_connect) {
+ p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
+ continue;
+ }
list_del(&p->action);
list_del(&p->list);
kfree(p);
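remove_device() above, like hci_conn_params_clear_disabled() earlier in the series, now skips params with explicit_connect set, retagging them HCI_AUTO_CONN_EXPLICIT instead of freeing them, so an in-flight one-shot connect survives the flush. A sketch of that selective-clear idea over a plain array, with invented types:

    #include <stdio.h>

    enum auto_conn { DISABLED, EXPLICIT, ALWAYS };

    struct params {
        enum auto_conn auto_connect;
        int explicit_connect;
        int dead;                      /* marks list_del + kfree */
    };

    static void clear_params(struct params *p, int n)
    {
        for (int i = 0; i < n; i++) {
            if (p[i].explicit_connect) {
                p[i].auto_connect = EXPLICIT;  /* keep: connect once */
                continue;
            }
            p[i].dead = 1;             /* everything else is freed */
        }
    }

    int main(void)
    {
        struct params p[2] = {
            { ALWAYS, 1, 0 },          /* one-shot connect in flight */
            { ALWAYS, 0, 0 },
        };

        clear_params(p, 2);
        for (int i = 0; i < 2; i++)
            printf("entry %d: auto=%d dead=%d\n",
                   i, p[i].auto_connect, p[i].dead);
        return 0;
    }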
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 688a040c5..f315c8d0e 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -154,13 +154,13 @@ static void sco_chan_del(struct sock *sk, int err)
sock_set_flag(sk, SOCK_ZAPPED);
}
-static int sco_conn_del(struct hci_conn *hcon, int err)
+static void sco_conn_del(struct hci_conn *hcon, int err)
{
struct sco_conn *conn = hcon->sco_data;
struct sock *sk;
if (!conn)
- return 0;
+ return;
BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
@@ -179,7 +179,6 @@ static int sco_conn_del(struct hci_conn *hcon, int err)
hcon->sco_data = NULL;
kfree(conn);
- return 0;
}
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 4ff77a169..6ed2feb51 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -339,6 +339,7 @@ static const struct net_device_ops br_netdev_ops = {
.ndo_bridge_getlink = br_getlink,
.ndo_bridge_setlink = br_setlink,
.ndo_bridge_dellink = br_dellink,
+ .ndo_features_check = passthru_features_check,
};
static void br_dev_free(struct net_device *dev)
@@ -364,8 +365,7 @@ void br_dev_setup(struct net_device *dev)
dev->destructor = br_dev_free;
dev->ethtool_ops = &br_ethtool_ops;
SET_NETDEV_DEVTYPE(dev, &br_type);
- dev->tx_queue_len = 0;
- dev->priv_flags = IFF_EBRIDGE;
+ dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
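
This hunk is part of a tree-wide convention change: instead of signalling "no queue needed" by zeroing tx_queue_len, virtual devices now set IFF_NO_QUEUE (the caif and alloc_netdev_mqs() hunks later in this diff are the read side and the compatibility shim). A self-contained sketch of the pattern, with the flag value and struct reduced to illustrative stand-ins:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the kernel's private flag. */
	#define IFF_NO_QUEUE (1u << 0)

	struct netdev {
		unsigned int priv_flags;
		unsigned long tx_queue_len;
	};

	/* Sketch of the compatibility shim added to alloc_netdev_mqs():
	 * devices that still zero tx_queue_len in their setup callback
	 * are transparently promoted to the IFF_NO_QUEUE convention.
	 */
	static void register_compat(struct netdev *dev)
	{
		if (!dev->tx_queue_len)
			dev->priv_flags |= IFF_NO_QUEUE;
	}

	static bool wants_qdisc(const struct netdev *dev)
	{
		return !(dev->priv_flags & IFF_NO_QUEUE);
	}

	int main(void)
	{
		struct netdev legacy = { .priv_flags = 0, .tx_queue_len = 0 };

		register_compat(&legacy);
		printf("legacy bridge needs a qdisc: %s\n",
		       wants_qdisc(&legacy) ? "yes" : "no");
		return 0;
	}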
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index a538cb119..45e4757c6 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -281,6 +281,7 @@ void br_dev_delete(struct net_device *dev, struct list_head *head)
br_fdb_delete_by_port(br, NULL, 0, 1);
br_vlan_flush(br);
+ br_multicast_dev_del(br);
del_timer_sync(&br->gc_timer);
br_sysfs_delbr(br->dev);
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index c94321955..d747275fa 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -85,6 +85,7 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
memset(&e, 0, sizeof(e));
e.ifindex = port->dev->ifindex;
e.state = p->state;
+ e.vid = p->addr.vid;
if (p->addr.proto == htons(ETH_P_IP))
e.addr.u.ip4 = p->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
@@ -230,7 +231,7 @@ errout:
}
void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
- struct br_ip *group, int type)
+ struct br_ip *group, int type, u8 state)
{
struct br_mdb_entry entry;
@@ -241,9 +242,78 @@ void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
#if IS_ENABLED(CONFIG_IPV6)
entry.addr.u.ip6 = group->u.ip6;
#endif
+ entry.state = state;
+ entry.vid = group->vid;
__br_mdb_notify(dev, &entry, type);
}
+static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
+ struct net_device *dev,
+ int ifindex, u32 pid,
+ u32 seq, int type, unsigned int flags)
+{
+ struct br_port_msg *bpm;
+ struct nlmsghdr *nlh;
+ struct nlattr *nest;
+
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ bpm = nlmsg_data(nlh);
+ memset(bpm, 0, sizeof(*bpm));
+ bpm->family = AF_BRIDGE;
+ bpm->ifindex = dev->ifindex;
+ nest = nla_nest_start(skb, MDBA_ROUTER);
+ if (!nest)
+ goto cancel;
+
+ if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
+ goto end;
+
+ nla_nest_end(skb, nest);
+ nlmsg_end(skb, nlh);
+ return 0;
+
+end:
+ nla_nest_end(skb, nest);
+cancel:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+static inline size_t rtnl_rtr_nlmsg_size(void)
+{
+ return NLMSG_ALIGN(sizeof(struct br_port_msg))
+ + nla_total_size(sizeof(__u32));
+}
+
+void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
+ int type)
+{
+ struct net *net = dev_net(dev);
+ struct sk_buff *skb;
+ int err = -ENOBUFS;
+ int ifindex;
+
+ ifindex = port ? port->dev->ifindex : 0;
+ skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
+ if (!skb)
+ goto errout;
+
+ err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
+ if (err < 0) {
+ kfree_skb(skb);
+ goto errout;
+ }
+
+ rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
+ return;
+
+errout:
+ rtnl_set_sk_err(net, RTNLGRP_MDB, err);
+}
+
static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
{
if (entry->ifindex == 0)
@@ -263,6 +333,8 @@ static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
return false;
if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
return false;
+ if (entry->vid >= VLAN_VID_MASK)
+ return false;
return true;
}
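
The new bound check closes a hole where a forged netlink request could carry an out-of-range VLAN ID into the MDB. As a sketch, assuming only that 802.1Q uses 12-bit IDs and reserves VID 4095:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define VLAN_VID_MASK 0x0fff	/* 12-bit VLAN ID space */

	/* Sketch of the added bound check: VID 0 means "untagged /
	 * all vlans" to this interface, 1..4094 are real VLANs, and
	 * 4095 (== VLAN_VID_MASK) is reserved by 802.1Q, so anything
	 * >= VLAN_VID_MASK is rejected.
	 */
	static bool mdb_vid_ok(uint16_t vid)
	{
		return vid < VLAN_VID_MASK;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       mdb_vid_ok(0), mdb_vid_ok(4094), mdb_vid_ok(4095));
		return 0;
	}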
@@ -374,6 +446,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
return -EINVAL;
memset(&ip, 0, sizeof(ip));
+ ip.vid = entry->vid;
ip.proto = entry->addr.proto;
if (ip.proto == htons(ETH_P_IP))
ip.u.ip4 = entry->addr.u.ip4;
@@ -391,8 +464,11 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
+ unsigned short vid = VLAN_N_VID;
+ struct net_device *dev, *pdev;
struct br_mdb_entry *entry;
- struct net_device *dev;
+ struct net_bridge_port *p;
+ struct net_port_vlans *pv;
struct net_bridge *br;
int err;
@@ -402,9 +478,32 @@ static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
br = netdev_priv(dev);
- err = __br_mdb_add(net, br, entry);
- if (!err)
- __br_mdb_notify(dev, entry, RTM_NEWMDB);
+ /* If vlan filtering is enabled and VLAN is not specified,
+ * install the mdb entry on all vlans configured on the port.
+ */
+ pdev = __dev_get_by_index(net, entry->ifindex);
+ if (!pdev)
+ return -ENODEV;
+
+ p = br_port_get_rtnl(pdev);
+ if (!p || p->br != br || p->state == BR_STATE_DISABLED)
+ return -EINVAL;
+
+ pv = nbp_get_vlan_info(p);
+ if (br_vlan_enabled(br) && pv && entry->vid == 0) {
+ for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
+ entry->vid = vid;
+ err = __br_mdb_add(net, br, entry);
+ if (err)
+ break;
+ __br_mdb_notify(dev, entry, RTM_NEWMDB);
+ }
+ } else {
+ err = __br_mdb_add(net, br, entry);
+ if (!err)
+ __br_mdb_notify(dev, entry, RTM_NEWMDB);
+ }
+
return err;
}
@@ -421,6 +520,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
return -EINVAL;
memset(&ip, 0, sizeof(ip));
+ ip.vid = entry->vid;
ip.proto = entry->addr.proto;
if (ip.proto == htons(ETH_P_IP))
ip.u.ip4 = entry->addr.u.ip4;
@@ -465,8 +565,12 @@ unlock:
static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
{
- struct net_device *dev;
+ struct net *net = sock_net(skb->sk);
+ unsigned short vid = VLAN_N_VID;
+ struct net_device *dev, *pdev;
struct br_mdb_entry *entry;
+ struct net_bridge_port *p;
+ struct net_port_vlans *pv;
struct net_bridge *br;
int err;
@@ -476,9 +580,31 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
br = netdev_priv(dev);
- err = __br_mdb_del(br, entry);
- if (!err)
- __br_mdb_notify(dev, entry, RTM_DELMDB);
+ /* If vlan filtering is enabled and VLAN is not specified,
+ * delete the mdb entry on all vlans configured on the port.
+ */
+ pdev = __dev_get_by_index(net, entry->ifindex);
+ if (!pdev)
+ return -ENODEV;
+
+ p = br_port_get_rtnl(pdev);
+ if (!p || p->br != br || p->state == BR_STATE_DISABLED)
+ return -EINVAL;
+
+ pv = nbp_get_vlan_info(p);
+ if (br_vlan_enabled(br) && pv && entry->vid == 0) {
+ for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
+ entry->vid = vid;
+ err = __br_mdb_del(br, entry);
+ if (!err)
+ __br_mdb_notify(dev, entry, RTM_DELMDB);
+ }
+ } else {
+ err = __br_mdb_del(br, entry);
+ if (!err)
+ __br_mdb_notify(dev, entry, RTM_DELMDB);
+ }
+
return err;
}
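
Both br_mdb_add() and br_mdb_del() now share the same fan-out shape: with VLAN filtering on and VID 0 in the request, the operation is repeated for every VLAN set in the port's bitmap. A userspace model of that walk, with for_each_set_bit() reduced to a plain loop and illustrative names:

	#include <stdint.h>
	#include <stdio.h>

	#define VLAN_N_VID 4096
	#define BITS_PER_LONG (8 * sizeof(unsigned long))

	/* Model of the per-VLAN fan-out: apply the same callback once
	 * per VLAN configured on the port, by walking its vlan bitmap.
	 */
	static void for_each_vlan(const unsigned long *bitmap,
				  void (*fn)(uint16_t vid))
	{
		for (unsigned int vid = 0; vid < VLAN_N_VID; vid++)
			if (bitmap[vid / BITS_PER_LONG] &
			    (1UL << (vid % BITS_PER_LONG)))
				fn(vid);
	}

	static void add_entry(uint16_t vid)
	{
		printf("installing mdb entry for vid %u\n", vid);
	}

	int main(void)
	{
		unsigned long vlan_bitmap[VLAN_N_VID / BITS_PER_LONG] = { 0 };

		vlan_bitmap[0] |= 1UL << 10;	/* port carries VLANs 10 and 20 */
		vlan_bitmap[0] |= 1UL << 20;
		for_each_vlan(vlan_bitmap, add_entry);
		return 0;
	}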
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 03cdb9e18..480b3de1a 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -283,6 +283,8 @@ static void br_multicast_del_pg(struct net_bridge *br,
rcu_assign_pointer(*pp, p->next);
hlist_del_init(&p->mglist);
del_timer(&p->timer);
+ br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
+ p->state);
call_rcu_bh(&p->rcu, br_multicast_free_pg);
if (!mp->ports && !mp->mglist &&
@@ -704,7 +706,7 @@ static int br_multicast_add_group(struct net_bridge *br,
if (unlikely(!p))
goto err;
rcu_assign_pointer(*pp, p);
- br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
+ br_mdb_notify(br->dev, port, group, RTM_NEWMDB, MDB_TEMPORARY);
found:
mod_timer(&p->timer, now + br->multicast_membership_interval);
@@ -764,6 +766,7 @@ static void br_multicast_router_expired(unsigned long data)
goto out;
hlist_del_init_rcu(&port->rlist);
+ br_rtr_notify(br->dev, port, RTM_DELMDB);
out:
spin_unlock(&br->multicast_lock);
@@ -924,6 +927,15 @@ void br_multicast_add_port(struct net_bridge_port *port)
void br_multicast_del_port(struct net_bridge_port *port)
{
+ struct net_bridge *br = port->br;
+ struct net_bridge_port_group *pg;
+ struct hlist_node *n;
+
+ /* Take care of the remaining groups; only permanent ones should be left */
+ spin_lock_bh(&br->multicast_lock);
+ hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
+ br_multicast_del_pg(br, pg);
+ spin_unlock_bh(&br->multicast_lock);
del_timer_sync(&port->multicast_router_timer);
}
@@ -963,10 +975,13 @@ void br_multicast_disable_port(struct net_bridge_port *port)
spin_lock(&br->multicast_lock);
hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
- br_multicast_del_pg(br, pg);
+ if (pg->state == MDB_TEMPORARY)
+ br_multicast_del_pg(br, pg);
- if (!hlist_unhashed(&port->rlist))
+ if (!hlist_unhashed(&port->rlist)) {
hlist_del_init_rcu(&port->rlist);
+ br_rtr_notify(br->dev, port, RTM_DELMDB);
+ }
del_timer(&port->multicast_router_timer);
del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
@@ -1204,6 +1219,7 @@ static void br_multicast_add_router(struct net_bridge *br,
hlist_add_behind_rcu(&port->rlist, slot);
else
hlist_add_head_rcu(&port->rlist, &br->router_list);
+ br_rtr_notify(br->dev, port, RTM_NEWMDB);
}
static void br_multicast_mark_router(struct net_bridge *br,
@@ -1437,7 +1453,8 @@ br_multicast_leave_group(struct net_bridge *br,
hlist_del_init(&p->mglist);
del_timer(&p->timer);
call_rcu_bh(&p->rcu, br_multicast_free_pg);
- br_mdb_notify(br->dev, port, group, RTM_DELMDB);
+ br_mdb_notify(br->dev, port, group, RTM_DELMDB,
+ p->state);
if (!mp->ports && !mp->mglist &&
netif_running(br->dev))
@@ -1754,12 +1771,6 @@ void br_multicast_open(struct net_bridge *br)
void br_multicast_stop(struct net_bridge *br)
{
- struct net_bridge_mdb_htable *mdb;
- struct net_bridge_mdb_entry *mp;
- struct hlist_node *n;
- u32 ver;
- int i;
-
del_timer_sync(&br->multicast_router_timer);
del_timer_sync(&br->ip4_other_query.timer);
del_timer_sync(&br->ip4_own_query.timer);
@@ -1767,6 +1778,15 @@ void br_multicast_stop(struct net_bridge *br)
del_timer_sync(&br->ip6_other_query.timer);
del_timer_sync(&br->ip6_own_query.timer);
#endif
+}
+
+void br_multicast_dev_del(struct net_bridge *br)
+{
+ struct net_bridge_mdb_htable *mdb;
+ struct net_bridge_mdb_entry *mp;
+ struct hlist_node *n;
+ u32 ver;
+ int i;
spin_lock_bh(&br->multicast_lock);
mdb = mlock_dereference(br->mdb, br);
@@ -1834,8 +1854,10 @@ int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
p->multicast_router = val;
err = 0;
- if (val < 2 && !hlist_unhashed(&p->rlist))
+ if (val < 2 && !hlist_unhashed(&p->rlist)) {
hlist_del_init_rcu(&p->rlist);
+ br_rtr_notify(br->dev, p, RTM_DELMDB);
+ }
if (val == 1)
break;
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index c8b9bcfe9..0a6f095bb 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -49,9 +49,9 @@ static struct ctl_table_header *brnf_sysctl_header;
static int brnf_call_iptables __read_mostly = 1;
static int brnf_call_ip6tables __read_mostly = 1;
static int brnf_call_arptables __read_mostly = 1;
-static int brnf_filter_vlan_tagged __read_mostly = 0;
-static int brnf_filter_pppoe_tagged __read_mostly = 0;
-static int brnf_pass_vlan_indev __read_mostly = 0;
+static int brnf_filter_vlan_tagged __read_mostly;
+static int brnf_filter_pppoe_tagged __read_mostly;
+static int brnf_pass_vlan_indev __read_mostly;
#else
#define brnf_call_iptables 1
#define brnf_call_ip6tables 1
@@ -284,7 +284,7 @@ int br_nf_pre_routing_finish_bridge(struct sock *sk, struct sk_buff *skb)
nf_bridge->neigh_header,
ETH_HLEN-ETH_ALEN);
/* tell br_dev_xmit to continue with forwarding */
- nf_bridge->mask |= BRNF_BRIDGED_DNAT;
+ nf_bridge->bridged_dnat = 1;
/* FIXME Need to refragment */
ret = neigh->output(neigh, skb);
}
@@ -356,7 +356,7 @@ static int br_nf_pre_routing_finish(struct sock *sk, struct sk_buff *skb)
skb->pkt_type = PACKET_OTHERHOST;
nf_bridge->pkt_otherhost = false;
}
- nf_bridge->mask &= ~BRNF_NF_BRIDGE_PREROUTING;
+ nf_bridge->in_prerouting = 0;
if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) {
if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
struct in_device *in_dev = __in_dev_get_rcu(dev);
@@ -444,7 +444,7 @@ struct net_device *setup_pre_routing(struct sk_buff *skb)
nf_bridge->pkt_otherhost = true;
}
- nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
+ nf_bridge->in_prerouting = 1;
nf_bridge->physindev = skb->dev;
skb->dev = brnf_get_logical_dev(skb, skb->dev);
@@ -850,10 +850,8 @@ static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
- if (skb->nf_bridge &&
- !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
+ if (skb->nf_bridge && !skb->nf_bridge->in_prerouting)
return NF_STOP;
- }
return NF_ACCEPT;
}
@@ -872,7 +870,7 @@ static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
skb_pull(skb, ETH_HLEN);
- nf_bridge->mask &= ~BRNF_BRIDGED_DNAT;
+ nf_bridge->bridged_dnat = 0;
BUILD_BUG_ON(sizeof(nf_bridge->neigh_header) != (ETH_HLEN - ETH_ALEN));
@@ -887,7 +885,7 @@ static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
static int br_nf_dev_xmit(struct sk_buff *skb)
{
- if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
+ if (skb->nf_bridge && skb->nf_bridge->bridged_dnat) {
br_nf_pre_routing_finish_bridge_slow(skb);
return 1;
}
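
The BRNF_* conversions in this file all follow one mechanical rule: a flag that used to be a bit in nf_bridge->mask becomes a named one-bit bitfield. A small before/after sketch (flag values illustrative, not the kernel's):

	#include <stdio.h>

	/* Before: state packed into a shared mask word. */
	#define BRNF_NF_BRIDGE_PREROUTING 0x08	/* illustrative value */
	#define BRNF_BRIDGED_DNAT         0x10	/* illustrative value */

	struct nf_bridge_old {
		unsigned int mask;
	};

	/* After: one named bitfield per flag, as in this patch. The
	 * compiler does the masking, and each use site names exactly
	 * the state it cares about.
	 */
	struct nf_bridge_new {
		unsigned int in_prerouting:1;
		unsigned int bridged_dnat:1;
	};

	int main(void)
	{
		struct nf_bridge_old o = { .mask = BRNF_BRIDGED_DNAT };
		struct nf_bridge_new n = { .bridged_dnat = 1 };

		printf("old: %d new: %d\n",
		       !!(o.mask & BRNF_BRIDGED_DNAT), n.bridged_dnat);
		return 0;
	}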
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index 13b7d1e3d..77383bfe7 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -174,7 +174,7 @@ static int br_nf_pre_routing_finish_ipv6(struct sock *sk, struct sk_buff *skb)
skb->pkt_type = PACKET_OTHERHOST;
nf_bridge->pkt_otherhost = false;
}
- nf_bridge->mask &= ~BRNF_NF_BRIDGE_PREROUTING;
+ nf_bridge->in_prerouting = 0;
if (br_nf_ipv6_daddr_was_changed(skb, nf_bridge)) {
skb_dst_drop(skb);
v6ops->route_input(skb);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 4d74a0639..ea748c93a 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -16,7 +16,6 @@
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
-#include <net/switchdev.h>
#include <uapi/linux/if_bridge.h>
#include "br_private.h"
@@ -166,8 +165,6 @@ static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
sizeof(vinfo), &vinfo))
goto nla_put_failure;
- vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN;
-
vinfo.vid = vid_end;
vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
@@ -675,6 +672,21 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
return -EADDRNOTAVAIL;
}
+ if (!data)
+ return 0;
+
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+ if (data[IFLA_BR_VLAN_PROTOCOL]) {
+ switch (nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])) {
+ case htons(ETH_P_8021Q):
+ case htons(ETH_P_8021AD):
+ break;
+ default:
+ return -EPROTONOSUPPORT;
+ }
+ }
+#endif
+
return 0;
}
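
br_validate() now rejects unsupported VLAN protocols while the netlink request is still being validated, comparing the big-endian attribute against htons() of the two EtherTypes rather than converting the value itself. A userspace sketch of the same check, using an if-chain instead of the kernel's switch since htons() is not a case-label constant in portable userspace C:

	#include <arpa/inet.h>	/* htons() */
	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>

	#define ETH_P_8021Q  0x8100	/* 802.1Q VLAN */
	#define ETH_P_8021AD 0x88A8	/* 802.1ad service VLAN (QinQ) */

	/* Sketch of the check: the attribute is big-endian on the wire,
	 * so it is compared against htons() of the host-order EtherTypes.
	 */
	static int validate_vlan_proto(uint16_t proto_be)
	{
		if (proto_be != htons(ETH_P_8021Q) &&
		    proto_be != htons(ETH_P_8021AD))
			return -EPROTONOSUPPORT;
		return 0;
	}

	int main(void)
	{
		printf("%d %d\n", validate_vlan_proto(htons(0x8100)),
		       validate_vlan_proto(htons(0x0800)));
		return 0;
	}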
@@ -730,6 +742,8 @@ static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
[IFLA_BR_AGEING_TIME] = { .type = NLA_U32 },
[IFLA_BR_STP_STATE] = { .type = NLA_U32 },
[IFLA_BR_PRIORITY] = { .type = NLA_U16 },
+ [IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
+ [IFLA_BR_VLAN_PROTOCOL] = { .type = NLA_U16 },
};
static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
@@ -777,6 +791,24 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
br_stp_set_bridge_priority(br, priority);
}
+ if (data[IFLA_BR_VLAN_FILTERING]) {
+ u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);
+
+ err = __br_vlan_filter_toggle(br, vlan_filter);
+ if (err)
+ return err;
+ }
+
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+ if (data[IFLA_BR_VLAN_PROTOCOL]) {
+ __be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]);
+
+ err = __br_vlan_set_proto(br, vlan_proto);
+ if (err)
+ return err;
+ }
+#endif
+
return 0;
}
@@ -788,6 +820,10 @@ static size_t br_get_size(const struct net_device *brdev)
nla_total_size(sizeof(u32)) + /* IFLA_BR_AGEING_TIME */
nla_total_size(sizeof(u32)) + /* IFLA_BR_STP_STATE */
nla_total_size(sizeof(u16)) + /* IFLA_BR_PRIORITY */
+ nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_FILTERING */
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+ nla_total_size(sizeof(__be16)) + /* IFLA_BR_VLAN_PROTOCOL */
+#endif
0;
}
@@ -800,14 +836,21 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
u32 stp_enabled = br->stp_enabled;
u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
+ u8 vlan_enabled = br_vlan_enabled(br);
if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
- nla_put_u16(skb, IFLA_BR_PRIORITY, priority))
+ nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
+ nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled))
+ return -EMSGSIZE;
+
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+ if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto))
return -EMSGSIZE;
+#endif
return 0;
}
@@ -839,7 +882,7 @@ struct rtnl_link_ops br_link_ops __read_mostly = {
.kind = "bridge",
.priv_size = sizeof(struct net_bridge),
.setup = br_dev_setup,
- .maxtype = IFLA_BRPORT_MAX,
+ .maxtype = IFLA_BR_MAX,
.policy = br_policy,
.validate = br_validate,
.newlink = br_dev_newlink,
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 8b21146b2..213baf7aa 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -95,15 +95,15 @@ struct net_bridge_fdb_entry
struct hlist_node hlist;
struct net_bridge_port *dst;
- struct rcu_head rcu;
unsigned long updated;
unsigned long used;
mac_addr addr;
+ __u16 vlan_id;
unsigned char is_local:1,
is_static:1,
added_by_user:1,
added_by_external_learn:1;
- __u16 vlan_id;
+ struct rcu_head rcu;
};
struct net_bridge_port_group {
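
The net_bridge_fdb_entry reshuffle is a layout optimisation, not a functional change: vlan_id moves up beside the lookup fields, and the rcu_head, which is only touched when the entry is freed, moves to the end so it no longer splits the hot fields. A sketch with illustrative field names showing how to check where the hot area ends:

	#include <stddef.h>
	#include <stdio.h>

	struct rcu_head_model { void *next, *func; };

	/* Layout model of the reordered entry: fields touched on every
	 * lookup come first; the rcu_head, needed only at teardown, is
	 * pushed last so the hot fields stay contiguous.
	 */
	struct fdb_entry_model {
		void *hlist_next;
		void *dst;
		unsigned long updated;
		unsigned long used;
		unsigned char addr[6];
		unsigned short vlan_id;
		unsigned char is_local:1, is_static:1;
		struct rcu_head_model rcu;	/* cold: moved last */
	};

	int main(void)
	{
		printf("hot area ends at byte %zu of %zu\n",
		       offsetof(struct fdb_entry_model, rcu),
		       sizeof(struct fdb_entry_model));
		return 0;
	}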
@@ -466,6 +466,7 @@ void br_multicast_disable_port(struct net_bridge_port *port);
void br_multicast_init(struct net_bridge *br);
void br_multicast_open(struct net_bridge *br);
void br_multicast_stop(struct net_bridge *br);
+void br_multicast_dev_del(struct net_bridge *br);
void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
struct sk_buff *skb);
void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
@@ -488,7 +489,9 @@ br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
void br_mdb_init(void);
void br_mdb_uninit(void);
void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
- struct br_ip *group, int type);
+ struct br_ip *group, int type, u8 state);
+void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
+ int type);
#define mlock_dereference(X, br) \
rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
@@ -565,6 +568,10 @@ static inline void br_multicast_stop(struct net_bridge *br)
{
}
+static inline void br_multicast_dev_del(struct net_bridge *br)
+{
+}
+
static inline void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
struct sk_buff *skb)
{
@@ -607,7 +614,9 @@ int br_vlan_delete(struct net_bridge *br, u16 vid);
void br_vlan_flush(struct net_bridge *br);
bool br_vlan_find(struct net_bridge *br, u16 vid);
void br_recalculate_fwd_mask(struct net_bridge *br);
+int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
+int __br_vlan_set_proto(struct net_bridge *br, __be16 proto);
int br_vlan_set_proto(struct net_bridge *br, unsigned long val);
int br_vlan_init(struct net_bridge *br);
int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val);
@@ -764,6 +773,12 @@ static inline int br_vlan_enabled(struct net_bridge *br)
{
return 0;
}
+
+static inline int __br_vlan_filter_toggle(struct net_bridge *br,
+ unsigned long val)
+{
+ return -EOPNOTSUPP;
+}
#endif
struct nf_br_ops {
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 0d41f8183..5f5a02b49 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -117,10 +117,11 @@ out_filt:
return err;
}
-static void __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
- u16 vid)
+static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
+ u16 vid)
{
const struct net_device_ops *ops = dev->netdev_ops;
+ int err = 0;
/* If driver uses VLAN ndo ops, use 8021q to delete vid
* on device, otherwise try switchdev ops to delete vid.
@@ -137,8 +138,12 @@ static void __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
},
};
- switchdev_port_obj_del(dev, &vlan_obj);
+ err = switchdev_port_obj_del(dev, &vlan_obj);
+ if (err == -EOPNOTSUPP)
+ err = 0;
}
+
+ return err;
}
static int __vlan_del(struct net_port_vlans *v, u16 vid)
@@ -151,7 +156,11 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
if (v->port_idx) {
struct net_bridge_port *p = v->parent.port;
- __vlan_vid_del(p->dev, p->br, vid);
+ int err;
+
+ err = __vlan_vid_del(p->dev, p->br, vid);
+ if (err)
+ return err;
}
clear_bit(vid, v->vlan_bitmap);
@@ -468,41 +477,40 @@ void br_recalculate_fwd_mask(struct net_bridge *br)
~(1u << br->group_addr[5]);
}
-int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
+int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
- if (!rtnl_trylock())
- return restart_syscall();
-
if (br->vlan_enabled == val)
- goto unlock;
+ return 0;
br->vlan_enabled = val;
br_manage_promisc(br);
recalculate_group_addr(br);
br_recalculate_fwd_mask(br);
-unlock:
+ return 0;
+}
+
+int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
+{
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ __br_vlan_filter_toggle(br, val);
rtnl_unlock();
+
return 0;
}
-int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
+int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
int err = 0;
struct net_bridge_port *p;
struct net_port_vlans *pv;
- __be16 proto, oldproto;
+ __be16 oldproto;
u16 vid, errvid;
- if (val != ETH_P_8021Q && val != ETH_P_8021AD)
- return -EPROTONOSUPPORT;
-
- if (!rtnl_trylock())
- return restart_syscall();
-
- proto = htons(val);
if (br->vlan_proto == proto)
- goto unlock;
+ return 0;
/* Add VLANs for the new proto to the device filter. */
list_for_each_entry(p, &br->port_list, list) {
@@ -533,9 +541,7 @@ int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
vlan_vid_del(p->dev, oldproto, vid);
}
-unlock:
- rtnl_unlock();
- return err;
+ return 0;
err_filt:
errvid = vid;
@@ -551,7 +557,23 @@ err_filt:
vlan_vid_del(p->dev, proto, vid);
}
- goto unlock;
+ return err;
+}
+
+int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
+{
+ int err;
+
+ if (val != ETH_P_8021Q && val != ETH_P_8021AD)
+ return -EPROTONOSUPPORT;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ err = __br_vlan_set_proto(br, htons(val));
+ rtnl_unlock();
+
+ return err;
}
static bool vlan_default_pvid(struct net_port_vlans *pv, u16 vid)
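
The br_vlan.c changes are a classic lock-split refactor: each rtnl-taking entry point is divided into a thin locking wrapper and a double-underscore helper that assumes rtnl is held, so br_changelink(), which already runs under rtnl, can call the helper directly. A self-contained model with a pthread mutex standing in for rtnl (compile with -pthread):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER; /* plays rtnl */
	static int vlan_enabled;

	/* __locked variant: caller holds the lock, as
	 * __br_vlan_filter_toggle() assumes rtnl is held.
	 */
	static int __toggle(int val)
	{
		if (vlan_enabled == val)
			return 0;
		vlan_enabled = val;
		/* ...recalculate state that depends on the setting... */
		return 0;
	}

	/* Locking wrapper for callers that arrive without the lock,
	 * mirroring br_vlan_filter_toggle().
	 */
	static int toggle(int val)
	{
		pthread_mutex_lock(&cfg_lock);
		int err = __toggle(val);
		pthread_mutex_unlock(&cfg_lock);
		return err;
	}

	int main(void)
	{
		toggle(1);			/* sysfs-style caller */
		pthread_mutex_lock(&cfg_lock);	/* netlink-style caller */
		__toggle(0);
		pthread_mutex_unlock(&cfg_lock);
		printf("vlan_enabled=%d\n", vlan_enabled);
		return 0;
	}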
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 18ca4b24c..48b6b0129 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -176,7 +176,7 @@ ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
return 0;
}
-static inline __pure
+static inline
struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
{
return (void *)entry + entry->next_offset;
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index edbca468f..d730a0f68 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -177,7 +177,7 @@ static int transmit(struct cflayer *layer, struct cfpkt *pkt)
skb->protocol = htons(ETH_P_CAIF);
/* Check if we need to handle xoff */
- if (likely(caifd->netdev->tx_queue_len == 0))
+ if (likely(caifd->netdev->priv_flags & IFF_NO_QUEUE))
goto noxoff;
if (unlikely(caifd->xoff))
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 69a4d30a9..54a00d665 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -357,6 +357,7 @@ ceph_parse_options(char *options, const char *dev_name,
opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT;
opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT;
opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT;
+ opt->monc_ping_timeout = CEPH_MONC_PING_TIMEOUT_DEFAULT;
/* get mon ip(s) */
/* ip1[:port1][,ip2[:port2]...] */
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 790fe89d9..4440edcce 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -79,10 +79,6 @@ int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
return 0;
}
-
-
-#define AES_KEY_SIZE 16
-
static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
{
return crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index e3be1d22a..b9b0e3b5d 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -163,6 +163,7 @@ static struct kmem_cache *ceph_msg_data_cache;
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
+static char tag_keepalive2 = CEPH_MSGR_TAG_KEEPALIVE2;
#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
@@ -176,7 +177,7 @@ static struct lock_class_key socket_class;
static void queue_con(struct ceph_connection *con);
static void cancel_con(struct ceph_connection *con);
-static void con_work(struct work_struct *);
+static void ceph_con_workfn(struct work_struct *);
static void con_fault(struct ceph_connection *con);
/*
@@ -276,22 +277,22 @@ static void _ceph_msgr_exit(void)
ceph_msgr_wq = NULL;
}
- ceph_msgr_slab_exit();
-
BUG_ON(zero_page == NULL);
page_cache_release(zero_page);
zero_page = NULL;
+
+ ceph_msgr_slab_exit();
}
int ceph_msgr_init(void)
{
+ if (ceph_msgr_slab_init())
+ return -ENOMEM;
+
BUG_ON(zero_page != NULL);
zero_page = ZERO_PAGE(0);
page_cache_get(zero_page);
- if (ceph_msgr_slab_init())
- return -ENOMEM;
-
/*
* The number of active work items is limited by the number of
* connections, so leave @max_active at default.
@@ -749,7 +750,7 @@ void ceph_con_init(struct ceph_connection *con, void *private,
mutex_init(&con->mutex);
INIT_LIST_HEAD(&con->out_queue);
INIT_LIST_HEAD(&con->out_sent);
- INIT_DELAYED_WORK(&con->work, con_work);
+ INIT_DELAYED_WORK(&con->work, ceph_con_workfn);
con->state = CON_STATE_CLOSED;
}
@@ -1351,7 +1352,16 @@ static void prepare_write_keepalive(struct ceph_connection *con)
{
dout("prepare_write_keepalive %p\n", con);
con_out_kvec_reset(con);
- con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
+ if (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2) {
+ struct timespec now = CURRENT_TIME;
+
+ con_out_kvec_add(con, sizeof(tag_keepalive2), &tag_keepalive2);
+ ceph_encode_timespec(&con->out_temp_keepalive2, &now);
+ con_out_kvec_add(con, sizeof(con->out_temp_keepalive2),
+ &con->out_temp_keepalive2);
+ } else {
+ con_out_kvec_add(con, sizeof(tag_keepalive), &tag_keepalive);
+ }
con_flag_set(con, CON_FLAG_WRITE_PENDING);
}
@@ -1625,6 +1635,12 @@ static void prepare_read_tag(struct ceph_connection *con)
con->in_tag = CEPH_MSGR_TAG_READY;
}
+static void prepare_read_keepalive_ack(struct ceph_connection *con)
+{
+ dout("prepare_read_keepalive_ack %p\n", con);
+ con->in_base_pos = 0;
+}
+
/*
* Prepare to read a message.
*/
@@ -2322,13 +2338,6 @@ static int read_partial_message(struct ceph_connection *con)
return ret;
BUG_ON(!con->in_msg ^ skip);
- if (con->in_msg && data_len > con->in_msg->data_length) {
- pr_warn("%s skipping long message (%u > %zd)\n",
- __func__, data_len, con->in_msg->data_length);
- ceph_msg_put(con->in_msg);
- con->in_msg = NULL;
- skip = 1;
- }
if (skip) {
/* skip this message */
dout("alloc_msg said skip message\n");
@@ -2457,6 +2466,17 @@ static void process_message(struct ceph_connection *con)
mutex_lock(&con->mutex);
}
+static int read_keepalive_ack(struct ceph_connection *con)
+{
+ struct ceph_timespec ceph_ts;
+ size_t size = sizeof(ceph_ts);
+ int ret = read_partial(con, size, size, &ceph_ts);
+ if (ret <= 0)
+ return ret;
+ ceph_decode_timespec(&con->last_keepalive_ack, &ceph_ts);
+ prepare_read_tag(con);
+ return 1;
+}
/*
* Write something to the socket. Called in a worker thread when the
@@ -2526,6 +2546,10 @@ more_kvec:
do_next:
if (con->state == CON_STATE_OPEN) {
+ if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) {
+ prepare_write_keepalive(con);
+ goto more;
+ }
/* is anything else pending? */
if (!list_empty(&con->out_queue)) {
prepare_write_message(con);
@@ -2535,10 +2559,6 @@ do_next:
prepare_write_ack(con);
goto more;
}
- if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) {
- prepare_write_keepalive(con);
- goto more;
- }
}
/* Nothing to do! */
@@ -2641,6 +2661,9 @@ more:
case CEPH_MSGR_TAG_ACK:
prepare_read_ack(con);
break;
+ case CEPH_MSGR_TAG_KEEPALIVE2_ACK:
+ prepare_read_keepalive_ack(con);
+ break;
case CEPH_MSGR_TAG_CLOSE:
con_close_socket(con);
con->state = CON_STATE_CLOSED;
@@ -2684,6 +2707,12 @@ more:
process_ack(con);
goto more;
}
+ if (con->in_tag == CEPH_MSGR_TAG_KEEPALIVE2_ACK) {
+ ret = read_keepalive_ack(con);
+ if (ret <= 0)
+ goto out;
+ goto more;
+ }
out:
dout("try_read done on %p ret %d\n", con, ret);
@@ -2799,7 +2828,7 @@ static void con_fault_finish(struct ceph_connection *con)
/*
* Do some work on a connection. Drop a connection ref when we're done.
*/
-static void con_work(struct work_struct *work)
+static void ceph_con_workfn(struct work_struct *work)
{
struct ceph_connection *con = container_of(work, struct ceph_connection,
work.work);
@@ -3101,6 +3130,20 @@ void ceph_con_keepalive(struct ceph_connection *con)
}
EXPORT_SYMBOL(ceph_con_keepalive);
+bool ceph_con_keepalive_expired(struct ceph_connection *con,
+ unsigned long interval)
+{
+ if (interval > 0 &&
+ (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2)) {
+ struct timespec now = CURRENT_TIME;
+ struct timespec ts;
+ jiffies_to_timespec(interval, &ts);
+ ts = timespec_add(con->last_keepalive_ack, ts);
+ return timespec_compare(&now, &ts) >= 0;
+ }
+ return false;
+}
+
static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type)
{
struct ceph_msg_data *data;
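
ceph_con_keepalive_expired() reduces to one comparison: the last acknowledged keepalive plus the configured interval against the current time, with interval 0 (or a peer without KEEPALIVE2) meaning "never expires". A whole-seconds model of that predicate; the kernel works in jiffies and struct timespec instead:

	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>

	/* Model of the expiry test: the session is considered dead once
	 * last_ack + interval is in the past.
	 */
	static bool keepalive_expired(time_t last_ack, time_t interval, time_t now)
	{
		if (interval <= 0)
			return false;	/* feature off: never expire */
		return now >= last_ack + interval;
	}

	int main(void)
	{
		time_t now = time(NULL);

		printf("fresh: %d stale: %d\n",
		       keepalive_expired(now - 5, 30, now),
		       keepalive_expired(now - 60, 30, now));
		return 0;
	}

The mon_client hunk below schedules its tick at min(normal delay, ping timeout / 3), so several keepalives are attempted within one timeout window before the monitor is declared dead and hunting restarts.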
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 9d6ff1215..edda01626 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -149,6 +149,10 @@ static int __open_session(struct ceph_mon_client *monc)
CEPH_ENTITY_TYPE_MON, monc->cur_mon,
&monc->monmap->mon_inst[monc->cur_mon].addr);
+ /* send an initial keepalive to ensure our timestamp is
+ * valid by the time we are in an OPENED state */
+ ceph_con_keepalive(&monc->con);
+
/* initiate authentication handshake */
ret = ceph_auth_build_hello(monc->auth,
monc->m_auth->front.iov_base,
@@ -170,14 +174,19 @@ static bool __sub_expired(struct ceph_mon_client *monc)
*/
static void __schedule_delayed(struct ceph_mon_client *monc)
{
- unsigned int delay;
+ struct ceph_options *opt = monc->client->options;
+ unsigned long delay;
- if (monc->cur_mon < 0 || __sub_expired(monc))
+ if (monc->cur_mon < 0 || __sub_expired(monc)) {
delay = 10 * HZ;
- else
+ } else {
delay = 20 * HZ;
- dout("__schedule_delayed after %u\n", delay);
- schedule_delayed_work(&monc->delayed_work, delay);
+ if (opt->monc_ping_timeout > 0)
+ delay = min(delay, opt->monc_ping_timeout / 3);
+ }
+ dout("__schedule_delayed after %lu\n", delay);
+ schedule_delayed_work(&monc->delayed_work,
+ round_jiffies_relative(delay));
}
/*
@@ -743,11 +752,23 @@ static void delayed_work(struct work_struct *work)
__close_session(monc);
__open_session(monc); /* continue hunting */
} else {
- ceph_con_keepalive(&monc->con);
+ struct ceph_options *opt = monc->client->options;
+ int is_auth = ceph_auth_is_authenticated(monc->auth);
+ if (ceph_con_keepalive_expired(&monc->con,
+ opt->monc_ping_timeout)) {
+ dout("monc keepalive timeout\n");
+ is_auth = 0;
+ __close_session(monc);
+ monc->hunting = true;
+ __open_session(monc);
+ }
- __validate_auth(monc);
+ if (!monc->hunting) {
+ ceph_con_keepalive(&monc->con);
+ __validate_auth(monc);
+ }
- if (ceph_auth_is_authenticated(monc->auth))
+ if (is_auth)
__send_subscribe(monc);
}
__schedule_delayed(monc);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 50033677c..f79ccac66 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -285,6 +285,7 @@ static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
switch (op->op) {
case CEPH_OSD_OP_READ:
case CEPH_OSD_OP_WRITE:
+ case CEPH_OSD_OP_WRITEFULL:
ceph_osd_data_release(&op->extent.osd_data);
break;
case CEPH_OSD_OP_CALL:
@@ -485,13 +486,14 @@ void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
size_t payload_len = 0;
BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
- opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE);
+ opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
+ opcode != CEPH_OSD_OP_TRUNCATE);
op->extent.offset = offset;
op->extent.length = length;
op->extent.truncate_size = truncate_size;
op->extent.truncate_seq = truncate_seq;
- if (opcode == CEPH_OSD_OP_WRITE)
+ if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
payload_len += length;
op->payload_len = payload_len;
@@ -670,9 +672,11 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
break;
case CEPH_OSD_OP_READ:
case CEPH_OSD_OP_WRITE:
+ case CEPH_OSD_OP_WRITEFULL:
case CEPH_OSD_OP_ZERO:
case CEPH_OSD_OP_TRUNCATE:
- if (src->op == CEPH_OSD_OP_WRITE)
+ if (src->op == CEPH_OSD_OP_WRITE ||
+ src->op == CEPH_OSD_OP_WRITEFULL)
request_data_len = src->extent.length;
dst->extent.offset = cpu_to_le64(src->extent.offset);
dst->extent.length = cpu_to_le64(src->extent.length);
@@ -681,7 +685,8 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
dst->extent.truncate_seq =
cpu_to_le32(src->extent.truncate_seq);
osd_data = &src->extent.osd_data;
- if (src->op == CEPH_OSD_OP_WRITE)
+ if (src->op == CEPH_OSD_OP_WRITE ||
+ src->op == CEPH_OSD_OP_WRITEFULL)
ceph_osdc_msg_data_add(req->r_request, osd_data);
else
ceph_osdc_msg_data_add(req->r_reply, osd_data);
@@ -2817,8 +2822,9 @@ out:
}
/*
- * lookup and return message for incoming reply. set up reply message
- * pages.
+ * Lookup and return message for incoming reply. Don't try to do
+ * anything about a data portion larger than preallocated at the
+ * moment - for now, just skip the message.
*/
static struct ceph_msg *get_reply(struct ceph_connection *con,
struct ceph_msg_header *hdr,
@@ -2836,10 +2842,10 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
mutex_lock(&osdc->request_mutex);
req = __lookup_request(osdc, tid);
if (!req) {
- *skip = 1;
+ pr_warn("%s osd%d tid %llu unknown, skipping\n",
+ __func__, osd->o_osd, tid);
m = NULL;
- dout("get_reply unknown tid %llu from osd%d\n", tid,
- osd->o_osd);
+ *skip = 1;
goto out;
}
@@ -2849,10 +2855,9 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
ceph_msg_revoke_incoming(req->r_reply);
if (front_len > req->r_reply->front_alloc_len) {
- pr_warn("get_reply front %d > preallocated %d (%u#%llu)\n",
- front_len, req->r_reply->front_alloc_len,
- (unsigned int)con->peer_name.type,
- le64_to_cpu(con->peer_name.num));
+ pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
+ __func__, osd->o_osd, req->r_tid, front_len,
+ req->r_reply->front_alloc_len);
m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
false);
if (!m)
@@ -2860,37 +2865,22 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
ceph_msg_put(req->r_reply);
req->r_reply = m;
}
- m = ceph_msg_get(req->r_reply);
-
- if (data_len > 0) {
- struct ceph_osd_data *osd_data;
- /*
- * XXX This is assuming there is only one op containing
- * XXX page data. Probably OK for reads, but this
- * XXX ought to be done more generally.
- */
- osd_data = osd_req_op_extent_osd_data(req, 0);
- if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
- if (osd_data->pages &&
- unlikely(osd_data->length < data_len)) {
-
- pr_warn("tid %lld reply has %d bytes we had only %llu bytes ready\n",
- tid, data_len, osd_data->length);
- *skip = 1;
- ceph_msg_put(m);
- m = NULL;
- goto out;
- }
- }
+ if (data_len > req->r_reply->data_length) {
+ pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
+ __func__, osd->o_osd, req->r_tid, data_len,
+ req->r_reply->data_length);
+ m = NULL;
+ *skip = 1;
+ goto out;
}
- *skip = 0;
+
+ m = ceph_msg_get(req->r_reply);
dout("get_reply tid %lld %p\n", tid, m);
out:
mutex_unlock(&osdc->request_mutex);
return m;
-
}
static struct ceph_msg *alloc_msg(struct ceph_connection *con,
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 4a3125836..7d8f581d9 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -1300,7 +1300,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
ceph_decode_addr(&addr);
pr_info("osd%d up\n", osd);
BUG_ON(osd >= map->max_osd);
- map->osd_state[osd] |= CEPH_OSD_UP;
+ map->osd_state[osd] |= CEPH_OSD_UP | CEPH_OSD_EXISTS;
map->osd_addr[osd] = addr;
}
diff --git a/net/core/Makefile b/net/core/Makefile
index fec0856dd..086b01fbe 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -23,3 +23,4 @@ obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += timestamping.o
obj-$(CONFIG_NET_PTP_CLASSIFY) += ptp_classifier.o
obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o
obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
+obj-$(CONFIG_LWTUNNEL) += lwtunnel.o
diff --git a/net/core/dev.c b/net/core/dev.c
index a8e4dd430..c14748d05 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -99,6 +99,7 @@
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
+#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
@@ -682,6 +683,32 @@ int dev_get_iflink(const struct net_device *dev)
EXPORT_SYMBOL(dev_get_iflink);
/**
+ * dev_fill_metadata_dst - Retrieve tunnel egress information.
+ * @dev: targeted interface
+ * @skb: The packet.
+ *
+ * For better visibility of tunnel traffic, OVS needs to retrieve
+ * egress tunnel information for a packet. The following API allows
+ * the caller to get this info.
+ */
+int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+{
+ struct ip_tunnel_info *info;
+
+ if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
+ return -EINVAL;
+
+ info = skb_tunnel_info_unclone(skb);
+ if (!info)
+ return -ENOMEM;
+ if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
+ return -EINVAL;
+
+ return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
+}
+EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
+
+/**
* __dev_get_by_name - find a device by its name
* @net: the applicable net namespace
* @name: name to find
@@ -3061,6 +3088,16 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
else
skb_dst_force(skb);
+#ifdef CONFIG_NET_SWITCHDEV
+ /* Don't forward if offload device already forwarded */
+ if (skb->offload_fwd_mark &&
+ skb->offload_fwd_mark == dev->offload_fwd_mark) {
+ consume_skb(skb);
+ rc = NET_XMIT_SUCCESS;
+ goto out;
+ }
+#endif
+
txq = netdev_pick_tx(dev, skb, accel_priv);
q = rcu_dereference_bh(txq->qdisc);
@@ -3645,15 +3682,15 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
qdisc_skb_cb(skb)->pkt_len = skb->len;
skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
- qdisc_bstats_update_cpu(cl->q, skb);
+ qdisc_bstats_cpu_update(cl->q, skb);
- switch (tc_classify(skb, cl, &cl_res)) {
+ switch (tc_classify(skb, cl, &cl_res, false)) {
case TC_ACT_OK:
case TC_ACT_RECLASSIFY:
skb->tc_index = TC_H_MIN(cl_res.classid);
break;
case TC_ACT_SHOT:
- qdisc_qstats_drop_cpu(cl->q);
+ qdisc_qstats_cpu_drop(cl->q);
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
kfree_skb(skb);
@@ -4703,6 +4740,8 @@ void napi_disable(struct napi_struct *n)
while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
msleep(1);
+ while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
+ msleep(1);
hrtimer_cancel(&n->timer);
@@ -4985,7 +5024,7 @@ EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
* Gets the next netdev_adjacent->private from the dev's lower neighbour
* list, starting from iter position. The caller must hold either hold the
* RTNL lock or its own locking that guarantees that the neighbour lower
- * list will remain unchainged.
+ * list will remain unchanged.
*/
void *netdev_lower_get_next_private(struct net_device *dev,
struct list_head **iter)
@@ -5040,7 +5079,7 @@ EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
* Gets the next netdev_adjacent from the dev's lower neighbour
* list, starting from iter position. The caller must hold RTNL lock or
* its own locking that guarantees that the neighbour lower
- * list will remain unchainged.
+ * list will remain unchanged.
*/
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
{
@@ -5301,6 +5340,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev, bool master,
void *private)
{
+ struct netdev_notifier_changeupper_info changeupper_info;
struct netdev_adjacent *i, *j, *to_i, *to_j;
int ret = 0;
@@ -5319,6 +5359,10 @@ static int __netdev_upper_dev_link(struct net_device *dev,
if (master && netdev_master_upper_dev_get(dev))
return -EBUSY;
+ changeupper_info.upper_dev = upper_dev;
+ changeupper_info.master = master;
+ changeupper_info.linking = true;
+
ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
master);
if (ret)
@@ -5357,7 +5401,8 @@ static int __netdev_upper_dev_link(struct net_device *dev,
goto rollback_lower_mesh;
}
- call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
+ call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
+ &changeupper_info.info);
return 0;
rollback_lower_mesh:
@@ -5452,9 +5497,14 @@ EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
void netdev_upper_dev_unlink(struct net_device *dev,
struct net_device *upper_dev)
{
+ struct netdev_notifier_changeupper_info changeupper_info;
struct netdev_adjacent *i, *j;
ASSERT_RTNL();
+ changeupper_info.upper_dev = upper_dev;
+ changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
+ changeupper_info.linking = false;
+
__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
/* Here is the tricky part. We must remove all dev's lower
@@ -5474,7 +5524,8 @@ void netdev_upper_dev_unlink(struct net_device *dev,
list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
__netdev_adjacent_dev_unlink(dev, i->dev);
- call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
+ call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
+ &changeupper_info.info);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);
@@ -6075,6 +6126,26 @@ int dev_get_phys_port_name(struct net_device *dev,
EXPORT_SYMBOL(dev_get_phys_port_name);
/**
+ * dev_change_proto_down - update protocol port state information
+ * @dev: device
+ * @proto_down: new value
+ *
+ * This info can be used by switch drivers to set the phys state of the
+ * port.
+ */
+int dev_change_proto_down(struct net_device *dev, bool proto_down)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ if (!ops->ndo_change_proto_down)
+ return -EOPNOTSUPP;
+ if (!netif_device_present(dev))
+ return -ENODEV;
+ return ops->ndo_change_proto_down(dev, proto_down);
+}
+EXPORT_SYMBOL(dev_change_proto_down);
+
+/**
* dev_new_index - allocate an ifindex
* @net: the applicable net namespace
*
@@ -6967,6 +7038,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
setup(dev);
+ if (!dev->tx_queue_len)
+ dev->priv_flags |= IFF_NO_QUEUE;
+
dev->num_tx_queues = txqs;
dev->real_num_tx_queues = txqs;
if (netif_alloc_netdev_queues(dev))
@@ -7639,7 +7713,7 @@ static int __init net_dev_init(void)
open_softirq(NET_RX_SOFTIRQ, net_rx_action);
hotcpu_notifier(dev_cpu_callback, 0);
- dst_init();
+ dst_subsys_init();
rc = 0;
out:
return rc;
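
dev_change_proto_down() above is the standard shape for an optional ndo: probe the callback first, so userspace can tell "driver does not support it" (-EOPNOTSUPP) from "device is gone" (-ENODEV). A reduced model of that dispatch, with struct and ops names illustrative:

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct dev;

	struct dev_ops {
		int (*change_proto_down)(struct dev *dev, bool down);
	};

	struct dev {
		const struct dev_ops *ops;
		bool present;
		bool proto_down;
	};

	/* Model of the guarded dispatch: callback support is checked
	 * before device presence, preserving the error distinction.
	 */
	static int change_proto_down(struct dev *dev, bool down)
	{
		if (!dev->ops->change_proto_down)
			return -EOPNOTSUPP;
		if (!dev->present)
			return -ENODEV;
		return dev->ops->change_proto_down(dev, down);
	}

	static int do_change(struct dev *dev, bool down)
	{
		dev->proto_down = down;
		return 0;
	}

	static const struct dev_ops ops = { .change_proto_down = do_change };

	int main(void)
	{
		struct dev d = { .ops = &ops, .present = true };

		printf("%d proto_down=%d\n",
		       change_proto_down(&d, true), d.proto_down);
		return 0;
	}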
diff --git a/net/core/dst.c b/net/core/dst.c
index 002144bea..d6a5a0bc7 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -20,8 +20,10 @@
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
+#include <net/lwtunnel.h>
#include <net/dst.h>
+#include <net/dst_metadata.h>
/*
* Theory of operations:
@@ -158,19 +160,10 @@ const u32 dst_default_metrics[RTAX_MAX + 1] = {
[RTAX_MAX] = 0xdeadbeef,
};
-
-void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
- int initial_ref, int initial_obsolete, unsigned short flags)
+void dst_init(struct dst_entry *dst, struct dst_ops *ops,
+ struct net_device *dev, int initial_ref, int initial_obsolete,
+ unsigned short flags)
{
- struct dst_entry *dst;
-
- if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
- if (ops->gc(ops))
- return NULL;
- }
- dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
- if (!dst)
- return NULL;
dst->child = NULL;
dst->dev = dev;
if (dev)
@@ -192,6 +185,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
#ifdef CONFIG_IP_ROUTE_CLASSID
dst->tclassid = 0;
#endif
+ dst->lwtstate = NULL;
atomic_set(&dst->__refcnt, initial_ref);
dst->__use = 0;
dst->lastuse = jiffies;
@@ -200,6 +194,25 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
dst->next = NULL;
if (!(flags & DST_NOCOUNT))
dst_entries_add(ops, 1);
+}
+EXPORT_SYMBOL(dst_init);
+
+void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
+ int initial_ref, int initial_obsolete, unsigned short flags)
+{
+ struct dst_entry *dst;
+
+ if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
+ if (ops->gc(ops))
+ return NULL;
+ }
+
+ dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
+ if (!dst)
+ return NULL;
+
+ dst_init(dst, ops, dev, initial_ref, initial_obsolete, flags);
+
return dst;
}
EXPORT_SYMBOL(dst_alloc);
@@ -248,7 +261,13 @@ again:
dst->ops->destroy(dst);
if (dst->dev)
dev_put(dst->dev);
- kmem_cache_free(dst->ops->kmem_cachep, dst);
+
+ lwtstate_put(dst->lwtstate);
+
+ if (dst->flags & DST_METADATA)
+ kfree(dst);
+ else
+ kmem_cache_free(dst->ops->kmem_cachep, dst);
dst = child;
if (dst) {
@@ -287,7 +306,7 @@ void dst_release(struct dst_entry *dst)
if (unlikely(newrefcnt < 0))
net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
__func__, dst, newrefcnt);
- if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
+ if (!newrefcnt && unlikely(dst->flags & DST_NOCACHE))
call_rcu(&dst->rcu_head, dst_destroy_rcu);
}
}
@@ -329,6 +348,69 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);
+static struct dst_ops md_dst_ops = {
+ .family = AF_UNSPEC,
+};
+
+static int dst_md_discard_sk(struct sock *sk, struct sk_buff *skb)
+{
+ WARN_ONCE(1, "Attempting to call output on metadata dst\n");
+ kfree_skb(skb);
+ return 0;
+}
+
+static int dst_md_discard(struct sk_buff *skb)
+{
+ WARN_ONCE(1, "Attempting to call input on metadata dst\n");
+ kfree_skb(skb);
+ return 0;
+}
+
+static void __metadata_dst_init(struct metadata_dst *md_dst, u8 optslen)
+{
+ struct dst_entry *dst;
+
+ dst = &md_dst->dst;
+ dst_init(dst, &md_dst_ops, NULL, 1, DST_OBSOLETE_NONE,
+ DST_METADATA | DST_NOCACHE | DST_NOCOUNT);
+
+ dst->input = dst_md_discard;
+ dst->output = dst_md_discard_sk;
+
+ memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
+}
+
+struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags)
+{
+ struct metadata_dst *md_dst;
+
+ md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
+ if (!md_dst)
+ return NULL;
+
+ __metadata_dst_init(md_dst, optslen);
+
+ return md_dst;
+}
+EXPORT_SYMBOL_GPL(metadata_dst_alloc);
+
+struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags)
+{
+ int cpu;
+ struct metadata_dst __percpu *md_dst;
+
+ md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen,
+ __alignof__(struct metadata_dst), flags);
+ if (!md_dst)
+ return NULL;
+
+ for_each_possible_cpu(cpu)
+ __metadata_dst_init(per_cpu_ptr(md_dst, cpu), optslen);
+
+ return md_dst;
+}
+EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);
+
/* Dirty hack. We did it in 2.2 (in __dst_free),
* we have _very_ good reasons not to repeat
* this mistake in 2.3, but we have no choice
@@ -393,7 +475,7 @@ static struct notifier_block dst_dev_notifier = {
.priority = -10, /* must be called after other network notifiers */
};
-void __init dst_init(void)
+void __init dst_subsys_init(void)
{
register_netdevice_notifier(&dst_dev_notifier);
}
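
The dst_alloc()/dst_init() split exists so metadata dsts can reuse the field initialisation while living in plain kmalloc or per-CPU memory instead of the per-ops kmem_cache. The shape of the refactor, reduced to an illustrative object:

	#include <stdio.h>
	#include <stdlib.h>

	struct obj {
		int refcnt;
		unsigned short flags;
	};

	#define OBJ_METADATA 0x1	/* illustrative flag */

	/* init does all field setup and works on caller-provided memory... */
	static void obj_init(struct obj *o, unsigned short flags)
	{
		o->refcnt = 1;
		o->flags = flags;
	}

	/* ...while alloc keeps only the allocation policy, mirroring how
	 * dst_alloc() now wraps dst_init().
	 */
	static struct obj *obj_alloc(unsigned short flags)
	{
		struct obj *o = malloc(sizeof(*o));

		if (!o)
			return NULL;
		obj_init(o, flags);
		return o;
	}

	/* Embedders (like struct metadata_dst) supply their own storage. */
	struct metadata_obj {
		struct obj o;
		char opts[16];
	};

	int main(void)
	{
		struct metadata_obj m;
		struct obj *heap = obj_alloc(0);

		obj_init(&m.o, OBJ_METADATA);
		printf("%d %d\n", heap ? heap->refcnt : -1, m.o.refcnt);
		free(heap);
		return 0;
	}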
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 0ad144fb0..365de6643 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -16,6 +16,7 @@
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>
+#include <net/ip_tunnels.h>
int fib_default_rule_add(struct fib_rules_ops *ops,
u32 pref, u32 table, u32 flags)
@@ -43,7 +44,7 @@ int fib_default_rule_add(struct fib_rules_ops *ops,
}
EXPORT_SYMBOL(fib_default_rule_add);
-u32 fib_default_rule_pref(struct fib_rules_ops *ops)
+static u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
struct list_head *pos;
struct fib_rule *rule;
@@ -59,7 +60,6 @@ u32 fib_default_rule_pref(struct fib_rules_ops *ops)
return 0;
}
-EXPORT_SYMBOL(fib_default_rule_pref);
static void notify_rule_change(int event, struct fib_rule *rule,
struct fib_rules_ops *ops, struct nlmsghdr *nlh,
@@ -186,6 +186,9 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
goto out;
+ if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
+ goto out;
+
ret = ops->match(rule, fl, flags);
out:
return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
@@ -295,8 +298,8 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
}
rule->fr_net = net;
- if (tb[FRA_PRIORITY])
- rule->pref = nla_get_u32(tb[FRA_PRIORITY]);
+ rule->pref = tb[FRA_PRIORITY] ? nla_get_u32(tb[FRA_PRIORITY])
+ : fib_default_rule_pref(ops);
if (tb[FRA_IIFNAME]) {
struct net_device *dev;
@@ -330,6 +333,9 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
if (tb[FRA_FWMASK])
rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);
+ if (tb[FRA_TUN_ID])
+ rule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);
+
rule->action = frh->action;
rule->flags = frh->flags;
rule->table = frh_get_table(frh, tb);
@@ -343,9 +349,6 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
else
rule->suppress_ifgroup = -1;
- if (!tb[FRA_PRIORITY] && ops->default_pref)
- rule->pref = ops->default_pref(ops);
-
err = -EINVAL;
if (tb[FRA_GOTO]) {
if (rule->action != FR_ACT_GOTO)
@@ -407,6 +410,9 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
if (unresolved)
ops->unresolved_rules++;
+ if (rule->tun_id)
+ ip_tunnel_need_metadata();
+
notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
flush_route_cache(ops);
rules_ops_put(ops);
@@ -473,6 +479,10 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
(rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
continue;
+ if (tb[FRA_TUN_ID] &&
+ (rule->tun_id != nla_get_be64(tb[FRA_TUN_ID])))
+ continue;
+
if (!ops->compare(rule, frh, tb))
continue;
@@ -487,6 +497,9 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
goto errout;
}
+ if (rule->tun_id)
+ ip_tunnel_unneed_metadata();
+
list_del_rcu(&rule->list);
if (rule->action == FR_ACT_GOTO) {
@@ -535,7 +548,8 @@ static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
+ nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
+ nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
+ nla_total_size(4) /* FRA_FWMARK */
- + nla_total_size(4); /* FRA_FWMASK */
+ + nla_total_size(4) /* FRA_FWMASK */
+ + nla_total_size(8); /* FRA_TUN_ID */
if (ops->nlmsg_payload)
payload += ops->nlmsg_payload(rule);
@@ -591,7 +605,9 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
((rule->mark_mask || rule->mark) &&
nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
(rule->target &&
- nla_put_u32(skb, FRA_GOTO, rule->target)))
+ nla_put_u32(skb, FRA_GOTO, rule->target)) ||
+ (rule->tun_id &&
+ nla_put_be64(skb, FRA_TUN_ID, rule->tun_id)))
goto nla_put_failure;
if (rule->suppress_ifgroup != -1) {
diff --git a/net/core/filter.c b/net/core/filter.c
index 8dcdd86b6..bb18c3680 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -47,6 +47,8 @@
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>
+#include <net/cls_cgroup.h>
+#include <net/dst_metadata.h>
/**
* sk_filter - run a packet through a socket filter
@@ -476,9 +478,9 @@ do_pass:
bpf_src = BPF_X;
} else {
insn->dst_reg = BPF_REG_A;
- insn->src_reg = BPF_REG_X;
insn->imm = fp->k;
bpf_src = BPF_SRC(fp->code);
+ insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
}
/* Common case where 'jump_false' is next insn. */
@@ -1122,6 +1124,7 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
*pfp = fp;
return 0;
}
+EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);
void bpf_prog_destroy(struct bpf_prog *fp)
{
@@ -1346,7 +1349,7 @@ const struct bpf_func_proto bpf_l3_csum_replace_proto = {
static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
{
struct sk_buff *skb = (struct sk_buff *) (long) r1;
- u32 is_pseudo = BPF_IS_PSEUDO_HEADER(flags);
+ bool is_pseudo = !!BPF_IS_PSEUDO_HEADER(flags);
int offset = (int) r2;
__sum16 sum, *ptr;
@@ -1425,6 +1428,139 @@ const struct bpf_func_proto bpf_clone_redirect_proto = {
.arg3_type = ARG_ANYTHING,
};
+static u64 bpf_get_cgroup_classid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+ return task_get_classid((struct sk_buff *) (unsigned long) r1);
+}
+
+static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
+ .func = bpf_get_cgroup_classid,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
+
+static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
+{
+ struct sk_buff *skb = (struct sk_buff *) (long) r1;
+ __be16 vlan_proto = (__force __be16) r2;
+
+ if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
+ vlan_proto != htons(ETH_P_8021AD)))
+ vlan_proto = htons(ETH_P_8021Q);
+
+ return skb_vlan_push(skb, vlan_proto, vlan_tci);
+}
+
+const struct bpf_func_proto bpf_skb_vlan_push_proto = {
+ .func = bpf_skb_vlan_push,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_ANYTHING,
+};
+EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto);
+
+static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+ struct sk_buff *skb = (struct sk_buff *) (long) r1;
+
+ return skb_vlan_pop(skb);
+}
+
+const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
+ .func = bpf_skb_vlan_pop,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
+EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto);
+
+bool bpf_helper_changes_skb_data(void *func)
+{
+ if (func == bpf_skb_vlan_push)
+ return true;
+ if (func == bpf_skb_vlan_pop)
+ return true;
+ return false;
+}
+
+static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
+{
+ struct sk_buff *skb = (struct sk_buff *) (long) r1;
+ struct bpf_tunnel_key *to = (struct bpf_tunnel_key *) (long) r2;
+ struct ip_tunnel_info *info = skb_tunnel_info(skb);
+
+ if (unlikely(size != sizeof(struct bpf_tunnel_key) || flags || !info))
+ return -EINVAL;
+ if (ip_tunnel_info_af(info) != AF_INET)
+ return -EINVAL;
+
+ to->tunnel_id = be64_to_cpu(info->key.tun_id);
+ to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
+
+ return 0;
+}
+
+const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
+ .func = bpf_skb_get_tunnel_key,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_STACK,
+ .arg3_type = ARG_CONST_STACK_SIZE,
+ .arg4_type = ARG_ANYTHING,
+};
+
+static struct metadata_dst __percpu *md_dst;
+
+static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
+{
+ struct sk_buff *skb = (struct sk_buff *) (long) r1;
+ struct bpf_tunnel_key *from = (struct bpf_tunnel_key *) (long) r2;
+ struct metadata_dst *md = this_cpu_ptr(md_dst);
+ struct ip_tunnel_info *info;
+
+ if (unlikely(size != sizeof(struct bpf_tunnel_key) || flags))
+ return -EINVAL;
+
+ skb_dst_drop(skb);
+ dst_hold((struct dst_entry *) md);
+ skb_dst_set(skb, (struct dst_entry *) md);
+
+ info = &md->u.tun_info;
+ info->mode = IP_TUNNEL_INFO_TX;
+ info->key.tun_flags = TUNNEL_KEY;
+ info->key.tun_id = cpu_to_be64(from->tunnel_id);
+ info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
+
+ return 0;
+}
+
+const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
+ .func = bpf_skb_set_tunnel_key,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_STACK,
+ .arg3_type = ARG_CONST_STACK_SIZE,
+ .arg4_type = ARG_ANYTHING,
+};
+
+static const struct bpf_func_proto *bpf_get_skb_set_tunnel_key_proto(void)
+{
+ if (!md_dst) {
+ /* race is not possible, since it's called from the
+ * verifier, which holds the verifier mutex
+ */
+ md_dst = metadata_dst_alloc_percpu(0, GFP_KERNEL);
+ if (!md_dst)
+ return NULL;
+ }
+ return &bpf_skb_set_tunnel_key_proto;
+}
+
static const struct bpf_func_proto *
sk_filter_func_proto(enum bpf_func_id func_id)
{
@@ -1462,6 +1598,16 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
return &bpf_l4_csum_replace_proto;
case BPF_FUNC_clone_redirect:
return &bpf_clone_redirect_proto;
+ case BPF_FUNC_get_cgroup_classid:
+ return &bpf_get_cgroup_classid_proto;
+ case BPF_FUNC_skb_vlan_push:
+ return &bpf_skb_vlan_push_proto;
+ case BPF_FUNC_skb_vlan_pop:
+ return &bpf_skb_vlan_pop_proto;
+ case BPF_FUNC_skb_get_tunnel_key:
+ return &bpf_skb_get_tunnel_key_proto;
+ case BPF_FUNC_skb_set_tunnel_key:
+ return bpf_get_skb_set_tunnel_key_proto();
default:
return sk_filter_func_proto(func_id);
}
@@ -1570,6 +1716,13 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
offsetof(struct net_device, ifindex));
break;
+ case offsetof(struct __sk_buff, hash):
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
+
+ *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+ offsetof(struct sk_buff, hash));
+ break;
+
case offsetof(struct __sk_buff, mark):
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 2a834c617..d79699c9d 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -19,14 +19,14 @@
#include <net/flow_dissector.h>
#include <scsi/fc/fc_fcoe.h>
-static bool skb_flow_dissector_uses_key(struct flow_dissector *flow_dissector,
- enum flow_dissector_key_id key_id)
+static bool dissector_uses_key(const struct flow_dissector *flow_dissector,
+ enum flow_dissector_key_id key_id)
{
return flow_dissector->used_keys & (1 << key_id);
}
-static void skb_flow_dissector_set_key(struct flow_dissector *flow_dissector,
- enum flow_dissector_key_id key_id)
+static void dissector_set_key(struct flow_dissector *flow_dissector,
+ enum flow_dissector_key_id key_id)
{
flow_dissector->used_keys |= (1 << key_id);
}
@@ -51,20 +51,20 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
* boundaries of unsigned short.
*/
BUG_ON(key->offset > USHRT_MAX);
- BUG_ON(skb_flow_dissector_uses_key(flow_dissector,
- key->key_id));
+ BUG_ON(dissector_uses_key(flow_dissector,
+ key->key_id));
- skb_flow_dissector_set_key(flow_dissector, key->key_id);
+ dissector_set_key(flow_dissector, key->key_id);
flow_dissector->offset[key->key_id] = key->offset;
}
/* Ensure that the dissector always includes control and basic key.
* That way we are able to avoid handling lack of these in fast path.
*/
- BUG_ON(!skb_flow_dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_CONTROL));
- BUG_ON(!skb_flow_dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_BASIC));
+ BUG_ON(!dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_CONTROL));
+ BUG_ON(!dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_BASIC));
}
EXPORT_SYMBOL(skb_flow_dissector_init);
@@ -121,7 +121,8 @@ EXPORT_SYMBOL(__skb_flow_get_ports);
bool __skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container,
- void *data, __be16 proto, int nhoff, int hlen)
+ void *data, __be16 proto, int nhoff, int hlen,
+ unsigned int flags)
{
struct flow_dissector_key_control *key_control;
struct flow_dissector_key_basic *key_basic;
@@ -130,6 +131,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector_key_tags *key_tags;
struct flow_dissector_key_keyid *key_keyid;
u8 ip_proto = 0;
+ bool ret = false;
if (!data) {
data = skb->data;
@@ -152,8 +154,8 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
FLOW_DISSECTOR_KEY_BASIC,
target_container);
- if (skb_flow_dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ if (dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct ethhdr *eth = eth_hdr(skb);
struct flow_dissector_key_eth_addrs *key_eth_addrs;
@@ -171,15 +173,13 @@ again:
ip:
iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
if (!iph || iph->ihl < 5)
- return false;
+ goto out_bad;
nhoff += iph->ihl * 4;
ip_proto = iph->protocol;
- if (ip_is_fragment(iph))
- ip_proto = 0;
- if (!skb_flow_dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS))
+ if (!dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS))
break;
key_addrs = skb_flow_dissector_target(flow_dissector,
@@ -187,6 +187,22 @@ ip:
memcpy(&key_addrs->v4addrs, &iph->saddr,
sizeof(key_addrs->v4addrs));
key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+
+ if (ip_is_fragment(iph)) {
+ key_control->flags |= FLOW_DIS_IS_FRAGMENT;
+
+ if (iph->frag_off & htons(IP_OFFSET)) {
+ goto out_good;
+ } else {
+ key_control->flags |= FLOW_DIS_FIRST_FRAG;
+ if (!(flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG))
+ goto out_good;
+ }
+ }
+
+ if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
+ goto out_good;
+
break;
}
case htons(ETH_P_IPV6): {
@@ -197,13 +213,13 @@ ip:
ipv6:
iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
if (!iph)
- return false;
+ goto out_bad;
ip_proto = iph->nexthdr;
nhoff += sizeof(struct ipv6hdr);
- if (skb_flow_dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ if (dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
struct flow_dissector_key_ipv6_addrs *key_ipv6_addrs;
key_ipv6_addrs = skb_flow_dissector_target(flow_dissector,
@@ -216,15 +232,20 @@ ipv6:
flow_label = ip6_flowlabel(iph);
if (flow_label) {
- if (skb_flow_dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
+ if (dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
key_tags = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_FLOW_LABEL,
target_container);
key_tags->flow_label = ntohl(flow_label);
}
+ if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)
+ goto out_good;
}
+ if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
+ goto out_good;
+
break;
}
case htons(ETH_P_8021AD):
@@ -234,10 +255,10 @@ ipv6:
vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), data, hlen, &_vlan);
if (!vlan)
- return false;
+ goto out_bad;
- if (skb_flow_dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_VLANID)) {
+ if (dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_VLANID)) {
key_tags = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_VLANID,
target_container);
@@ -256,7 +277,7 @@ ipv6:
} *hdr, _hdr;
hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
if (!hdr)
- return false;
+ goto out_bad;
proto = hdr->proto;
nhoff += PPPOE_SES_HLEN;
switch (proto) {
@@ -265,7 +286,7 @@ ipv6:
case htons(PPP_IPV6):
goto ipv6;
default:
- return false;
+ goto out_bad;
}
}
case htons(ETH_P_TIPC): {
@@ -275,19 +296,17 @@ ipv6:
} *hdr, _hdr;
hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
if (!hdr)
- return false;
- key_basic->n_proto = proto;
- key_control->thoff = (u16)nhoff;
+ goto out_bad;
- if (skb_flow_dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_TIPC_ADDRS)) {
+ if (dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_TIPC_ADDRS)) {
key_addrs = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_TIPC_ADDRS,
target_container);
key_addrs->tipcaddrs.srcnode = hdr->srcnode;
key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC_ADDRS;
}
- return true;
+ goto out_good;
}
case htons(ETH_P_MPLS_UC):
@@ -297,12 +316,12 @@ mpls:
hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
hlen, &_hdr);
if (!hdr)
- return false;
+ goto out_bad;
if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) >>
MPLS_LS_LABEL_SHIFT == MPLS_LABEL_ENTROPY) {
- if (skb_flow_dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
+ if (dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
key_keyid = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
target_container);
@@ -310,21 +329,17 @@ mpls:
htonl(MPLS_LS_LABEL_MASK);
}
- key_basic->n_proto = proto;
- key_basic->ip_proto = ip_proto;
- key_control->thoff = (u16)nhoff;
-
- return true;
+ goto out_good;
}
- return true;
+ goto out_good;
}
case htons(ETH_P_FCOE):
key_control->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
/* fall through */
default:
- return false;
+ goto out_bad;
}
ip_proto_again:
@@ -337,7 +352,7 @@ ip_proto_again:
hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
if (!hdr)
- return false;
+ goto out_bad;
/*
* Only look inside GRE if version zero and no
* routing
@@ -357,10 +372,10 @@ ip_proto_again:
data, hlen, &_keyid);
if (!keyid)
- return false;
+ goto out_bad;
- if (skb_flow_dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_GRE_KEYID)) {
+ if (dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_GRE_KEYID)) {
key_keyid = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_GRE_KEYID,
target_container);
@@ -378,10 +393,15 @@ ip_proto_again:
sizeof(_eth),
data, hlen, &_eth);
if (!eth)
- return false;
+ goto out_bad;
proto = eth->h_proto;
nhoff += sizeof(*eth);
}
+
+ key_control->flags |= FLOW_DIS_ENCAPSULATION;
+ if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
+ goto out_good;
+
goto again;
}
case NEXTHDR_HOP:
@@ -395,18 +415,53 @@ ip_proto_again:
opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
data, hlen, &_opthdr);
if (!opthdr)
- return false;
+ goto out_bad;
ip_proto = opthdr[0];
nhoff += (opthdr[1] + 1) << 3;
goto ip_proto_again;
}
+ case NEXTHDR_FRAGMENT: {
+ struct frag_hdr _fh, *fh;
+
+ if (proto != htons(ETH_P_IPV6))
+ break;
+
+ fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
+ data, hlen, &_fh);
+
+ if (!fh)
+ goto out_bad;
+
+ key_control->flags |= FLOW_DIS_IS_FRAGMENT;
+
+ nhoff += sizeof(_fh);
+
+ if (!(fh->frag_off & htons(IP6_OFFSET))) {
+ key_control->flags |= FLOW_DIS_FIRST_FRAG;
+ if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG) {
+ ip_proto = fh->nexthdr;
+ goto ip_proto_again;
+ }
+ }
+ goto out_good;
+ }
case IPPROTO_IPIP:
proto = htons(ETH_P_IP);
+
+ key_control->flags |= FLOW_DIS_ENCAPSULATION;
+ if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
+ goto out_good;
+
goto ip;
case IPPROTO_IPV6:
proto = htons(ETH_P_IPV6);
+
+ key_control->flags |= FLOW_DIS_ENCAPSULATION;
+ if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
+ goto out_good;
+
goto ipv6;
case IPPROTO_MPLS:
proto = htons(ETH_P_MPLS_UC);
@@ -415,12 +470,8 @@ ip_proto_again:
break;
}
- key_basic->n_proto = proto;
- key_basic->ip_proto = ip_proto;
- key_control->thoff = (u16)nhoff;
-
- if (skb_flow_dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_PORTS)) {
+ if (dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_PORTS)) {
key_ports = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_PORTS,
target_container);
@@ -428,7 +479,15 @@ ip_proto_again:
data, hlen);
}
- return true;
+out_good:
+ ret = true;
+
+out_bad:
+ key_basic->n_proto = proto;
+ key_basic->ip_proto = ip_proto;
+ key_control->thoff = (u16)nhoff;
+
+ return ret;
}
EXPORT_SYMBOL(__skb_flow_dissect);
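
The new flags argument lets in-kernel callers bound how deep dissection goes. A hedged sketch (not part of this patch) of a caller that only needs network-layer addresses and can skip the transport-header parse:

    /* Illustrative sketch only: stop the dissector at L3 when the
     * caller has no use for ports.
     */
    static bool example_get_l3_keys(const struct sk_buff *skb,
                                    struct flow_keys *keys)
    {
            return skb_flow_dissect_flow_keys(skb, keys,
                                              FLOW_DISSECTOR_F_STOP_AT_L3);
    }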
@@ -438,18 +497,21 @@ static __always_inline void __flow_hash_secret_init(void)
net_get_random_once(&hashrnd, sizeof(hashrnd));
}
-static __always_inline u32 __flow_hash_words(u32 *words, u32 length, u32 keyval)
+static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
+ u32 keyval)
{
return jhash2(words, length, keyval);
}
-static inline void *flow_keys_hash_start(struct flow_keys *flow)
+static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
{
+ const void *p = flow;
+
BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
- return (void *)flow + FLOW_KEYS_HASH_OFFSET;
+ return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
}
-static inline size_t flow_keys_hash_length(struct flow_keys *flow)
+static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
{
size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
@@ -539,7 +601,7 @@ static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
__flow_hash_consistentify(keys);
- hash = __flow_hash_words((u32 *)flow_keys_hash_start(keys),
+ hash = __flow_hash_words(flow_keys_hash_start(keys),
flow_keys_hash_length(keys), keyval);
if (!hash)
hash = 1;
@@ -557,8 +619,8 @@ EXPORT_SYMBOL(flow_hash_from_keys);
static inline u32 ___skb_get_hash(const struct sk_buff *skb,
struct flow_keys *keys, u32 keyval)
{
- if (!skb_flow_dissect_flow_keys(skb, keys))
- return 0;
+ skb_flow_dissect_flow_keys(skb, keys,
+ FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
return __flow_hash_from_keys(keys, keyval);
}
@@ -602,17 +664,11 @@ EXPORT_SYMBOL(make_flow_keys_digest);
void __skb_get_hash(struct sk_buff *skb)
{
struct flow_keys keys;
- u32 hash;
__flow_hash_secret_init();
- hash = ___skb_get_hash(skb, &keys, hashrnd);
- if (!hash)
- return;
- if (keys.ports.ports)
- skb->l4_hash = 1;
- skb->sw_hash = 1;
- skb->hash = hash;
+ __skb_set_sw_hash(skb, ___skb_get_hash(skb, &keys, hashrnd),
+ flow_keys_have_l4(&keys));
}
EXPORT_SYMBOL(__skb_get_hash);
@@ -624,6 +680,51 @@ __u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
}
EXPORT_SYMBOL(skb_get_hash_perturb);
+__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
+{
+ struct flow_keys keys;
+
+ memset(&keys, 0, sizeof(keys));
+
+ memcpy(&keys.addrs.v6addrs.src, &fl6->saddr,
+ sizeof(keys.addrs.v6addrs.src));
+ memcpy(&keys.addrs.v6addrs.dst, &fl6->daddr,
+ sizeof(keys.addrs.v6addrs.dst));
+ keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ keys.ports.src = fl6->fl6_sport;
+ keys.ports.dst = fl6->fl6_dport;
+ keys.keyid.keyid = fl6->fl6_gre_key;
+ keys.tags.flow_label = (__force u32)fl6->flowlabel;
+ keys.basic.ip_proto = fl6->flowi6_proto;
+
+ __skb_set_sw_hash(skb, flow_hash_from_keys(&keys),
+ flow_keys_have_l4(&keys));
+
+ return skb->hash;
+}
+EXPORT_SYMBOL(__skb_get_hash_flowi6);
+
+__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
+{
+ struct flow_keys keys;
+
+ memset(&keys, 0, sizeof(keys));
+
+ keys.addrs.v4addrs.src = fl4->saddr;
+ keys.addrs.v4addrs.dst = fl4->daddr;
+ keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ keys.ports.src = fl4->fl4_sport;
+ keys.ports.dst = fl4->fl4_dport;
+ keys.keyid.keyid = fl4->fl4_gre_key;
+ keys.basic.ip_proto = fl4->flowi4_proto;
+
+ __skb_set_sw_hash(skb, flow_hash_from_keys(&keys),
+ flow_keys_have_l4(&keys));
+
+ return skb->hash;
+}
+EXPORT_SYMBOL(__skb_get_hash_flowi4);
+
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
const struct flow_keys *keys, int hlen)
{
@@ -683,12 +784,47 @@ u32 skb_get_poff(const struct sk_buff *skb)
{
struct flow_keys keys;
- if (!skb_flow_dissect_flow_keys(skb, &keys))
+ if (!skb_flow_dissect_flow_keys(skb, &keys, 0))
return 0;
return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
}
+__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
+{
+ memset(keys, 0, sizeof(*keys));
+
+ memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
+ sizeof(keys->addrs.v6addrs.src));
+ memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
+ sizeof(keys->addrs.v6addrs.dst));
+ keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ keys->ports.src = fl6->fl6_sport;
+ keys->ports.dst = fl6->fl6_dport;
+ keys->keyid.keyid = fl6->fl6_gre_key;
+ keys->tags.flow_label = (__force u32)fl6->flowlabel;
+ keys->basic.ip_proto = fl6->flowi6_proto;
+
+ return flow_hash_from_keys(keys);
+}
+EXPORT_SYMBOL(__get_hash_from_flowi6);
+
+__u32 __get_hash_from_flowi4(const struct flowi4 *fl4, struct flow_keys *keys)
+{
+ memset(keys, 0, sizeof(*keys));
+
+ keys->addrs.v4addrs.src = fl4->saddr;
+ keys->addrs.v4addrs.dst = fl4->daddr;
+ keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ keys->ports.src = fl4->fl4_sport;
+ keys->ports.dst = fl4->fl4_dport;
+ keys->keyid.keyid = fl4->fl4_gre_key;
+ keys->basic.ip_proto = fl4->flowi4_proto;
+
+ return flow_hash_from_keys(keys);
+}
+EXPORT_SYMBOL(__get_hash_from_flowi4);
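
These flowi-based variants make it possible to hash a flow before any skb exists, e.g. for output-path multipath selection. A minimal sketch, illustrative only:

    /* Illustrative sketch only: select one of nr_paths next hops from
     * a route lookup key alone, no skb required.
     */
    static u32 example_pick_path(const struct flowi4 *fl4, u32 nr_paths)
    {
            struct flow_keys keys;

            /* fills @keys from @fl4 and returns flow_hash_from_keys() */
            return __get_hash_from_flowi4(fl4, &keys) % nr_paths;
    }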
+
static const struct flow_dissector_key flow_keys_dissector_keys[] = {
{
.key_id = FLOW_DISSECTOR_KEY_CONTROL,
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
new file mode 100644
index 000000000..dfb1a9ca0
--- /dev/null
+++ b/net/core/lwtunnel.c
@@ -0,0 +1,249 @@
+/*
+ * lwtunnel: infrastructure for lightweight tunnels like MPLS
+ *
+ * Authors: Roopa Prabhu, <roopa@cumulusnetworks.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/capability.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/lwtunnel.h>
+#include <linux/in.h>
+#include <linux/init.h>
+#include <linux/err.h>
+
+#include <net/lwtunnel.h>
+#include <net/rtnetlink.h>
+#include <net/ip6_fib.h>
+
+struct lwtunnel_state *lwtunnel_state_alloc(int encap_len)
+{
+ struct lwtunnel_state *lws;
+
+ lws = kzalloc(sizeof(*lws) + encap_len, GFP_ATOMIC);
+
+ return lws;
+}
+EXPORT_SYMBOL(lwtunnel_state_alloc);
+
+static const struct lwtunnel_encap_ops __rcu *
+ lwtun_encaps[LWTUNNEL_ENCAP_MAX + 1] __read_mostly;
+
+int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *ops,
+ unsigned int num)
+{
+ if (num > LWTUNNEL_ENCAP_MAX)
+ return -ERANGE;
+
+ return !cmpxchg((const struct lwtunnel_encap_ops **)
+ &lwtun_encaps[num],
+ NULL, ops) ? 0 : -1;
+}
+EXPORT_SYMBOL(lwtunnel_encap_add_ops);
+
+int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *ops,
+ unsigned int encap_type)
+{
+ int ret;
+
+ if (encap_type == LWTUNNEL_ENCAP_NONE ||
+ encap_type > LWTUNNEL_ENCAP_MAX)
+ return -ERANGE;
+
+ ret = (cmpxchg((const struct lwtunnel_encap_ops **)
+ &lwtun_encaps[encap_type],
+ ops, NULL) == ops) ? 0 : -1;
+
+ synchronize_net();
+
+ return ret;
+}
+EXPORT_SYMBOL(lwtunnel_encap_del_ops);
+
+int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+ struct nlattr *encap, unsigned int family,
+ const void *cfg, struct lwtunnel_state **lws)
+{
+ const struct lwtunnel_encap_ops *ops;
+ int ret = -EINVAL;
+
+ if (encap_type == LWTUNNEL_ENCAP_NONE ||
+ encap_type > LWTUNNEL_ENCAP_MAX)
+ return ret;
+
+ ret = -EOPNOTSUPP;
+ rcu_read_lock();
+ ops = rcu_dereference(lwtun_encaps[encap_type]);
+ if (likely(ops && ops->build_state))
+ ret = ops->build_state(dev, encap, family, cfg, lws);
+ rcu_read_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL(lwtunnel_build_state);
+
+int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate)
+{
+ const struct lwtunnel_encap_ops *ops;
+ struct nlattr *nest;
+ int ret = -EINVAL;
+
+ if (!lwtstate)
+ return 0;
+
+ if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+ lwtstate->type > LWTUNNEL_ENCAP_MAX)
+ return 0;
+
+ ret = -EOPNOTSUPP;
+ nest = nla_nest_start(skb, RTA_ENCAP);
+ rcu_read_lock();
+ ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+ if (likely(ops && ops->fill_encap))
+ ret = ops->fill_encap(skb, lwtstate);
+ rcu_read_unlock();
+
+ if (ret)
+ goto nla_put_failure;
+ nla_nest_end(skb, nest);
+ ret = nla_put_u16(skb, RTA_ENCAP_TYPE, lwtstate->type);
+ if (ret)
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nest);
+
+ return (ret == -EOPNOTSUPP ? 0 : ret);
+}
+EXPORT_SYMBOL(lwtunnel_fill_encap);
+
+int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate)
+{
+ const struct lwtunnel_encap_ops *ops;
+ int ret = 0;
+
+ if (!lwtstate)
+ return 0;
+
+ if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+ lwtstate->type > LWTUNNEL_ENCAP_MAX)
+ return 0;
+
+ rcu_read_lock();
+ ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+ if (likely(ops && ops->get_encap_size))
+ ret = nla_total_size(ops->get_encap_size(lwtstate));
+ rcu_read_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL(lwtunnel_get_encap_size);
+
+int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+ const struct lwtunnel_encap_ops *ops;
+ int ret = 0;
+
+ if (!a && !b)
+ return 0;
+
+ if (!a || !b)
+ return 1;
+
+ if (a->type != b->type)
+ return 1;
+
+ if (a->type == LWTUNNEL_ENCAP_NONE ||
+ a->type > LWTUNNEL_ENCAP_MAX)
+ return 0;
+
+ rcu_read_lock();
+ ops = rcu_dereference(lwtun_encaps[a->type]);
+ if (likely(ops && ops->cmp_encap))
+ ret = ops->cmp_encap(a, b);
+ rcu_read_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL(lwtunnel_cmp_encap);
+
+int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ const struct lwtunnel_encap_ops *ops;
+ struct lwtunnel_state *lwtstate;
+ int ret = -EINVAL;
+
+ if (!dst)
+ goto drop;
+ lwtstate = dst->lwtstate;
+
+ if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+ lwtstate->type > LWTUNNEL_ENCAP_MAX)
+ return 0;
+
+ ret = -EOPNOTSUPP;
+ rcu_read_lock();
+ ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+ if (likely(ops && ops->output))
+ ret = ops->output(sk, skb);
+ rcu_read_unlock();
+
+ if (ret == -EOPNOTSUPP)
+ goto drop;
+
+ return ret;
+
+drop:
+ kfree_skb(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(lwtunnel_output);
+
+int lwtunnel_input(struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ const struct lwtunnel_encap_ops *ops;
+ struct lwtunnel_state *lwtstate;
+ int ret = -EINVAL;
+
+ if (!dst)
+ goto drop;
+ lwtstate = dst->lwtstate;
+
+ if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+ lwtstate->type > LWTUNNEL_ENCAP_MAX)
+ return 0;
+
+ ret = -EOPNOTSUPP;
+ rcu_read_lock();
+ ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+ if (likely(ops && ops->input))
+ ret = ops->input(skb);
+ rcu_read_unlock();
+
+ if (ret == -EOPNOTSUPP)
+ goto drop;
+
+ return ret;
+
+drop:
+ kfree_skb(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(lwtunnel_input);
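
The intended consumer of this file is an encap provider (MPLS is the first) that registers a lwtunnel_encap_ops for its type. A hedged sketch of the minimal shape, with hypothetical foo_* names, not part of this patch:

    /* Illustrative sketch only; "foo" is hypothetical. An encap
     * provider allocates per-route state in build_state() and
     * registers its ops under its LWTUNNEL_ENCAP_* number.
     */
    static int foo_build_state(struct net_device *dev, struct nlattr *encap,
                               unsigned int family, const void *cfg,
                               struct lwtunnel_state **ts)
    {
            struct lwtunnel_state *s;

            s = lwtunnel_state_alloc(sizeof(u32)); /* room for one label */
            if (!s)
                    return -ENOMEM;
            s->type = LWTUNNEL_ENCAP_MPLS;
            *ts = s;
            return 0;
    }

    static const struct lwtunnel_encap_ops foo_encap_ops = {
            .build_state = foo_build_state,
    };

    static int __init foo_init(void)
    {
            return lwtunnel_encap_add_ops(&foo_encap_ops,
                                          LWTUNNEL_ENCAP_MPLS);
    }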
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 84195dacb..2b515ba7e 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -274,8 +274,12 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device
(entries >= tbl->gc_thresh2 &&
time_after(now, tbl->last_flush + 5 * HZ))) {
if (!neigh_forced_gc(tbl) &&
- entries >= tbl->gc_thresh3)
+ entries >= tbl->gc_thresh3) {
+ net_info_ratelimited("%s: neighbor table overflow!\n",
+ tbl->id);
+ NEIGH_CACHE_STAT_INC(tbl, table_fulls);
goto out_entries;
+ }
}
n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
@@ -1849,6 +1853,7 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
ndst.ndts_forced_gc_runs += st->forced_gc_runs;
+ ndst.ndts_table_fulls += st->table_fulls;
}
if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
@@ -2717,12 +2722,12 @@ static int neigh_stat_seq_show(struct seq_file *seq, void *v)
struct neigh_statistics *st = v;
if (v == SEQ_START_TOKEN) {
- seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards\n");
+ seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
return 0;
}
seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
- "%08lx %08lx %08lx %08lx %08lx\n",
+ "%08lx %08lx %08lx %08lx %08lx %08lx\n",
atomic_read(&tbl->entries),
st->allocs,
@@ -2739,7 +2744,8 @@ static int neigh_stat_seq_show(struct seq_file *seq, void *v)
st->periodic_gc_runs,
st->forced_gc_runs,
- st->unres_discards
+ st->unres_discards,
+ st->table_fulls
);
return 0;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 18b34d771..830f8a7c1 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -31,7 +31,6 @@
static const char fmt_hex[] = "%#x\n";
static const char fmt_long_hex[] = "%#lx\n";
static const char fmt_dec[] = "%d\n";
-static const char fmt_udec[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";
@@ -202,7 +201,7 @@ static ssize_t speed_show(struct device *dev,
if (netif_running(netdev)) {
struct ethtool_cmd cmd;
if (!__ethtool_get_settings(netdev, &cmd))
- ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
+ ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd));
}
rtnl_unlock();
return ret;
@@ -404,6 +403,19 @@ static ssize_t group_store(struct device *dev, struct device_attribute *attr,
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);
+static int change_proto_down(struct net_device *dev, unsigned long proto_down)
+{
+ return dev_change_proto_down(dev, (bool) proto_down);
+}
+
+static ssize_t proto_down_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return netdev_store(dev, attr, buf, len, change_proto_down);
+}
+NETDEVICE_SHOW_RW(proto_down, fmt_dec);
+
static ssize_t phys_port_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -501,6 +513,7 @@ static struct attribute *net_class_attrs[] = {
&dev_attr_phys_port_id.attr,
&dev_attr_phys_port_name.attr,
&dev_attr_phys_switch_id.attr,
+ &dev_attr_proto_down.attr,
NULL,
};
ATTRIBUTE_GROUPS(net_class);
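
A driver opts into the new attribute by implementing ndo_change_proto_down; the core only forwards the request via dev_change_proto_down(), so the driver records the state itself. A hedged sketch with a hypothetical foo driver:

    /* Illustrative sketch only; "foo" is hypothetical. A switch port
     * driver might drop carrier or disable forwarding here while
     * keeping the netdev administratively up.
     */
    static int foo_ndo_change_proto_down(struct net_device *dev, bool down)
    {
            if (down)
                    netif_carrier_off(dev);
            else
                    netif_carrier_on(dev);

            dev->proto_down = down;
            return 0;
    }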
@@ -675,7 +688,7 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
struct rps_map *old_map, *map;
cpumask_var_t mask;
int err, cpu, i;
- static DEFINE_SPINLOCK(rps_map_lock);
+ static DEFINE_MUTEX(rps_map_mutex);
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -708,18 +721,21 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
map = NULL;
}
- spin_lock(&rps_map_lock);
+ mutex_lock(&rps_map_mutex);
old_map = rcu_dereference_protected(queue->rps_map,
- lockdep_is_held(&rps_map_lock));
+ mutex_is_locked(&rps_map_mutex));
rcu_assign_pointer(queue->rps_map, map);
- spin_unlock(&rps_map_lock);
if (map)
static_key_slow_inc(&rps_needed);
- if (old_map) {
- kfree_rcu(old_map, rcu);
+ if (old_map)
static_key_slow_dec(&rps_needed);
- }
+
+ mutex_unlock(&rps_map_mutex);
+
+ if (old_map)
+ kfree_rcu(old_map, rcu);
+
free_cpumask_var(mask);
return len;
}
@@ -1464,6 +1480,15 @@ static int of_dev_node_match(struct device *dev, const void *data)
return ret == 0 ? dev->of_node == data : ret;
}
+/*
+ * of_find_net_device_by_node - lookup the net device for the device node
+ * @np: OF device node
+ *
+ * Looks up the net_device structure corresponding to the device node.
+ * If successful, returns a pointer to the net_device with the embedded
+ * struct device refcount incremented by one, or NULL on failure. The
+ * refcount must be dropped when done with the net_device.
+ */
struct net_device *of_find_net_device_by_node(struct device_node *np)
{
struct device *dev;
diff --git a/net/core/net-traces.c b/net/core/net-traces.c
index ba3c01207..adef015b2 100644
--- a/net/core/net-traces.c
+++ b/net/core/net-traces.c
@@ -31,6 +31,7 @@
#include <trace/events/napi.h>
#include <trace/events/sock.h>
#include <trace/events/udp.h>
+#include <trace/events/fib.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(kfree_skb);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c126a878c..8bdada242 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -142,7 +142,7 @@ static void queue_process(struct work_struct *work)
*/
static int poll_one_napi(struct napi_struct *napi, int budget)
{
- int work;
+ int work = 0;
/* net_rx_action's ->poll() invocations and ours are
* synchronized by this test, which is only made while
@@ -151,7 +151,12 @@ static int poll_one_napi(struct napi_struct *napi, int budget)
if (!test_bit(NAPI_STATE_SCHED, &napi->state))
return budget;
- set_bit(NAPI_STATE_NPSVC, &napi->state);
+ /* If we set this bit but see that it has already been set,
+ * that indicates that napi has been disabled and we need
+ * to abort this operation
+ */
+ if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
+ goto out;
work = napi->poll(napi, budget);
WARN_ONCE(work > budget, "%pF exceeded budget in poll\n", napi->poll);
@@ -159,6 +164,7 @@ static int poll_one_napi(struct napi_struct *napi, int budget)
clear_bit(NAPI_STATE_NPSVC, &napi->state);
+out:
return budget - work;
}
@@ -380,6 +386,8 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
static atomic_t ip_ident;
struct ipv6hdr *ip6h;
+ WARN_ON_ONCE(!irqs_disabled());
+
udp_len = len + sizeof(*udph);
if (np->ipv6)
ip_len = udp_len + sizeof(*ip6h);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 1cbd20919..de8d5cc5e 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -273,7 +273,6 @@ struct pktgen_dev {
/* runtime counters relating to clone_skb */
- __u64 allocated_skbs;
__u32 clone_count;
int last_ok; /* Was last skb sent?
* Or a failed transmit of some sort?
@@ -2279,7 +2278,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
{
- pkt_dev->pkt_overhead = 0;
+ pkt_dev->pkt_overhead = LL_RESERVED_SPACE(pkt_dev->odev);
pkt_dev->pkt_overhead += pkt_dev->nr_labels*sizeof(u32);
pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
@@ -2788,6 +2787,7 @@ static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
} else {
skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
}
+ skb_reserve(skb, LL_RESERVED_SPACE(dev));
return skb;
}
@@ -3397,7 +3397,6 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
return;
}
pkt_dev->last_pkt_size = pkt_dev->skb->len;
- pkt_dev->allocated_skbs++;
pkt_dev->clone_count = 0; /* reset counter */
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 0861018be..0ec48403e 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -678,6 +678,12 @@ int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
continue;
if (nla_put_string(skb, i + 1, name))
goto nla_put_failure;
+ } else if (i == RTAX_FEATURES - 1) {
+ u32 user_features = metrics[i] & RTAX_FEATURE_MASK;
+
+ BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
+ if (nla_put_u32(skb, i + 1, user_features))
+ goto nla_put_failure;
} else {
if (nla_put_u32(skb, i + 1, metrics[i]))
goto nla_put_failure;
@@ -896,7 +902,9 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ rtnl_link_get_size(dev) /* IFLA_LINKINFO */
+ rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
+ nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
- + nla_total_size(MAX_PHYS_ITEM_ID_LEN); /* IFLA_PHYS_SWITCH_ID */
+ + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
+ + nla_total_size(1); /* IFLA_PROTO_DOWN */
+
}
static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
@@ -1082,7 +1090,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
(dev->ifalias &&
nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
nla_put_u32(skb, IFLA_CARRIER_CHANGES,
- atomic_read(&dev->carrier_changes)))
+ atomic_read(&dev->carrier_changes)) ||
+ nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
goto nla_put_failure;
if (1) {
@@ -1319,6 +1328,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */
[IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
[IFLA_LINK_NETNSID] = { .type = NLA_S32 },
+ [IFLA_PROTO_DOWN] = { .type = NLA_U8 },
};
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -1861,6 +1871,14 @@ static int do_setlink(const struct sk_buff *skb,
}
err = 0;
+ if (tb[IFLA_PROTO_DOWN]) {
+ err = dev_change_proto_down(dev,
+ nla_get_u8(tb[IFLA_PROTO_DOWN]));
+ if (err)
+ goto errout;
+ status |= DO_SETLINK_NOTIFY;
+ }
+
errout:
if (status & DO_SETLINK_MODIFIED) {
if (status & DO_SETLINK_NOTIFY)
@@ -1951,16 +1969,30 @@ static int rtnl_group_dellink(const struct net *net, int group)
return 0;
}
+int rtnl_delete_link(struct net_device *dev)
+{
+ const struct rtnl_link_ops *ops;
+ LIST_HEAD(list_kill);
+
+ ops = dev->rtnl_link_ops;
+ if (!ops || !ops->dellink)
+ return -EOPNOTSUPP;
+
+ ops->dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtnl_delete_link);
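
Factoring this out lets other kernel code delete a link it manages through rtnl_link ops. A hedged sketch of such a caller, illustrative only:

    /* Illustrative sketch only: delete a managed link from elsewhere
     * in the kernel. RTNL must be held, as in the netlink path below.
     */
    static void example_destroy_link(struct net_device *dev)
    {
            rtnl_lock();
            if (rtnl_delete_link(dev) == -EOPNOTSUPP)
                    netdev_warn(dev, "no rtnl_link dellink op\n");
            rtnl_unlock();
    }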
+
static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
- const struct rtnl_link_ops *ops;
struct net_device *dev;
struct ifinfomsg *ifm;
char ifname[IFNAMSIZ];
struct nlattr *tb[IFLA_MAX+1];
int err;
- LIST_HEAD(list_kill);
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
if (err < 0)
@@ -1982,13 +2014,7 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
if (!dev)
return -ENODEV;
- ops = dev->rtnl_link_ops;
- if (!ops || !ops->dellink)
- return -EOPNOTSUPP;
-
- ops->dellink(dev, &list_kill);
- unregister_netdevice_many(&list_kill);
- return 0;
+ return rtnl_delete_link(dev);
}
int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7bfa18746..fab4599ba 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -392,7 +392,7 @@ EXPORT_SYMBOL(napi_alloc_frag);
/**
* __netdev_alloc_skb - allocate an skbuff for rx on a specific device
* @dev: network device to receive on
- * @length: length to allocate
+ * @len: length to allocate
* @gfp_mask: get_free_pages mask, passed to alloc_skb
*
* Allocate a new &sk_buff and assign it a usage count of one. The
@@ -461,7 +461,7 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
/**
* __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
* @napi: napi instance this buffer was allocated for
- * @length: length to allocate
+ * @len: length to allocate
* @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
*
* Allocate a new sk_buff for use in NAPI receive. This buffer will
diff --git a/net/core/sock.c b/net/core/sock.c
index 193901d09..3307c0224 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2078,7 +2078,7 @@ suppress_allocation:
EXPORT_SYMBOL(__sk_mem_schedule);
/**
- * __sk_reclaim - reclaim memory_allocated
+ * __sk_mem_reclaim - reclaim memory_allocated
* @sk: socket
* @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
*/
@@ -2740,10 +2740,8 @@ static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
return;
kfree(rsk_prot->slab_name);
rsk_prot->slab_name = NULL;
- if (rsk_prot->slab) {
- kmem_cache_destroy(rsk_prot->slab);
- rsk_prot->slab = NULL;
- }
+ kmem_cache_destroy(rsk_prot->slab);
+ rsk_prot->slab = NULL;
}
static int req_prot_init(const struct proto *prot)
@@ -2828,10 +2826,8 @@ void proto_unregister(struct proto *prot)
list_del(&prot->node);
mutex_unlock(&proto_list_mutex);
- if (prot->slab != NULL) {
- kmem_cache_destroy(prot->slab);
- prot->slab = NULL;
- }
+ kmem_cache_destroy(prot->slab);
+ prot->slab = NULL;
req_prot_cleanup(prot->rsk_prot);
diff --git a/net/core/timestamping.c b/net/core/timestamping.c
index 43d3dd62f..42689d5c4 100644
--- a/net/core/timestamping.c
+++ b/net/core/timestamping.c
@@ -60,11 +60,15 @@ bool skb_defer_rx_timestamp(struct sk_buff *skb)
struct phy_device *phydev;
unsigned int type;
+ if (!skb->dev || !skb->dev->phydev || !skb->dev->phydev->drv)
+ return false;
+
if (skb_headroom(skb) < ETH_HLEN)
return false;
+
__skb_push(skb, ETH_HLEN);
- type = classify(skb);
+ type = ptp_classify_raw(skb);
__skb_pull(skb, ETH_HLEN);
diff --git a/net/core/utils.c b/net/core/utils.c
index a7732a068..3dffce953 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -301,7 +301,7 @@ out:
EXPORT_SYMBOL(in6_pton);
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
- __be32 from, __be32 to, int pseudohdr)
+ __be32 from, __be32 to, bool pseudohdr)
{
if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_replace4(sum, from, to);
@@ -318,7 +318,7 @@ EXPORT_SYMBOL(inet_proto_csum_replace4);
void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
const __be32 *from, const __be32 *to,
- int pseudohdr)
+ bool pseudohdr)
{
__be32 diff[] = {
~from[0], ~from[1], ~from[2], ~from[3],
@@ -336,6 +336,19 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
}
EXPORT_SYMBOL(inet_proto_csum_replace16);
+void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
+ __wsum diff, bool pseudohdr)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL) {
+ *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
+ if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
+ skb->csum = ~csum_add(diff, ~skb->csum);
+ } else if (pseudohdr) {
+ *sum = ~csum_fold(csum_add(diff, csum_unfold(*sum)));
+ }
+}
+EXPORT_SYMBOL(inet_proto_csum_replace_by_diff);
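
The new helper takes a precomputed checksum difference, which suits callers that rewrite something other than a single 4- or 16-byte quantity. A hedged sketch, not part of this patch:

    /* Illustrative sketch only: fold the difference from rewriting an
     * arbitrary 16-byte region into an L4 checksum in one step.
     */
    static void example_replace16(struct sk_buff *skb, __sum16 *check,
                                  const __be32 *from, const __be32 *to)
    {
            __wsum diff = csum_sub(csum_partial(to, 16, 0),
                                   csum_partial(from, 16, 0));

            inet_proto_csum_replace_by_diff(check, skb, diff, true);
    }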
+
struct __net_random_once_work {
struct work_struct work;
struct static_key *key;
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index bd9e718c2..3de0d0362 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -398,12 +398,8 @@ out_err:
void dccp_ackvec_exit(void)
{
- if (dccp_ackvec_slab != NULL) {
- kmem_cache_destroy(dccp_ackvec_slab);
- dccp_ackvec_slab = NULL;
- }
- if (dccp_ackvec_record_slab != NULL) {
- kmem_cache_destroy(dccp_ackvec_record_slab);
- dccp_ackvec_record_slab = NULL;
- }
+ kmem_cache_destroy(dccp_ackvec_slab);
+ dccp_ackvec_slab = NULL;
+ kmem_cache_destroy(dccp_ackvec_record_slab);
+ dccp_ackvec_record_slab = NULL;
}
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index 834989751..90f77d08c 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -95,8 +95,7 @@ static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_f
static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
{
- if (slab != NULL)
- kmem_cache_destroy(slab);
+ kmem_cache_destroy(slab);
}
static int __init ccid_activate(struct ccid_operations *ccid_ops)
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 30addee2d..838f524cf 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -48,8 +48,6 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
tw->tw_ipv6only = sk->sk_ipv6only;
}
#endif
- /* Linkage updates. */
- __inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
/* Get the TIME_WAIT timeout firing. */
if (timeo < rto)
@@ -60,6 +58,8 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
timeo = DCCP_TIMEWAIT_LEN;
inet_twsk_schedule(tw, timeo);
+ /* Linkage updates. */
+ __inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
inet_twsk_put(tw);
} else {
/* Sorry, if we're out of memory, just CLOSE this
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 9d66a0f72..295bbd6a5 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -229,7 +229,6 @@ static const struct fib_rules_ops __net_initconst dn_fib_rules_ops_template = {
.configure = dn_fib_rule_configure,
.compare = dn_fib_rule_compare,
.fill = dn_fib_rule_fill,
- .default_pref = fib_default_rule_pref,
.flush_cache = dn_fib_rule_flush_cache,
.nlgroup = RTNLGRP_DECnet_RULE,
.policy = dn_fib_rule_policy,
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index b445d492c..adb5325f4 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -22,6 +22,7 @@
#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/sysfs.h>
+#include <linux/phy_fixed.h>
#include "dsa_priv.h"
char dsa_driver_version[] = "0.1";
@@ -176,6 +177,41 @@ __ATTRIBUTE_GROUPS(dsa_hwmon);
#endif /* CONFIG_NET_DSA_HWMON */
/* basic switch operations **************************************************/
+static int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct net_device *master)
+{
+ struct dsa_chip_data *cd = ds->pd;
+ struct device_node *port_dn;
+ struct phy_device *phydev;
+ int ret, port, mode;
+
+ for (port = 0; port < DSA_MAX_PORTS; port++) {
+ if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)))
+ continue;
+
+ port_dn = cd->port_dn[port];
+ if (of_phy_is_fixed_link(port_dn)) {
+ ret = of_phy_register_fixed_link(port_dn);
+ if (ret) {
+ netdev_err(master,
+ "failed to register fixed PHY\n");
+ return ret;
+ }
+ phydev = of_phy_find_device(port_dn);
+
+ mode = of_get_phy_mode(port_dn);
+ if (mode < 0)
+ mode = PHY_INTERFACE_MODE_NA;
+ phydev->interface = mode;
+
+ genphy_config_init(phydev);
+ genphy_read_status(phydev);
+ if (ds->drv->adjust_link)
+ ds->drv->adjust_link(ds, port, phydev);
+ }
+ }
+ return 0;
+}
+
static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
{
struct dsa_switch_driver *drv = ds->drv;
@@ -270,7 +306,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
if (ret < 0)
goto out;
- ds->slave_mii_bus = mdiobus_alloc();
+ ds->slave_mii_bus = devm_mdiobus_alloc(parent);
if (ds->slave_mii_bus == NULL) {
ret = -ENOMEM;
goto out;
@@ -279,7 +315,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
ret = mdiobus_register(ds->slave_mii_bus);
if (ret < 0)
- goto out_free;
+ goto out;
/*
@@ -297,6 +333,14 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
}
}
+ /* Perform configuration of the CPU and DSA ports */
+ ret = dsa_cpu_dsa_setup(ds, dst->master_netdev);
+ if (ret < 0) {
+ netdev_err(dst->master_netdev, "[%d] : can't configure CPU and DSA ports\n",
+ index);
+ ret = 0;
+ }
+
#ifdef CONFIG_NET_DSA_HWMON
/* If the switch provides a temperature sensor,
* register with hardware monitoring subsystem.
@@ -324,10 +368,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
return ret;
-out_free:
- mdiobus_free(ds->slave_mii_bus);
out:
- kfree(ds);
return ret;
}
@@ -357,7 +398,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
/*
* Allocate and initialise switch state.
*/
- ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL);
+ ds = devm_kzalloc(parent, sizeof(*ds) + drv->priv_size, GFP_KERNEL);
if (ds == NULL)
return ERR_PTR(-ENOMEM);
@@ -377,10 +418,47 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
static void dsa_switch_destroy(struct dsa_switch *ds)
{
+ struct device_node *port_dn;
+ struct phy_device *phydev;
+ struct dsa_chip_data *cd = ds->pd;
+ int port;
+
#ifdef CONFIG_NET_DSA_HWMON
if (ds->hwmon_dev)
hwmon_device_unregister(ds->hwmon_dev);
#endif
+
+ /* Disable configuration of the CPU and DSA ports */
+ for (port = 0; port < DSA_MAX_PORTS; port++) {
+ if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)))
+ continue;
+
+ port_dn = cd->port_dn[port];
+ if (of_phy_is_fixed_link(port_dn)) {
+ phydev = of_phy_find_device(port_dn);
+ if (phydev) {
+ int addr = phydev->addr;
+
+ phy_device_free(phydev);
+ of_node_put(port_dn);
+ fixed_phy_del(addr);
+ }
+ }
+ }
+
+ /* Destroy network devices for physical switch ports. */
+ for (port = 0; port < DSA_MAX_PORTS; port++) {
+ if (!(ds->phys_port_mask & (1 << port)))
+ continue;
+
+ if (!ds->ports[port])
+ continue;
+
+ unregister_netdev(ds->ports[port]);
+ free_netdev(ds->ports[port]);
+ }
+
+ mdiobus_unregister(ds->slave_mii_bus);
}
#ifdef CONFIG_PM_SLEEP
@@ -554,6 +632,31 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,
return 0;
}
+static int dsa_of_probe_links(struct dsa_platform_data *pd,
+ struct dsa_chip_data *cd,
+ int chip_index, int port_index,
+ struct device_node *port,
+ const char *port_name)
+{
+ struct device_node *link;
+ int link_index;
+ int ret;
+
+ for (link_index = 0;; link_index++) {
+ link = of_parse_phandle(port, "link", link_index);
+ if (!link)
+ break;
+
+ if (!strcmp(port_name, "dsa") && pd->nr_chips > 1) {
+ ret = dsa_of_setup_routing_table(pd, cd, chip_index,
+ port_index, link);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
+
static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
{
int i;
@@ -566,6 +669,10 @@ static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
port_index++;
}
kfree(pd->chip[i].rtable);
+
+ /* Drop our reference to the MDIO bus device */
+ if (pd->chip[i].host_dev)
+ put_device(pd->chip[i].host_dev);
}
kfree(pd->chip);
}
@@ -573,8 +680,8 @@ static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
static int dsa_of_probe(struct device *dev)
{
struct device_node *np = dev->of_node;
- struct device_node *child, *mdio, *ethernet, *port, *link;
- struct mii_bus *mdio_bus;
+ struct device_node *child, *mdio, *ethernet, *port;
+ struct mii_bus *mdio_bus, *mdio_bus_switch;
struct net_device *ethernet_dev;
struct dsa_platform_data *pd;
struct dsa_chip_data *cd;
@@ -593,16 +700,22 @@ static int dsa_of_probe(struct device *dev)
return -EPROBE_DEFER;
ethernet = of_parse_phandle(np, "dsa,ethernet", 0);
- if (!ethernet)
- return -EINVAL;
+ if (!ethernet) {
+ ret = -EINVAL;
+ goto out_put_mdio;
+ }
ethernet_dev = of_find_net_device_by_node(ethernet);
- if (!ethernet_dev)
- return -EPROBE_DEFER;
+ if (!ethernet_dev) {
+ ret = -EPROBE_DEFER;
+ goto out_put_mdio;
+ }
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return -ENOMEM;
+ if (!pd) {
+ ret = -ENOMEM;
+ goto out_put_ethernet;
+ }
dev->platform_data = pd;
pd->of_netdev = ethernet_dev;
@@ -623,7 +736,9 @@ static int dsa_of_probe(struct device *dev)
cd = &pd->chip[chip_index];
cd->of_node = child;
- cd->host_dev = &mdio_bus->dev;
+
+ /* When assigning the host device, increment its refcount */
+ cd->host_dev = get_device(&mdio_bus->dev);
sw_addr = of_get_property(child, "reg", NULL);
if (!sw_addr)
@@ -636,6 +751,22 @@ static int dsa_of_probe(struct device *dev)
if (!of_property_read_u32(child, "eeprom-length", &eeprom_len))
cd->eeprom_len = eeprom_len;
+ mdio = of_parse_phandle(child, "mii-bus", 0);
+ if (mdio) {
+ mdio_bus_switch = of_mdio_find_bus(mdio);
+ if (!mdio_bus_switch) {
+ ret = -EPROBE_DEFER;
+ goto out_free_chip;
+ }
+
+ /* Drop the mdio_bus device ref, replacing the host
+ * device with the mdio_bus_switch device, keeping
+ * the refcount from of_mdio_find_bus() above.
+ */
+ put_device(cd->host_dev);
+ cd->host_dev = &mdio_bus_switch->dev;
+ }
+
for_each_available_child_of_node(child, port) {
port_reg = of_get_property(port, "reg", NULL);
if (!port_reg)
@@ -658,19 +789,18 @@ static int dsa_of_probe(struct device *dev)
goto out_free_chip;
}
- link = of_parse_phandle(port, "link", 0);
-
- if (!strcmp(port_name, "dsa") && link &&
- pd->nr_chips > 1) {
- ret = dsa_of_setup_routing_table(pd, cd,
- chip_index, port_index, link);
- if (ret)
- goto out_free_chip;
- }
+ ret = dsa_of_probe_links(pd, cd, chip_index,
+ port_index, port, port_name);
+ if (ret)
+ goto out_free_chip;
}
}
+ /* The individual chips hold their own refcount on the mdio bus,
+ * so drop ours */
+ put_device(&mdio_bus->dev);
+
return 0;
out_free_chip:
@@ -678,6 +808,10 @@ out_free_chip:
out_free:
kfree(pd);
dev->platform_data = NULL;
+out_put_ethernet:
+ put_device(&ethernet_dev->dev);
+out_put_mdio:
+ put_device(&mdio_bus->dev);
return ret;
}
@@ -689,6 +823,7 @@ static void dsa_of_remove(struct device *dev)
return;
dsa_of_free_platform_data(pd);
+ put_device(&pd->of_netdev->dev);
kfree(pd);
}
#else
@@ -702,10 +837,11 @@ static inline void dsa_of_remove(struct device *dev)
}
#endif
-static void dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
- struct device *parent, struct dsa_platform_data *pd)
+static int dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
+ struct device *parent, struct dsa_platform_data *pd)
{
int i;
+ unsigned configured = 0;
dst->pd = pd;
dst->master_netdev = dev;
@@ -725,9 +861,17 @@ static void dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
dst->ds[i] = ds;
if (ds->drv->poll_link != NULL)
dst->link_poll_needed = 1;
+
+ ++configured;
}
/*
+ * If no switch was found, exit cleanly
+ */
+ if (!configured)
+ return -EPROBE_DEFER;
+
+ /*
* If we use a tagging format that doesn't have an ethertype
* field, make sure that all packets from this point on get
* sent to the tag format's receive function.
@@ -743,6 +887,8 @@ static void dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
dst->link_poll_timer.expires = round_jiffies(jiffies + HZ);
add_timer(&dst->link_poll_timer);
}
+
+ return 0;
}
static int dsa_probe(struct platform_device *pdev)
@@ -783,7 +929,7 @@ static int dsa_probe(struct platform_device *pdev)
goto out;
}
- dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+ dst = devm_kzalloc(&pdev->dev, sizeof(*dst), GFP_KERNEL);
if (dst == NULL) {
dev_put(dev);
ret = -ENOMEM;
@@ -792,7 +938,9 @@ static int dsa_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dst);
- dsa_setup_dst(dst, dev, &pdev->dev, pd);
+ ret = dsa_setup_dst(dst, dev, &pdev->dev, pd);
+ if (ret)
+ goto out;
return 0;
@@ -814,7 +962,7 @@ static void dsa_remove_dst(struct dsa_switch_tree *dst)
for (i = 0; i < dst->pd->nr_chips; i++) {
struct dsa_switch *ds = dst->ds[i];
- if (ds != NULL)
+ if (ds)
dsa_switch_destroy(ds);
}
}
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index d5f1f9b86..311796c80 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -13,9 +13,10 @@
#include <linux/phy.h>
#include <linux/netdevice.h>
+#include <linux/netpoll.h>
struct dsa_device_ops {
- netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev);
+ struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev);
int (*rcv)(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev);
};
@@ -26,7 +27,7 @@ struct dsa_slave_priv {
* switch port.
*/
struct net_device *dev;
- netdev_tx_t (*xmit)(struct sk_buff *skb,
+ struct sk_buff * (*xmit)(struct sk_buff *skb,
struct net_device *dev);
/*
@@ -47,6 +48,9 @@ struct dsa_slave_priv {
int old_duplex;
struct net_device *bridge_dev;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ struct netpoll *netpoll;
+#endif
};
/* dsa.c */
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 25dbb91e1..7d91f4612 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -18,6 +18,7 @@
#include <net/rtnetlink.h>
#include <net/switchdev.h>
#include <linux/if_bridge.h>
+#include <linux/netpoll.h>
#include "dsa_priv.h"
/* slave mii_bus handling ***************************************************/
@@ -199,103 +200,212 @@ out:
return 0;
}
-static int dsa_slave_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr, u16 vid, u16 nlm_flags)
+static int dsa_bridge_check_vlan_range(struct dsa_switch *ds,
+ const struct net_device *bridge,
+ u16 vid_begin, u16 vid_end)
{
+ struct dsa_slave_priv *p;
+ struct net_device *dev, *vlan_br;
+ DECLARE_BITMAP(members, DSA_MAX_PORTS);
+ DECLARE_BITMAP(untagged, DSA_MAX_PORTS);
+ u16 vid;
+ int member, err;
+
+ if (!ds->drv->vlan_getnext || !vid_begin)
+ return -EOPNOTSUPP;
+
+ vid = vid_begin - 1;
+
+ do {
+ err = ds->drv->vlan_getnext(ds, &vid, members, untagged);
+ if (err)
+ break;
+
+ if (vid > vid_end)
+ break;
+
+ member = find_first_bit(members, DSA_MAX_PORTS);
+ if (member == DSA_MAX_PORTS)
+ continue;
+
+ dev = ds->ports[member];
+ p = netdev_priv(dev);
+ vlan_br = p->bridge_dev;
+ if (vlan_br == bridge)
+ continue;
+
+ netdev_dbg(vlan_br, "hardware VLAN %d already in use\n", vid);
+ return -EOPNOTSUPP;
+ } while (vid < vid_end);
+
+ return err == -ENOENT ? 0 : err;
+}
+
+static int dsa_slave_port_vlan_add(struct net_device *dev,
+ struct switchdev_obj *obj)
+{
+ struct switchdev_obj_vlan *vlan = &obj->u.vlan;
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->parent;
- int ret = -EOPNOTSUPP;
+ u16 vid;
+ int err;
- if (ds->drv->fdb_add)
- ret = ds->drv->fdb_add(ds, p->port, addr, vid);
+ switch (obj->trans) {
+ case SWITCHDEV_TRANS_PREPARE:
+ if (!ds->drv->port_vlan_add || !ds->drv->port_pvid_set)
+ return -EOPNOTSUPP;
- return ret;
+ /* If the requested port doesn't belong to the same bridge as
+ * the VLAN members, fall back to software VLAN (hopefully).
+ */
+ err = dsa_bridge_check_vlan_range(ds, p->bridge_dev,
+ vlan->vid_begin,
+ vlan->vid_end);
+ if (err)
+ return err;
+ break;
+ case SWITCHDEV_TRANS_COMMIT:
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ err = ds->drv->port_vlan_add(ds, p->port, vid,
+ vlan->flags &
+ BRIDGE_VLAN_INFO_UNTAGGED);
+ if (!err && vlan->flags & BRIDGE_VLAN_INFO_PVID)
+ err = ds->drv->port_pvid_set(ds, p->port, vid);
+ if (err)
+ return err;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
}
-static int dsa_slave_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr, u16 vid)
+static int dsa_slave_port_vlan_del(struct net_device *dev,
+ struct switchdev_obj *obj)
{
+ struct switchdev_obj_vlan *vlan = &obj->u.vlan;
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->parent;
- int ret = -EOPNOTSUPP;
+ u16 vid;
+ int err;
- if (ds->drv->fdb_del)
- ret = ds->drv->fdb_del(ds, p->port, addr, vid);
+ if (!ds->drv->port_vlan_del)
+ return -EOPNOTSUPP;
- return ret;
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ err = ds->drv->port_vlan_del(ds, p->port, vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
-static int dsa_slave_fill_info(struct net_device *dev, struct sk_buff *skb,
- const unsigned char *addr, u16 vid,
- bool is_static,
- u32 portid, u32 seq, int type,
- unsigned int flags)
+static int dsa_slave_port_vlan_dump(struct net_device *dev,
+ struct switchdev_obj *obj)
{
- struct nlmsghdr *nlh;
- struct ndmsg *ndm;
+ struct switchdev_obj_vlan *vlan = &obj->u.vlan;
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+ DECLARE_BITMAP(members, DSA_MAX_PORTS);
+ DECLARE_BITMAP(untagged, DSA_MAX_PORTS);
+ u16 pvid, vid = 0;
+ int err;
- nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
- if (!nlh)
- return -EMSGSIZE;
+ if (!ds->drv->vlan_getnext || !ds->drv->port_pvid_get)
+ return -EOPNOTSUPP;
- ndm = nlmsg_data(nlh);
- ndm->ndm_family = AF_BRIDGE;
- ndm->ndm_pad1 = 0;
- ndm->ndm_pad2 = 0;
- ndm->ndm_flags = NTF_EXT_LEARNED;
- ndm->ndm_type = 0;
- ndm->ndm_ifindex = dev->ifindex;
- ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
+ err = ds->drv->port_pvid_get(ds, p->port, &pvid);
+ if (err)
+ return err;
- if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
- goto nla_put_failure;
+ for (;;) {
+ err = ds->drv->vlan_getnext(ds, &vid, members, untagged);
+ if (err)
+ break;
- if (vid && nla_put_u16(skb, NDA_VLAN, vid))
- goto nla_put_failure;
+ if (!test_bit(p->port, members))
+ continue;
- nlmsg_end(skb, nlh);
- return 0;
+ memset(vlan, 0, sizeof(*vlan));
+ vlan->vid_begin = vlan->vid_end = vid;
+
+ if (vid == pvid)
+ vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+
+ if (test_bit(p->port, untagged))
+ vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+
+ err = obj->cb(dev, obj);
+ if (err)
+ break;
+ }
+
+ return err == -ENOENT ? 0 : err;
+}
-nla_put_failure:
- nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
+static int dsa_slave_port_fdb_add(struct net_device *dev,
+ struct switchdev_obj *obj)
+{
+ struct switchdev_obj_fdb *fdb = &obj->u.fdb;
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+ int ret = -EOPNOTSUPP;
+
+ if (obj->trans == SWITCHDEV_TRANS_PREPARE)
+ ret = ds->drv->port_fdb_add ? 0 : -EOPNOTSUPP;
+ else if (obj->trans == SWITCHDEV_TRANS_COMMIT)
+ ret = ds->drv->port_fdb_add(ds, p->port, fdb->addr, fdb->vid);
+
+ return ret;
}
-/* Dump information about entries, in response to GETNEIGH */
-static int dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
- struct net_device *dev,
- struct net_device *filter_dev, int idx)
+static int dsa_slave_port_fdb_del(struct net_device *dev,
+ struct switchdev_obj *obj)
+{
+ struct switchdev_obj_fdb *fdb = &obj->u.fdb;
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+ int ret = -EOPNOTSUPP;
+
+ if (ds->drv->port_fdb_del)
+ ret = ds->drv->port_fdb_del(ds, p->port, fdb->addr, fdb->vid);
+
+ return ret;
+}
+
+static int dsa_slave_port_fdb_dump(struct net_device *dev,
+ struct switchdev_obj *obj)
{
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->parent;
unsigned char addr[ETH_ALEN] = { 0 };
+ u16 vid = 0;
int ret;
- if (!ds->drv->fdb_getnext)
+ if (!ds->drv->port_fdb_getnext)
return -EOPNOTSUPP;
- for (; ; idx++) {
+ for (;;) {
bool is_static;
- ret = ds->drv->fdb_getnext(ds, p->port, addr, &is_static);
+ ret = ds->drv->port_fdb_getnext(ds, p->port, addr, &vid,
+ &is_static);
if (ret < 0)
break;
- if (idx < cb->args[0])
- continue;
+ obj->u.fdb.addr = addr;
+ obj->u.fdb.vid = vid;
+ obj->u.fdb.ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
- ret = dsa_slave_fill_info(dev, skb, addr, 0,
- is_static,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWNEIGH, NLM_F_MULTI);
+ ret = obj->cb(dev, obj);
if (ret < 0)
break;
}
- return idx;
+ return ret == -ENOENT ? 0 : ret;
}
static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -368,6 +478,71 @@ static int dsa_slave_port_attr_set(struct net_device *dev,
return ret;
}
+static int dsa_slave_port_obj_add(struct net_device *dev,
+ struct switchdev_obj *obj)
+{
+ int err;
+
+ /* For the prepare phase, ensure the full set of changes is feasible in
+ * one go in order to signal a failure properly. If an operation is not
+ * supported, return -EOPNOTSUPP.
+ */
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_PORT_FDB:
+ err = dsa_slave_port_fdb_add(dev, obj);
+ break;
+ case SWITCHDEV_OBJ_PORT_VLAN:
+ err = dsa_slave_port_vlan_add(dev, obj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int dsa_slave_port_obj_del(struct net_device *dev,
+ struct switchdev_obj *obj)
+{
+ int err;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_PORT_FDB:
+ err = dsa_slave_port_fdb_del(dev, obj);
+ break;
+ case SWITCHDEV_OBJ_PORT_VLAN:
+ err = dsa_slave_port_vlan_del(dev, obj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int dsa_slave_port_obj_dump(struct net_device *dev,
+ struct switchdev_obj *obj)
+{
+ int err;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_PORT_FDB:
+ err = dsa_slave_port_fdb_dump(dev, obj);
+ break;
+ case SWITCHDEV_OBJ_PORT_VLAN:
+ err = dsa_slave_port_vlan_dump(dev, obj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
static int dsa_slave_bridge_port_join(struct net_device *dev,
struct net_device *br)
{
@@ -423,24 +598,53 @@ static int dsa_slave_port_attr_get(struct net_device *dev,
return 0;
}
-static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
+static inline netdev_tx_t dsa_netpoll_send_skb(struct dsa_slave_priv *p,
+ struct sk_buff *skb)
{
- struct dsa_slave_priv *p = netdev_priv(dev);
-
- return p->xmit(skb, dev);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ if (p->netpoll)
+ netpoll_send_skb(p->netpoll, skb);
+#else
+ BUG();
+#endif
+ return NETDEV_TX_OK;
}
-static netdev_tx_t dsa_slave_notag_xmit(struct sk_buff *skb,
- struct net_device *dev)
+static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
+ struct sk_buff *nskb;
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ /* Transmit function may have to reallocate the original SKB */
+ nskb = p->xmit(skb, dev);
+ if (!nskb)
+ return NETDEV_TX_OK;
- skb->dev = p->parent->dst->master_netdev;
- dev_queue_xmit(skb);
+ /* SKB for netpoll still needs to be mangled with the protocol-specific
+ * tag to be successfully transmitted
+ */
+ if (unlikely(netpoll_tx_running(dev)))
+ return dsa_netpoll_send_skb(p, nskb);
+
+ /* Queue the SKB for transmission on the parent interface, but
+ * do not modify its EtherType
+ */
+ nskb->dev = p->parent->dst->master_netdev;
+ dev_queue_xmit(nskb);
return NETDEV_TX_OK;
}
+static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ /* Just return the original SKB */
+ return skb;
+}
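/* Illustrative sketch, not from this patch: the contract every tagging
 * protocol now follows. The tagger either returns an skb ready for the
 * master interface (possibly a reallocated one) or frees the original and
 * returns NULL; dsa_slave_xmit() above does the stats accounting and the
 * dev_queue_xmit() in one place. MYTAG_LEN and the 4-byte header layout
 * are hypothetical.
 */
#define MYTAG_LEN 4

static struct sk_buff *mytag_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	u8 *tag;

	if (skb_cow_head(skb, MYTAG_LEN) < 0) {
		kfree_skb(skb);
		return NULL;	/* consumed: caller treats NULL as TX_OK */
	}

	skb_push(skb, MYTAG_LEN);
	memmove(skb->data, skb->data + MYTAG_LEN, 2 * ETH_ALEN);

	tag = skb->data + 2 * ETH_ALEN;
	tag[0] = 0x80;		/* hypothetical opcode */
	tag[1] = 1 << p->port;	/* destination port map */
	tag[2] = 0;
	tag[3] = 0;

	return skb;
}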
+
/* ethtool operations *******************************************************/
static int
@@ -670,6 +874,49 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
return ret;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static int dsa_slave_netpoll_setup(struct net_device *dev,
+ struct netpoll_info *ni)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+ struct net_device *master = ds->dst->master_netdev;
+ struct netpoll *netpoll;
+ int err = 0;
+
+ netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
+ if (!netpoll)
+ return -ENOMEM;
+
+ err = __netpoll_setup(netpoll, master);
+ if (err) {
+ kfree(netpoll);
+ goto out;
+ }
+
+ p->netpoll = netpoll;
+out:
+ return err;
+}
+
+static void dsa_slave_netpoll_cleanup(struct net_device *dev)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct netpoll *netpoll = p->netpoll;
+
+ if (!netpoll)
+ return;
+
+ p->netpoll = NULL;
+
+ __netpoll_free_async(netpoll);
+}
+
+static void dsa_slave_poll_controller(struct net_device *dev)
+{
+}
+#endif
+
static const struct ethtool_ops dsa_slave_ethtool_ops = {
.get_settings = dsa_slave_get_settings,
.set_settings = dsa_slave_set_settings,
@@ -697,16 +944,27 @@ static const struct net_device_ops dsa_slave_netdev_ops = {
.ndo_change_rx_flags = dsa_slave_change_rx_flags,
.ndo_set_rx_mode = dsa_slave_set_rx_mode,
.ndo_set_mac_address = dsa_slave_set_mac_address,
- .ndo_fdb_add = dsa_slave_fdb_add,
- .ndo_fdb_del = dsa_slave_fdb_del,
- .ndo_fdb_dump = dsa_slave_fdb_dump,
+ .ndo_fdb_add = switchdev_port_fdb_add,
+ .ndo_fdb_del = switchdev_port_fdb_del,
+ .ndo_fdb_dump = switchdev_port_fdb_dump,
.ndo_do_ioctl = dsa_slave_ioctl,
.ndo_get_iflink = dsa_slave_get_iflink,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_netpoll_setup = dsa_slave_netpoll_setup,
+ .ndo_netpoll_cleanup = dsa_slave_netpoll_cleanup,
+ .ndo_poll_controller = dsa_slave_poll_controller,
+#endif
+ .ndo_bridge_getlink = switchdev_port_bridge_getlink,
+ .ndo_bridge_setlink = switchdev_port_bridge_setlink,
+ .ndo_bridge_dellink = switchdev_port_bridge_dellink,
};
static const struct switchdev_ops dsa_slave_switchdev_ops = {
.switchdev_port_attr_get = dsa_slave_port_attr_get,
.switchdev_port_attr_set = dsa_slave_port_attr_set,
+ .switchdev_port_obj_add = dsa_slave_port_obj_add,
+ .switchdev_port_obj_del = dsa_slave_port_obj_del,
+ .switchdev_port_obj_dump = dsa_slave_port_obj_dump,
};
static void dsa_slave_adjust_link(struct net_device *dev)
@@ -894,7 +1152,7 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
slave_dev->features = master->vlan_features;
slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
eth_hw_addr_inherit(slave_dev, master);
- slave_dev->tx_queue_len = 0;
+ slave_dev->priv_flags |= IFF_NO_QUEUE;
slave_dev->netdev_ops = &dsa_slave_netdev_ops;
slave_dev->switchdev_ops = &dsa_slave_switchdev_ops;
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index 83d3572cd..e2aadb731 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -58,14 +58,11 @@
#define BRCM_EG_TC_MASK 0x7
#define BRCM_EG_PID_MASK 0x1f
-static netdev_tx_t brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
u8 *brcm_tag;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
if (skb_cow_head(skb, BRCM_TAG_LEN) < 0)
goto out_free;
@@ -87,17 +84,11 @@ static netdev_tx_t brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev)
brcm_tag[2] = BRCM_IG_DSTMAP2_MASK;
brcm_tag[3] = (1 << p->port) & BRCM_IG_DSTMAP1_MASK;
- /* Queue the SKB for transmission on the parent interface, but
- * do not modify its EtherType
- */
- skb->dev = p->parent->dst->master_netdev;
- dev_queue_xmit(skb);
-
- return NETDEV_TX_OK;
+ return skb;
out_free:
kfree_skb(skb);
- return NETDEV_TX_OK;
+ return NULL;
}
static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index 2dab27063..aa780e4ac 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -15,14 +15,11 @@
#define DSA_HLEN 4
-static netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
u8 *dsa_header;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
/*
* Convert the outermost 802.1q tag to a DSA tag for tagged
* packets, or insert a DSA tag between the addresses and
@@ -63,14 +60,11 @@ static netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev)
dsa_header[3] = 0x00;
}
- skb->dev = p->parent->dst->master_netdev;
- dev_queue_xmit(skb);
-
- return NETDEV_TX_OK;
+ return skb;
out_free:
kfree_skb(skb);
- return NETDEV_TX_OK;
+ return NULL;
}
static int dsa_rcv(struct sk_buff *skb, struct net_device *dev,
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index 9aeda596f..2288c8098 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -16,14 +16,11 @@
#define DSA_HLEN 4
#define EDSA_HLEN 8
-static netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
u8 *edsa_header;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
/*
* Convert the outermost 802.1q tag to a DSA tag and prepend
* a DSA ethertype field if the packet is tagged, or insert
@@ -76,14 +73,11 @@ static netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev)
edsa_header[7] = 0x00;
}
- skb->dev = p->parent->dst->master_netdev;
- dev_queue_xmit(skb);
-
- return NETDEV_TX_OK;
+ return skb;
out_free:
kfree_skb(skb);
- return NETDEV_TX_OK;
+ return NULL;
}
static int edsa_rcv(struct sk_buff *skb, struct net_device *dev,
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index e268f9db8..b6ca0890d 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -13,16 +13,13 @@
#include <linux/slab.h>
#include "dsa_priv.h"
-static netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
struct sk_buff *nskb;
int padlen;
u8 *trailer;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
/*
* We have to make sure that the trailer ends up as the very
* last 4 bytes of the packet. This means that we have to pad
@@ -36,7 +33,7 @@ static netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev)
nskb = alloc_skb(NET_IP_ALIGN + skb->len + padlen + 4, GFP_ATOMIC);
if (nskb == NULL) {
kfree_skb(skb);
- return NETDEV_TX_OK;
+ return NULL;
}
skb_reserve(nskb, NET_IP_ALIGN);
@@ -57,10 +54,7 @@ static netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev)
trailer[2] = 0x10;
trailer[3] = 0x00;
- nskb->dev = p->parent->dst->master_netdev;
- dev_queue_xmit(nskb);
-
- return NETDEV_TX_OK;
+ return nskb;
}
static int trailer_rcv(struct sk_buff *skb, struct net_device *dev,
@@ -84,7 +78,7 @@ static int trailer_rcv(struct sk_buff *skb, struct net_device *dev,
trailer = skb_tail_pointer(skb) - 4;
if (trailer[0] != 0x80 || (trailer[1] & 0xf8) != 0x00 ||
- (trailer[3] & 0xef) != 0x00 || trailer[3] != 0x00)
+ (trailer[2] & 0xef) != 0x00 || trailer[3] != 0x00)
goto out_drop;
source_port = trailer[1] & 7;
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 5ee0be64b..74098a09f 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -114,7 +114,7 @@ int eth_header(struct sk_buff *skb, struct net_device *dev,
EXPORT_SYMBOL(eth_header);
/**
- * eth_get_headlen - determine the the length of header for an ethernet frame
+ * eth_get_headlen - determine the length of header for an ethernet frame
* @data: pointer to start of frame
* @len: total length of frame
*
@@ -132,7 +132,7 @@ u32 eth_get_headlen(void *data, unsigned int len)
/* parse any remaining L2/L3 headers, check for L4 */
if (!skb_flow_dissect_flow_keys_buf(&keys, data, eth->h_proto,
- sizeof(*eth), len))
+ sizeof(*eth), len, 0))
return max_t(u32, keys.control.thoff, sizeof(*eth));
/* parse for any L4 headers */
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 44d27469a..35a9788bb 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -392,7 +392,7 @@ void hsr_dev_setup(struct net_device *dev)
dev->header_ops = &hsr_header_ops;
dev->netdev_ops = &hsr_device_ops;
SET_NETDEV_DEVTYPE(dev, &hsr_type);
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->destructor = hsr_dev_destroy;
diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h
index e50f69da7..ea339fa94 100644
--- a/net/ieee802154/6lowpan/6lowpan_i.h
+++ b/net/ieee802154/6lowpan/6lowpan_i.h
@@ -5,6 +5,7 @@
#include <net/ieee802154_netdev.h>
#include <net/inet_frag.h>
+#include <net/6lowpan.h>
struct lowpan_create_arg {
u16 tag;
@@ -37,26 +38,18 @@ static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a)
}
}
-struct lowpan_dev_record {
- struct net_device *ldev;
- struct list_head list;
-};
-
/* private device info */
struct lowpan_dev_info {
struct net_device *real_dev; /* real WPAN device ptr */
- struct mutex dev_list_mtx; /* mutex for list ops */
u16 fragment_tag;
};
static inline struct
lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
{
- return netdev_priv(dev);
+ return (struct lowpan_dev_info *)lowpan_priv(dev)->priv;
}
-extern struct list_head lowpan_devices;
-
int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
void lowpan_net_frag_exit(void);
int lowpan_net_frag_init(void);
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index f20a387a1..953b1c49f 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -52,8 +52,7 @@
#include "6lowpan_i.h"
-LIST_HEAD(lowpan_devices);
-static int lowpan_open_count;
+static int open_count;
static struct header_ops lowpan_header_ops = {
.create = lowpan_header_create,
@@ -91,7 +90,7 @@ static void lowpan_setup(struct net_device *dev)
dev->hard_header_len = 2 + 1 + 20 + 14;
dev->needed_tailroom = 2; /* FCS */
dev->mtu = IPV6_MIN_MTU;
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->flags = IFF_BROADCAST | IFF_MULTICAST;
dev->watchdog_timeo = 0;
@@ -114,7 +113,6 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct net_device *real_dev;
- struct lowpan_dev_record *entry;
int ret;
ASSERT_RTNL();
@@ -133,67 +131,52 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
return -EINVAL;
}
- lowpan_dev_info(dev)->real_dev = real_dev;
- mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
-
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
+ if (real_dev->ieee802154_ptr->lowpan_dev) {
dev_put(real_dev);
- lowpan_dev_info(dev)->real_dev = NULL;
- return -ENOMEM;
+ return -EBUSY;
}
- entry->ldev = dev;
-
+ lowpan_dev_info(dev)->real_dev = real_dev;
/* Set the lowpan hardware address to the wpan hardware address. */
memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
- mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
- INIT_LIST_HEAD(&entry->list);
- list_add_tail(&entry->list, &lowpan_devices);
- mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
+ lowpan_netdev_setup(dev, LOWPAN_LLTYPE_IEEE802154);
ret = register_netdevice(dev);
- if (ret >= 0) {
- if (!lowpan_open_count)
- lowpan_rx_init();
- lowpan_open_count++;
+ if (ret < 0) {
+ dev_put(real_dev);
+ return ret;
}
- return ret;
+ real_dev->ieee802154_ptr->lowpan_dev = dev;
+ if (!open_count)
+ lowpan_rx_init();
+
+ open_count++;
+
+ return 0;
}
static void lowpan_dellink(struct net_device *dev, struct list_head *head)
{
struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
struct net_device *real_dev = lowpan_dev->real_dev;
- struct lowpan_dev_record *entry, *tmp;
ASSERT_RTNL();
- lowpan_open_count--;
- if (!lowpan_open_count)
- lowpan_rx_exit();
-
- mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
- list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
- if (entry->ldev == dev) {
- list_del(&entry->list);
- kfree(entry);
- }
- }
- mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
+ open_count--;
- mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);
-
- unregister_netdevice_queue(dev, head);
+ if (!open_count)
+ lowpan_rx_exit();
+ real_dev->ieee802154_ptr->lowpan_dev = NULL;
+ unregister_netdevice(dev);
dev_put(real_dev);
}
static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
.kind = "lowpan",
- .priv_size = sizeof(struct lowpan_dev_info),
+ .priv_size = LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev_info)),
.setup = lowpan_setup,
.newlink = lowpan_newlink,
.dellink = lowpan_dellink,
@@ -214,19 +197,21 @@ static int lowpan_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- LIST_HEAD(del_list);
- struct lowpan_dev_record *entry, *tmp;
if (dev->type != ARPHRD_IEEE802154)
goto out;
- if (event == NETDEV_UNREGISTER) {
- list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
- if (lowpan_dev_info(entry->ldev)->real_dev == dev)
- lowpan_dellink(entry->ldev, &del_list);
- }
-
- unregister_netdevice_many(&del_list);
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ /* If the wpan interface is being unregistered, also
+ * delete the lowpan interface which belongs to this
+ * wpan interface.
+ */
+ if (dev->ieee802154_ptr && dev->ieee802154_ptr->lowpan_dev)
+ lowpan_dellink(dev->ieee802154_ptr->lowpan_dev, NULL);
+ break;
+ default:
+ break;
}
out:
diff --git a/net/ieee802154/6lowpan/rx.c b/net/ieee802154/6lowpan/rx.c
index 4be1d289a..12e10201d 100644
--- a/net/ieee802154/6lowpan/rx.c
+++ b/net/ieee802154/6lowpan/rx.c
@@ -15,36 +15,14 @@
#include "6lowpan_i.h"
-static int lowpan_give_skb_to_devices(struct sk_buff *skb,
- struct net_device *dev)
+static int lowpan_give_skb_to_device(struct sk_buff *skb,
+ struct net_device *dev)
{
- struct lowpan_dev_record *entry;
- struct sk_buff *skb_cp;
- int stat = NET_RX_SUCCESS;
-
+ skb->dev = dev->ieee802154_ptr->lowpan_dev;
skb->protocol = htons(ETH_P_IPV6);
skb->pkt_type = PACKET_HOST;
- rcu_read_lock();
- list_for_each_entry_rcu(entry, &lowpan_devices, list)
- if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
- skb_cp = skb_copy(skb, GFP_ATOMIC);
- if (!skb_cp) {
- kfree_skb(skb);
- rcu_read_unlock();
- return NET_RX_DROP;
- }
-
- skb_cp->dev = entry->ldev;
- stat = netif_rx(skb_cp);
- if (stat == NET_RX_DROP)
- break;
- }
- rcu_read_unlock();
-
- consume_skb(skb);
-
- return stat;
+ return netif_rx(skb);
}
static int
@@ -89,6 +67,10 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
struct ieee802154_hdr hdr;
int ret;
+ if (dev->type != ARPHRD_IEEE802154 ||
+ !dev->ieee802154_ptr->lowpan_dev)
+ goto drop;
+
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
goto drop;
@@ -99,9 +81,6 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
if (skb->pkt_type == PACKET_OTHERHOST)
goto drop_skb;
- if (dev->type != ARPHRD_IEEE802154)
- goto drop_skb;
-
if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
goto drop_skb;
@@ -109,7 +88,7 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
/* Pull off the 1-byte of 6lowpan header. */
skb_pull(skb, 1);
- return lowpan_give_skb_to_devices(skb, NULL);
+ return lowpan_give_skb_to_device(skb, dev);
} else {
switch (skb->data[0] & 0xe0) {
case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
@@ -117,7 +96,7 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
if (ret < 0)
goto drop_skb;
- return lowpan_give_skb_to_devices(skb, NULL);
+ return lowpan_give_skb_to_device(skb, dev);
case LOWPAN_DISPATCH_FRAG1: /* first fragment header */
ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1);
if (ret == 1) {
@@ -125,7 +104,7 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
if (ret < 0)
goto drop_skb;
- return lowpan_give_skb_to_devices(skb, NULL);
+ return lowpan_give_skb_to_device(skb, dev);
} else if (ret == -1) {
return NET_RX_DROP;
} else {
@@ -138,7 +117,7 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
if (ret < 0)
goto drop_skb;
- return lowpan_give_skb_to_devices(skb, NULL);
+ return lowpan_give_skb_to_device(skb, dev);
} else if (ret == -1) {
return NET_RX_DROP;
} else {
diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
index 2597abbf7..f6263fc12 100644
--- a/net/ieee802154/6lowpan/tx.c
+++ b/net/ieee802154/6lowpan/tx.c
@@ -112,7 +112,7 @@ lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr);
if (IS_ERR(frag))
- return -PTR_ERR(frag);
+ return PTR_ERR(frag);
memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen);
memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len);
@@ -224,7 +224,7 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *dev)
} else {
da.mode = IEEE802154_ADDR_LONG;
da.extended_addr = ieee802154_devaddr_from_raw(daddr);
- cb->ackreq = wpan_dev->frame_retries >= 0;
+ cb->ackreq = wpan_dev->ackreq;
}
return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index 68f240168..3f89c0abd 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -230,6 +230,8 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = {
[NL802154_ATTR_WPAN_PHY_CAPS] = { .type = NLA_NESTED },
[NL802154_ATTR_SUPPORTED_COMMANDS] = { .type = NLA_NESTED },
+
+ [NL802154_ATTR_ACKREQ_DEFAULT] = { .type = NLA_U8 },
};
/* message building helper */
@@ -458,6 +460,7 @@ static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev,
CMD(set_max_csma_backoffs, SET_MAX_CSMA_BACKOFFS);
CMD(set_max_frame_retries, SET_MAX_FRAME_RETRIES);
CMD(set_lbt_mode, SET_LBT_MODE);
+ CMD(set_ackreq_default, SET_ACKREQ_DEFAULT);
if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER)
CMD(set_tx_power, SET_TX_POWER);
@@ -656,6 +659,10 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
if (nla_put_u8(msg, NL802154_ATTR_LBT_MODE, wpan_dev->lbt))
goto nla_put_failure;
+ /* ackreq default behaviour */
+ if (nla_put_u8(msg, NL802154_ATTR_ACKREQ_DEFAULT, wpan_dev->ackreq))
+ goto nla_put_failure;
+
genlmsg_end(msg, hdr);
return 0;
@@ -1027,7 +1034,7 @@ static int nl802154_set_lbt_mode(struct sk_buff *skb, struct genl_info *info)
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
- bool mode;
+ int mode;
if (netif_running(dev))
return -EBUSY;
@@ -1035,13 +1042,39 @@ static int nl802154_set_lbt_mode(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[NL802154_ATTR_LBT_MODE])
return -EINVAL;
- mode = !!nla_get_u8(info->attrs[NL802154_ATTR_LBT_MODE]);
+ mode = nla_get_u8(info->attrs[NL802154_ATTR_LBT_MODE]);
+
+ if (mode != 0 && mode != 1)
+ return -EINVAL;
+
if (!wpan_phy_supported_bool(mode, rdev->wpan_phy.supported.lbt))
return -EINVAL;
return rdev_set_lbt_mode(rdev, wpan_dev, mode);
}
+static int
+nl802154_set_ackreq_default(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+ int ackreq;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ if (!info->attrs[NL802154_ATTR_ACKREQ_DEFAULT])
+ return -EINVAL;
+
+ ackreq = nla_get_u8(info->attrs[NL802154_ATTR_ACKREQ_DEFAULT]);
+
+ if (ackreq != 0 && ackreq != 1)
+ return -EINVAL;
+
+ return rdev_set_ackreq_default(rdev, wpan_dev, ackreq);
+}
+
#define NL802154_FLAG_NEED_WPAN_PHY 0x01
#define NL802154_FLAG_NEED_NETDEV 0x02
#define NL802154_FLAG_NEED_RTNL 0x04
@@ -1248,6 +1281,14 @@ static const struct genl_ops nl802154_ops[] = {
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_NEED_RTNL,
},
+ {
+ .cmd = NL802154_CMD_SET_ACKREQ_DEFAULT,
+ .doit = nl802154_set_ackreq_default,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_NETDEV |
+ NL802154_FLAG_NEED_RTNL,
+ },
};
/* initialisation/exit functions */
diff --git a/net/ieee802154/rdev-ops.h b/net/ieee802154/rdev-ops.h
index b2155a123..03b357501 100644
--- a/net/ieee802154/rdev-ops.h
+++ b/net/ieee802154/rdev-ops.h
@@ -24,6 +24,26 @@ rdev_del_virtual_intf_deprecated(struct cfg802154_registered_device *rdev,
}
static inline int
+rdev_suspend(struct cfg802154_registered_device *rdev)
+{
+ int ret;
+ trace_802154_rdev_suspend(&rdev->wpan_phy);
+ ret = rdev->ops->suspend(&rdev->wpan_phy);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
+}
+
+static inline int
+rdev_resume(struct cfg802154_registered_device *rdev)
+{
+ int ret;
+ trace_802154_rdev_resume(&rdev->wpan_phy);
+ ret = rdev->ops->resume(&rdev->wpan_phy);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
+}
+
+static inline int
rdev_add_virtual_intf(struct cfg802154_registered_device *rdev, char *name,
unsigned char name_assign_type,
enum nl802154_iftype type, __le64 extended_addr)
@@ -175,4 +195,17 @@ rdev_set_lbt_mode(struct cfg802154_registered_device *rdev,
return ret;
}
+static inline int
+rdev_set_ackreq_default(struct cfg802154_registered_device *rdev,
+ struct wpan_dev *wpan_dev, bool ackreq)
+{
+ int ret;
+
+ trace_802154_rdev_set_ackreq_default(&rdev->wpan_phy, wpan_dev,
+ ackreq);
+ ret = rdev->ops->set_ackreq_default(&rdev->wpan_phy, wpan_dev, ackreq);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
+}
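/* Illustrative sketch, not from this patch: a plausible driver-side
 * set_ackreq_default op that the wrapper above dispatches to, consistent
 * with how lowpan_header() reads wpan_dev->ackreq in tx.c. The mydrv_
 * name is hypothetical.
 */
static int mydrv_set_ackreq_default(struct wpan_phy *wpan_phy,
				    struct wpan_dev *wpan_dev, bool ackreq)
{
	ASSERT_RTNL();

	wpan_dev->ackreq = ackreq;

	return 0;
}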
+
#endif /* __CFG802154_RDEV_OPS */
diff --git a/net/ieee802154/sysfs.c b/net/ieee802154/sysfs.c
index 133b42806..bd88525b0 100644
--- a/net/ieee802154/sysfs.c
+++ b/net/ieee802154/sysfs.c
@@ -14,11 +14,13 @@
*/
#include <linux/device.h>
+#include <linux/rtnetlink.h>
#include <net/cfg802154.h>
#include "core.h"
#include "sysfs.h"
+#include "rdev-ops.h"
static inline struct cfg802154_registered_device *
dev_to_rdev(struct device *dev)
@@ -62,10 +64,46 @@ static struct attribute *pmib_attrs[] = {
};
ATTRIBUTE_GROUPS(pmib);
+#ifdef CONFIG_PM_SLEEP
+static int wpan_phy_suspend(struct device *dev)
+{
+ struct cfg802154_registered_device *rdev = dev_to_rdev(dev);
+ int ret = 0;
+
+ if (rdev->ops->suspend) {
+ rtnl_lock();
+ ret = rdev_suspend(rdev);
+ rtnl_unlock();
+ }
+
+ return ret;
+}
+
+static int wpan_phy_resume(struct device *dev)
+{
+ struct cfg802154_registered_device *rdev = dev_to_rdev(dev);
+ int ret = 0;
+
+ if (rdev->ops->resume) {
+ rtnl_lock();
+ ret = rdev_resume(rdev);
+ rtnl_unlock();
+ }
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(wpan_phy_pm_ops, wpan_phy_suspend, wpan_phy_resume);
+#define WPAN_PHY_PM_OPS (&wpan_phy_pm_ops)
+#else
+#define WPAN_PHY_PM_OPS NULL
+#endif
+
struct class wpan_phy_class = {
.name = "ieee802154",
.dev_release = wpan_phy_release,
.dev_groups = pmib_groups,
+ .pm = WPAN_PHY_PM_OPS,
};
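/* Illustrative sketch, not from this patch: a driver providing the
 * suspend/resume ops that wpan_phy_suspend()/wpan_phy_resume() above
 * invoke under RTNL. All mydrv_* names are hypothetical.
 */
static int mydrv_suspend(struct wpan_phy *wpan_phy)
{
	struct mydrv_priv *priv = wpan_phy_priv(wpan_phy);

	ASSERT_RTNL();

	return mydrv_hw_stop(priv);	/* quiesce the transceiver */
}

static int mydrv_resume(struct wpan_phy *wpan_phy)
{
	struct mydrv_priv *priv = wpan_phy_priv(wpan_phy);

	ASSERT_RTNL();

	return mydrv_hw_start(priv);	/* bring the transceiver back up */
}

static const struct cfg802154_ops mydrv_cfg_ops = {
	.suspend = mydrv_suspend,
	.resume = mydrv_resume,
	/* remaining ops elided */
};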
int wpan_phy_sysfs_init(void)
diff --git a/net/ieee802154/trace.h b/net/ieee802154/trace.h
index 9b5f0eb36..9a471e41e 100644
--- a/net/ieee802154/trace.h
+++ b/net/ieee802154/trace.h
@@ -40,6 +40,28 @@
* rdev->ops traces *
*************************************************************/
+DECLARE_EVENT_CLASS(wpan_phy_only_evt,
+ TP_PROTO(struct wpan_phy *wpan_phy),
+ TP_ARGS(wpan_phy),
+ TP_STRUCT__entry(
+ WPAN_PHY_ENTRY
+ ),
+ TP_fast_assign(
+ WPAN_PHY_ASSIGN;
+ ),
+ TP_printk(WPAN_PHY_PR_FMT, WPAN_PHY_PR_ARG)
+);
+
+DEFINE_EVENT(wpan_phy_only_evt, 802154_rdev_suspend,
+ TP_PROTO(struct wpan_phy *wpan_phy),
+ TP_ARGS(wpan_phy)
+);
+
+DEFINE_EVENT(wpan_phy_only_evt, 802154_rdev_resume,
+ TP_PROTO(struct wpan_phy *wpan_phy),
+ TP_ARGS(wpan_phy)
+);
+
TRACE_EVENT(802154_rdev_add_virtual_intf,
TP_PROTO(struct wpan_phy *wpan_phy, char *name,
enum nl802154_iftype type, __le64 extended_addr),
@@ -253,6 +275,25 @@ TRACE_EVENT(802154_rdev_set_lbt_mode,
WPAN_DEV_PR_ARG, BOOL_TO_STR(__entry->mode))
);
+TRACE_EVENT(802154_rdev_set_ackreq_default,
+ TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+ bool ackreq),
+ TP_ARGS(wpan_phy, wpan_dev, ackreq),
+ TP_STRUCT__entry(
+ WPAN_PHY_ENTRY
+ WPAN_DEV_ENTRY
+ __field(bool, ackreq)
+ ),
+ TP_fast_assign(
+ WPAN_PHY_ASSIGN;
+ WPAN_DEV_ASSIGN;
+ __entry->ackreq = ackreq;
+ ),
+ TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT
+ ", ackreq default: %s", WPAN_PHY_PR_ARG,
+ WPAN_DEV_PR_ARG, BOOL_TO_STR(__entry->ackreq))
+);
+
TRACE_EVENT(802154_rdev_return_int,
TP_PROTO(struct wpan_phy *wpan_phy, int ret),
TP_ARGS(wpan_phy, ret),
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 23431321c..a3c217b0d 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -331,20 +331,6 @@ config NET_FOU_IP_TUNNELS
When this option is enabled IP tunnels can be configured to use
FOU or GUE encapsulation.
-config GENEVE_CORE
- tristate "Generic Network Virtualization Encapsulation library"
- depends on INET
- select NET_UDP_TUNNEL
- ---help---
- This allows one to create Geneve virtual interfaces that provide
- Layer 2 Networks over Layer 3 Networks. Geneve is often used
- to tunnel virtual network infrastructure in virtualized environments.
- For more information see:
- http://tools.ietf.org/html/draft-gross-geneve-01
-
- To compile this driver as a module, choose M here: the module
-
-
config INET_AH
tristate "IP: AH transformation"
select XFRM_ALGO
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index efc43f300..89aacb630 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -57,7 +57,6 @@ obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
obj-$(CONFIG_MEMCG_KMEM) += tcp_memcontrol.o
obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
-obj-$(CONFIG_GENEVE_CORE) += geneve_core.o
obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
xfrm4_output.o xfrm4_protocol.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 9532ee871..1d0c3adb6 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -112,12 +112,14 @@
#include <net/raw.h>
#include <net/icmp.h>
#include <net/inet_common.h>
+#include <net/ip_tunnels.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/secure_seq.h>
#ifdef CONFIG_IP_MROUTE
#include <linux/mroute.h>
#endif
+#include <net/vrf.h>
/* The inetsw table contains everything that inet_create needs to
@@ -426,6 +428,7 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct net *net = sock_net(sk);
unsigned short snum;
int chk_addr_ret;
+ u32 tb_id = RT_TABLE_LOCAL;
int err;
/* If the socket has its own bind function then use it. (RAW) */
@@ -447,7 +450,8 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
}
- chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr);
+ tb_id = vrf_dev_table_ifindex(net, sk->sk_bound_dev_if) ? : tb_id;
+ chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);
/* Not specified by any standard per-se, however it breaks too
* many applications when removed. It is unfortunate since
@@ -1448,38 +1452,51 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
}
EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
+u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
+{
+ return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
+}
+EXPORT_SYMBOL_GPL(snmp_get_cpu_field);
+
unsigned long snmp_fold_field(void __percpu *mib, int offt)
{
unsigned long res = 0;
int i;
for_each_possible_cpu(i)
- res += *(((unsigned long *) per_cpu_ptr(mib, i)) + offt);
+ res += snmp_get_cpu_field(mib, i, offt);
return res;
}
EXPORT_SYMBOL_GPL(snmp_fold_field);
#if BITS_PER_LONG==32
+u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
+ size_t syncp_offset)
+{
+ void *bhptr;
+ struct u64_stats_sync *syncp;
+ u64 v;
+ unsigned int start;
+
+ bhptr = per_cpu_ptr(mib, cpu);
+ syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
+ do {
+ start = u64_stats_fetch_begin_irq(syncp);
+ v = *(((u64 *)bhptr) + offt);
+ } while (u64_stats_fetch_retry_irq(syncp, start));
+
+ return v;
+}
+EXPORT_SYMBOL_GPL(snmp_get_cpu_field64);
+
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
{
u64 res = 0;
int cpu;
for_each_possible_cpu(cpu) {
- void *bhptr;
- struct u64_stats_sync *syncp;
- u64 v;
- unsigned int start;
-
- bhptr = per_cpu_ptr(mib, cpu);
- syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
- do {
- start = u64_stats_fetch_begin_irq(syncp);
- v = *(((u64 *) bhptr) + offt);
- } while (u64_stats_fetch_retry_irq(syncp, start));
-
- res += v;
+ res += snmp_get_cpu_field64(mib, cpu, offt, syncp_offset);
}
return res;
}
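/* Illustrative sketch, not from this patch: a hypothetical use of the new
 * per-cpu accessor, walking one CPU at a time instead of folding a whole
 * field (only built when BITS_PER_LONG == 32, like the accessor itself).
 * IPSTATS_MIB_INPKTS and struct ipstats_mib are real; the dump function
 * is invented for illustration.
 */
static void example_dump_inpkts(struct net *net, struct seq_file *seq)
{
	void __percpu *mib = net->mib.ip_statistics;
	int cpu;

	for_each_possible_cpu(cpu)
		seq_printf(seq, "cpu%d: %llu\n", cpu,
			   snmp_get_cpu_field64(mib, cpu, IPSTATS_MIB_INPKTS,
						offsetof(struct ipstats_mib,
							 syncp)));
}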
@@ -1780,6 +1797,8 @@ static int __init inet_init(void)
dev_add_pack(&ip_packet_type);
+ ip_tunnel_core_init();
+
rc = 0;
out:
return rc;
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index ac9a32ec3..f2a71025a 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -360,8 +360,10 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
work_iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl +
ahp->icv_trunc_len + seqhi_len);
- if (!work_iph)
+ if (!work_iph) {
+ err = -ENOMEM;
goto out;
+ }
seqhi = (__be32 *)((char *)work_iph + ihl);
auth_data = ah_tmp_auth(seqhi, seqhi_len);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 6c8b1fbaf..0c9c3482e 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -113,6 +113,8 @@
#include <net/arp.h>
#include <net/ax25.h>
#include <net/netrom.h>
+#include <net/dst_metadata.h>
+#include <net/ip_tunnels.h>
#include <linux/uaccess.h>
@@ -233,7 +235,7 @@ static int arp_constructor(struct neighbour *neigh)
return -EINVAL;
}
- neigh->type = inet_addr_type(dev_net(dev), addr);
+ neigh->type = inet_addr_type_dev_table(dev_net(dev), dev, addr);
parms = in_dev->arp_parms;
__neigh_parms_put(neigh->parms);
@@ -291,6 +293,39 @@ static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb)
kfree_skb(skb);
}
+/* Create and send an arp packet. */
+static void arp_send_dst(int type, int ptype, __be32 dest_ip,
+ struct net_device *dev, __be32 src_ip,
+ const unsigned char *dest_hw,
+ const unsigned char *src_hw,
+ const unsigned char *target_hw,
+ struct dst_entry *dst)
+{
+ struct sk_buff *skb;
+
+ /* No arp on this interface. */
+ if (dev->flags & IFF_NOARP)
+ return;
+
+ skb = arp_create(type, ptype, dest_ip, dev, src_ip,
+ dest_hw, src_hw, target_hw);
+ if (!skb)
+ return;
+
+ skb_dst_set(skb, dst_clone(dst));
+ arp_xmit(skb);
+}
+
+void arp_send(int type, int ptype, __be32 dest_ip,
+ struct net_device *dev, __be32 src_ip,
+ const unsigned char *dest_hw, const unsigned char *src_hw,
+ const unsigned char *target_hw)
+{
+ arp_send_dst(type, ptype, dest_ip, dev, src_ip, dest_hw, src_hw,
+ target_hw, NULL);
+}
+EXPORT_SYMBOL(arp_send);
+
static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
{
__be32 saddr = 0;
@@ -299,6 +334,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
__be32 target = *(__be32 *)neigh->primary_key;
int probes = atomic_read(&neigh->probes);
struct in_device *in_dev;
+ struct dst_entry *dst = NULL;
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
@@ -309,7 +345,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
switch (IN_DEV_ARP_ANNOUNCE(in_dev)) {
default:
case 0: /* By default announce any local IP */
- if (skb && inet_addr_type(dev_net(dev),
+ if (skb && inet_addr_type_dev_table(dev_net(dev), dev,
ip_hdr(skb)->saddr) == RTN_LOCAL)
saddr = ip_hdr(skb)->saddr;
break;
@@ -317,7 +353,8 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
if (!skb)
break;
saddr = ip_hdr(skb)->saddr;
- if (inet_addr_type(dev_net(dev), saddr) == RTN_LOCAL) {
+ if (inet_addr_type_dev_table(dev_net(dev), dev,
+ saddr) == RTN_LOCAL) {
/* saddr should be known to target */
if (inet_addr_onlink(in_dev, target, saddr))
break;
@@ -346,8 +383,10 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
}
}
- arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
- dst_hw, dev->dev_addr, NULL);
+ if (skb && !(dev->priv_flags & IFF_XMIT_DST_RELEASE))
+ dst = skb_dst(skb);
+ arp_send_dst(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
+ dst_hw, dev->dev_addr, NULL, dst);
}
static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
@@ -597,32 +636,6 @@ void arp_xmit(struct sk_buff *skb)
EXPORT_SYMBOL(arp_xmit);
/*
- * Create and send an arp packet.
- */
-void arp_send(int type, int ptype, __be32 dest_ip,
- struct net_device *dev, __be32 src_ip,
- const unsigned char *dest_hw, const unsigned char *src_hw,
- const unsigned char *target_hw)
-{
- struct sk_buff *skb;
-
- /*
- * No arp on this interface.
- */
-
- if (dev->flags&IFF_NOARP)
- return;
-
- skb = arp_create(type, ptype, dest_ip, dev, src_ip,
- dest_hw, src_hw, target_hw);
- if (!skb)
- return;
-
- arp_xmit(skb);
-}
-EXPORT_SYMBOL(arp_send);
-
-/*
* Process an arp request.
*/
@@ -639,6 +652,7 @@ static int arp_process(struct sock *sk, struct sk_buff *skb)
int addr_type;
struct neighbour *n;
struct net *net = dev_net(dev);
+ struct dst_entry *reply_dst = NULL;
bool is_garp = false;
/* arp_rcv below verifies the ARP header and verifies the device
@@ -739,13 +753,18 @@ static int arp_process(struct sock *sk, struct sk_buff *skb)
* cache.
*/
+ if (arp->ar_op == htons(ARPOP_REQUEST) && skb_metadata_dst(skb))
+ reply_dst = (struct dst_entry *)
+ iptunnel_metadata_reply(skb_metadata_dst(skb),
+ GFP_ATOMIC);
+
/* Special case: IPv4 duplicate address detection packet (RFC2131) */
if (sip == 0) {
if (arp->ar_op == htons(ARPOP_REQUEST) &&
- inet_addr_type(net, tip) == RTN_LOCAL &&
+ inet_addr_type_dev_table(net, dev, tip) == RTN_LOCAL &&
!arp_ignore(in_dev, sip, tip))
- arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
- dev->dev_addr, sha);
+ arp_send_dst(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip,
+ sha, dev->dev_addr, sha, reply_dst);
goto out;
}
@@ -764,9 +783,10 @@ static int arp_process(struct sock *sk, struct sk_buff *skb)
if (!dont_send) {
n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
if (n) {
- arp_send(ARPOP_REPLY, ETH_P_ARP, sip,
- dev, tip, sha, dev->dev_addr,
- sha);
+ arp_send_dst(ARPOP_REPLY, ETH_P_ARP,
+ sip, dev, tip, sha,
+ dev->dev_addr, sha,
+ reply_dst);
neigh_release(n);
}
}
@@ -784,13 +804,14 @@ static int arp_process(struct sock *sk, struct sk_buff *skb)
if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED ||
skb->pkt_type == PACKET_HOST ||
NEIGH_VAR(in_dev->arp_parms, PROXY_DELAY) == 0) {
- arp_send(ARPOP_REPLY, ETH_P_ARP, sip,
- dev, tip, sha, dev->dev_addr,
- sha);
+ arp_send_dst(ARPOP_REPLY, ETH_P_ARP,
+ sip, dev, tip, sha,
+ dev->dev_addr, sha,
+ reply_dst);
} else {
pneigh_enqueue(&arp_tbl,
in_dev->arp_parms, skb);
- return 0;
+ goto out_free_dst;
}
goto out;
}
@@ -802,16 +823,18 @@ static int arp_process(struct sock *sk, struct sk_buff *skb)
n = __neigh_lookup(&arp_tbl, &sip, dev, 0);
if (IN_DEV_ARP_ACCEPT(in_dev)) {
+ unsigned int addr_type = inet_addr_type_dev_table(net, dev, sip);
+
/* Unsolicited ARP is not accepted by default.
It is possible, that this option should be enabled for some
devices (strip is candidate)
*/
is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip &&
- inet_addr_type(net, sip) == RTN_UNICAST;
+ addr_type == RTN_UNICAST;
if (!n &&
((arp->ar_op == htons(ARPOP_REPLY) &&
- inet_addr_type(net, sip) == RTN_UNICAST) || is_garp))
+ addr_type == RTN_UNICAST) || is_garp))
n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
}
@@ -842,6 +865,8 @@ static int arp_process(struct sock *sk, struct sk_buff *skb)
out:
consume_skb(skb);
+out_free_dst:
+ dst_release(reply_dst);
return 0;
}
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 574fad9cc..f915abff1 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -74,7 +74,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
inet->inet_daddr = fl4->daddr;
inet->inet_dport = usin->sin_port;
sk->sk_state = TCP_ESTABLISHED;
- inet_set_txhash(sk);
+ sk_set_txhash(sk);
inet->inet_id = jiffies;
sk_dst_set(sk, &rt->dst);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 6bbc54940..457b2cd75 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -45,6 +45,8 @@
#include <net/ip_fib.h>
#include <net/rtnetlink.h>
#include <net/xfrm.h>
+#include <net/vrf.h>
+#include <trace/events/fib.h>
#ifndef CONFIG_IP_MULTIPLE_TABLES
@@ -211,12 +213,12 @@ void fib_flush_external(struct net *net)
*/
static inline unsigned int __inet_dev_addr_type(struct net *net,
const struct net_device *dev,
- __be32 addr)
+ __be32 addr, u32 tb_id)
{
struct flowi4 fl4 = { .daddr = addr };
struct fib_result res;
unsigned int ret = RTN_BROADCAST;
- struct fib_table *local_table;
+ struct fib_table *table;
if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
return RTN_BROADCAST;
@@ -225,10 +227,10 @@ static inline unsigned int __inet_dev_addr_type(struct net *net,
rcu_read_lock();
- local_table = fib_get_table(net, RT_TABLE_LOCAL);
- if (local_table) {
+ table = fib_get_table(net, tb_id);
+ if (table) {
ret = RTN_UNICAST;
- if (!fib_table_lookup(local_table, &fl4, &res, FIB_LOOKUP_NOREF)) {
+ if (!fib_table_lookup(table, &fl4, &res, FIB_LOOKUP_NOREF)) {
if (!dev || dev == res.fi->fib_dev)
ret = res.type;
}
@@ -238,19 +240,40 @@ static inline unsigned int __inet_dev_addr_type(struct net *net,
return ret;
}
+unsigned int inet_addr_type_table(struct net *net, __be32 addr, u32 tb_id)
+{
+ return __inet_dev_addr_type(net, NULL, addr, tb_id);
+}
+EXPORT_SYMBOL(inet_addr_type_table);
+
unsigned int inet_addr_type(struct net *net, __be32 addr)
{
- return __inet_dev_addr_type(net, NULL, addr);
+ return __inet_dev_addr_type(net, NULL, addr, RT_TABLE_LOCAL);
}
EXPORT_SYMBOL(inet_addr_type);
unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
__be32 addr)
{
- return __inet_dev_addr_type(net, dev, addr);
+ u32 rt_table = vrf_dev_table(dev) ? : RT_TABLE_LOCAL;
+
+ return __inet_dev_addr_type(net, dev, addr, rt_table);
}
EXPORT_SYMBOL(inet_dev_addr_type);
+/* inet_addr_type with dev == NULL but using the table from a dev
+ * if one is associated
+ */
+unsigned int inet_addr_type_dev_table(struct net *net,
+ const struct net_device *dev,
+ __be32 addr)
+{
+ u32 rt_table = vrf_dev_table(dev) ? : RT_TABLE_LOCAL;
+
+ return __inet_dev_addr_type(net, NULL, addr, rt_table);
+}
+EXPORT_SYMBOL(inet_addr_type_dev_table);
+
__be32 fib_compute_spec_dst(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
@@ -280,6 +303,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
fl4.flowi4_scope = scope;
fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
+ fl4.flowi4_tun_key.tun_id = 0;
if (!fib_lookup(net, &fl4, &res, 0))
return FIB_RES_PREFSRC(net, res);
} else {
@@ -308,16 +332,22 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
bool dev_match;
fl4.flowi4_oif = 0;
- fl4.flowi4_iif = oif ? : LOOPBACK_IFINDEX;
+ fl4.flowi4_iif = vrf_master_ifindex_rcu(dev);
+ if (!fl4.flowi4_iif)
+ fl4.flowi4_iif = oif ? : LOOPBACK_IFINDEX;
fl4.daddr = src;
fl4.saddr = dst;
fl4.flowi4_tos = tos;
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
+ fl4.flowi4_tun_key.tun_id = 0;
+ fl4.flowi4_flags = 0;
no_addr = idev->ifa_list == NULL;
fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0;
+ trace_fib_validate_source(dev, &fl4);
+
net = dev_net(dev);
if (fib_lookup(net, &fl4, &res, 0))
goto last_resort;
@@ -337,6 +367,9 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
if (nh->nh_dev == dev) {
dev_match = true;
break;
+ } else if (vrf_master_ifindex_rcu(nh->nh_dev) == dev->ifindex) {
+ dev_match = true;
+ break;
}
}
#else
@@ -494,9 +527,12 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
addr = sk_extract_addr(&rt->rt_gateway);
if (rt->rt_gateway.sa_family == AF_INET && addr) {
+ unsigned int addr_type;
+
cfg->fc_gw = addr;
+ addr_type = inet_addr_type_table(net, addr, cfg->fc_table);
if (rt->rt_flags & RTF_GATEWAY &&
- inet_addr_type(net, addr) == RTN_UNICAST)
+ addr_type == RTN_UNICAST)
cfg->fc_scope = RT_SCOPE_UNIVERSE;
}
@@ -591,6 +627,8 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
[RTA_METRICS] = { .type = NLA_NESTED },
[RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
[RTA_FLOW] = { .type = NLA_U32 },
+ [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
+ [RTA_ENCAP] = { .type = NLA_NESTED },
};
static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
@@ -656,6 +694,12 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
case RTA_TABLE:
cfg->fc_table = nla_get_u32(attr);
break;
+ case RTA_ENCAP:
+ cfg->fc_encap = attr;
+ break;
+ case RTA_ENCAP_TYPE:
+ cfg->fc_encap_type = nla_get_u16(attr);
+ break;
}
}
@@ -760,6 +804,7 @@ out:
static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifaddr *ifa)
{
struct net *net = dev_net(ifa->ifa_dev->dev);
+ u32 tb_id = vrf_dev_table_rtnl(ifa->ifa_dev->dev);
struct fib_table *tb;
struct fib_config cfg = {
.fc_protocol = RTPROT_KERNEL,
@@ -774,11 +819,10 @@ static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifad
},
};
- if (type == RTN_UNICAST)
- tb = fib_new_table(net, RT_TABLE_MAIN);
- else
- tb = fib_new_table(net, RT_TABLE_LOCAL);
+ if (!tb_id)
+ tb_id = (type == RTN_UNICAST) ? RT_TABLE_MAIN : RT_TABLE_LOCAL;
+ tb = fib_new_table(net, tb_id);
if (!tb)
return;
@@ -960,11 +1004,14 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
fib_magic(RTM_DELROUTE, RTN_BROADCAST, any, 32, prim);
}
if (!(ok & LOCAL_OK)) {
+ unsigned int addr_type;
+
fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim);
/* Check, that this local address finally disappeared. */
- if (gone &&
- inet_addr_type(dev_net(dev), ifa->ifa_local) != RTN_LOCAL) {
+ addr_type = inet_addr_type_dev_table(dev_net(dev), dev,
+ ifa->ifa_local);
+ if (gone && addr_type != RTN_LOCAL) {
/* And the last, but not the least thing.
* We must flush stray FIB entries.
*
@@ -1063,9 +1110,10 @@ static void nl_fib_lookup_exit(struct net *net)
net->ipv4.fibnl = NULL;
}
-static void fib_disable_ip(struct net_device *dev, unsigned long event)
+static void fib_disable_ip(struct net_device *dev, unsigned long event,
+ bool force)
{
- if (fib_sync_down_dev(dev, event))
+ if (fib_sync_down_dev(dev, event, force))
fib_flush(dev_net(dev));
rt_cache_flush(dev_net(dev));
arp_ifdown(dev);
@@ -1093,7 +1141,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
/* Last address was deleted from this interface.
* Disable IP.
*/
- fib_disable_ip(dev, event);
+ fib_disable_ip(dev, event, true);
} else {
rt_cache_flush(dev_net(dev));
}
@@ -1110,7 +1158,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
unsigned int flags;
if (event == NETDEV_UNREGISTER) {
- fib_disable_ip(dev, event);
+ fib_disable_ip(dev, event, true);
rt_flush_dev(dev);
return NOTIFY_DONE;
}
@@ -1131,14 +1179,14 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
rt_cache_flush(net);
break;
case NETDEV_DOWN:
- fib_disable_ip(dev, event);
+ fib_disable_ip(dev, event, false);
break;
case NETDEV_CHANGE:
flags = dev_get_flags(dev);
if (flags & (IFF_RUNNING | IFF_LOWER_UP))
fib_sync_up(dev, RTNH_F_LINKDOWN);
else
- fib_sync_down_dev(dev, event);
+ fib_sync_down_dev(dev, event, false);
/* fall through */
case NETDEV_CHANGEMTU:
rt_cache_flush(net);
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 18123d50f..f2bda9e89 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -318,7 +318,6 @@ static const struct fib_rules_ops __net_initconst fib4_rules_ops_template = {
.delete = fib4_rule_delete,
.compare = fib4_rule_compare,
.fill = fib4_rule_fill,
- .default_pref = fib_default_rule_pref,
.nlmsg_payload = fib4_rule_nlmsg_payload,
.flush_cache = fib4_rule_flush_cache,
.nlgroup = RTNLGRP_IPV4_RULE,
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 3a06586b1..ef5892f5e 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -42,6 +42,7 @@
#include <net/ip_fib.h>
#include <net/netlink.h>
#include <net/nexthop.h>
+#include <net/lwtunnel.h>
#include "fib_lookup.h"
@@ -208,6 +209,7 @@ static void free_fib_info_rcu(struct rcu_head *head)
change_nexthops(fi) {
if (nexthop_nh->nh_dev)
dev_put(nexthop_nh->nh_dev);
+ lwtstate_put(nexthop_nh->nh_lwtstate);
free_nh_exceptions(nexthop_nh);
rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output);
rt_fibinfo_free(&nexthop_nh->nh_rth_input);
@@ -266,6 +268,7 @@ static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
#ifdef CONFIG_IP_ROUTE_CLASSID
nh->nh_tclassid != onh->nh_tclassid ||
#endif
+ lwtunnel_cmp_encap(nh->nh_lwtstate, onh->nh_lwtstate) ||
((nh->nh_flags ^ onh->nh_flags) & ~RTNH_COMPARE_MASK))
return -1;
onh++;
@@ -366,6 +369,7 @@ static inline size_t fib_nlmsg_size(struct fib_info *fi)
payload += nla_total_size((RTAX_MAX * nla_total_size(4)));
if (fi->fib_nhs) {
+ size_t nh_encapsize = 0;
/* Also handles the special case fib_nhs == 1 */
/* each nexthop is packed in an attribute */
@@ -374,8 +378,21 @@ static inline size_t fib_nlmsg_size(struct fib_info *fi)
/* may contain flow and gateway attribute */
nhsize += 2 * nla_total_size(4);
+ /* grab encap info */
+ for_nexthops(fi) {
+ if (nh->nh_lwtstate) {
+ /* RTA_ENCAP_TYPE */
+ nh_encapsize += lwtunnel_get_encap_size(
+ nh->nh_lwtstate);
+ /* RTA_ENCAP */
+ nh_encapsize += nla_total_size(2);
+ }
+ } endfor_nexthops(fi);
+
/* all nexthops are packed in a nested attribute */
- payload += nla_total_size(fi->fib_nhs * nhsize);
+ payload += nla_total_size((fi->fib_nhs * nhsize) +
+ nh_encapsize);
+
}
return payload;
@@ -421,13 +438,15 @@ static int fib_detect_death(struct fib_info *fi, int order,
if (n) {
state = n->nud_state;
neigh_release(n);
+ } else {
+ return 0;
}
if (state == NUD_REACHABLE)
return 0;
if ((state & NUD_VALID) && order != dflt)
return 0;
if ((state & NUD_VALID) ||
- (*last_idx < 0 && order > dflt)) {
+ (*last_idx < 0 && order > dflt && state != NUD_INCOMPLETE)) {
*last_resort = fi;
*last_idx = order;
}
@@ -452,6 +471,9 @@ static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
int remaining, struct fib_config *cfg)
{
+ struct net *net = cfg->fc_nlinfo.nl_net;
+ int ret;
+
change_nexthops(fi) {
int attrlen;
@@ -475,18 +497,70 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
if (nexthop_nh->nh_tclassid)
fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
+ nla = nla_find(attrs, attrlen, RTA_ENCAP);
+ if (nla) {
+ struct lwtunnel_state *lwtstate;
+ struct net_device *dev = NULL;
+ struct nlattr *nla_entype;
+
+ nla_entype = nla_find(attrs, attrlen,
+ RTA_ENCAP_TYPE);
+ if (!nla_entype)
+ goto err_inval;
+ if (cfg->fc_oif)
+ dev = __dev_get_by_index(net, cfg->fc_oif);
+ ret = lwtunnel_build_state(dev, nla_get_u16(
+ nla_entype),
+ nla, AF_INET, cfg,
+ &lwtstate);
+ if (ret)
+ goto errout;
+ nexthop_nh->nh_lwtstate =
+ lwtstate_get(lwtstate);
+ }
}
rtnh = rtnh_next(rtnh, &remaining);
} endfor_nexthops(fi);
return 0;
+
+err_inval:
+ ret = -EINVAL;
+
+errout:
+ return ret;
}
#endif
+static int fib_encap_match(struct net *net, u16 encap_type,
+ struct nlattr *encap,
+ int oif, const struct fib_nh *nh,
+ const struct fib_config *cfg)
+{
+ struct lwtunnel_state *lwtstate;
+ struct net_device *dev = NULL;
+ int ret, result = 0;
+
+ if (encap_type == LWTUNNEL_ENCAP_NONE)
+ return 0;
+
+ if (oif)
+ dev = __dev_get_by_index(net, oif);
+ ret = lwtunnel_build_state(dev, encap_type, encap,
+ AF_INET, cfg, &lwtstate);
+ if (!ret) {
+ result = lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate);
+ lwtstate_free(lwtstate);
+ }
+
+ return result;
+}
+
int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
{
+ struct net *net = cfg->fc_nlinfo.nl_net;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
struct rtnexthop *rtnh;
int remaining;
@@ -496,6 +570,12 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
return 1;
if (cfg->fc_oif || cfg->fc_gw) {
+ if (cfg->fc_encap) {
+ if (fib_encap_match(net, cfg->fc_encap_type,
+ cfg->fc_encap, cfg->fc_oif,
+ fi->fib_nh, cfg))
+ return 1;
+ }
if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
(!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
return 0;
@@ -585,7 +665,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
struct fib_nh *nh)
{
- int err;
+ int err = 0;
struct net *net;
struct net_device *dev;
@@ -594,16 +674,18 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
struct fib_result res;
if (nh->nh_flags & RTNH_F_ONLINK) {
+ unsigned int addr_type;
if (cfg->fc_scope >= RT_SCOPE_LINK)
return -EINVAL;
- if (inet_addr_type(net, nh->nh_gw) != RTN_UNICAST)
- return -EINVAL;
dev = __dev_get_by_index(net, nh->nh_oif);
if (!dev)
return -ENODEV;
if (!(dev->flags & IFF_UP))
return -ENETDOWN;
+ addr_type = inet_addr_type_dev_table(net, dev, nh->nh_gw);
+ if (addr_type != RTN_UNICAST)
+ return -EINVAL;
if (!netif_carrier_ok(dev))
nh->nh_flags |= RTNH_F_LINKDOWN;
nh->nh_dev = dev;
@@ -613,6 +695,7 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
}
rcu_read_lock();
{
+ struct fib_table *tbl = NULL;
struct flowi4 fl4 = {
.daddr = nh->nh_gw,
.flowi4_scope = cfg->fc_scope + 1,
@@ -623,8 +706,24 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
/* It is not necessary, but requires a bit of thinking */
if (fl4.flowi4_scope < RT_SCOPE_LINK)
fl4.flowi4_scope = RT_SCOPE_LINK;
- err = fib_lookup(net, &fl4, &res,
- FIB_LOOKUP_IGNORE_LINKSTATE);
+
+ if (cfg->fc_table)
+ tbl = fib_get_table(net, cfg->fc_table);
+
+ if (tbl)
+ err = fib_table_lookup(tbl, &fl4, &res,
+ FIB_LOOKUP_IGNORE_LINKSTATE |
+ FIB_LOOKUP_NOREF);
+
+ /* On error, or if no table was given, do a full lookup.
+ * This is needed, for example, when nexthops are in the
+ * local table rather than the given table.
+ */
+ if (!tbl || err) {
+ err = fib_lookup(net, &fl4, &res,
+ FIB_LOOKUP_IGNORE_LINKSTATE);
+ }
+
if (err) {
rcu_read_unlock();
return err;
@@ -760,6 +859,74 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
return nh->nh_saddr;
}
+static bool fib_valid_prefsrc(struct fib_config *cfg, __be32 fib_prefsrc)
+{
+ if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
+ fib_prefsrc != cfg->fc_dst) {
+ u32 tb_id = cfg->fc_table;
+ int rc;
+
+ if (tb_id == RT_TABLE_MAIN)
+ tb_id = RT_TABLE_LOCAL;
+
+ rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net,
+ fib_prefsrc, tb_id);
+
+ if (rc != RTN_LOCAL && tb_id != RT_TABLE_LOCAL) {
+ rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net,
+ fib_prefsrc, RT_TABLE_LOCAL);
+ }
+
+ if (rc != RTN_LOCAL)
+ return false;
+ }
+ return true;
+}
+
+static int
+fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
+{
+ bool ecn_ca = false;
+ struct nlattr *nla;
+ int remaining;
+
+ if (!cfg->fc_mx)
+ return 0;
+
+ nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
+ int type = nla_type(nla);
+ u32 val;
+
+ if (!type)
+ continue;
+ if (type > RTAX_MAX)
+ return -EINVAL;
+
+ if (type == RTAX_CC_ALGO) {
+ char tmp[TCP_CA_NAME_MAX];
+
+ nla_strlcpy(tmp, nla, sizeof(tmp));
+ val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
+ if (val == TCP_CA_UNSPEC)
+ return -EINVAL;
+ } else {
+ val = nla_get_u32(nla);
+ }
+ if (type == RTAX_ADVMSS && val > 65535 - 40)
+ val = 65535 - 40;
+ if (type == RTAX_MTU && val > 65535 - 15)
+ val = 65535 - 15;
+ if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
+ return -EINVAL;
+ fi->fib_metrics[type - 1] = val;
+ }
+
+ if (ecn_ca)
+ fi->fib_metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
+
+ return 0;
+}
+
struct fib_info *fib_create_info(struct fib_config *cfg)
{
int err;
@@ -832,36 +999,9 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
goto failure;
} endfor_nexthops(fi)
- if (cfg->fc_mx) {
- struct nlattr *nla;
- int remaining;
-
- nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
- int type = nla_type(nla);
-
- if (type) {
- u32 val;
-
- if (type > RTAX_MAX)
- goto err_inval;
- if (type == RTAX_CC_ALGO) {
- char tmp[TCP_CA_NAME_MAX];
-
- nla_strlcpy(tmp, nla, sizeof(tmp));
- val = tcp_ca_get_key_by_name(tmp);
- if (val == TCP_CA_UNSPEC)
- goto err_inval;
- } else {
- val = nla_get_u32(nla);
- }
- if (type == RTAX_ADVMSS && val > 65535 - 40)
- val = 65535 - 40;
- if (type == RTAX_MTU && val > 65535 - 15)
- val = 65535 - 15;
- fi->fib_metrics[type - 1] = val;
- }
- }
- }
+ err = fib_convert_metrics(fi, cfg);
+ if (err)
+ goto failure;
if (cfg->fc_mp) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -882,6 +1022,22 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
} else {
struct fib_nh *nh = fi->fib_nh;
+ if (cfg->fc_encap) {
+ struct lwtunnel_state *lwtstate;
+ struct net_device *dev = NULL;
+
+ if (cfg->fc_encap_type == LWTUNNEL_ENCAP_NONE)
+ goto err_inval;
+ if (cfg->fc_oif)
+ dev = __dev_get_by_index(net, cfg->fc_oif);
+ err = lwtunnel_build_state(dev, cfg->fc_encap_type,
+ cfg->fc_encap, AF_INET, cfg,
+ &lwtstate);
+ if (err)
+ goto failure;
+
+ nh->nh_lwtstate = lwtstate_get(lwtstate);
+ }
nh->nh_oif = cfg->fc_oif;
nh->nh_gw = cfg->fc_gw;
nh->nh_flags = cfg->fc_flags;
@@ -940,12 +1096,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
fi->fib_flags |= RTNH_F_LINKDOWN;
}
- if (fi->fib_prefsrc) {
- if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
- fi->fib_prefsrc != cfg->fc_dst)
- if (inet_addr_type(net, fi->fib_prefsrc) != RTN_LOCAL)
- goto err_inval;
- }
+ if (fi->fib_prefsrc && !fib_valid_prefsrc(cfg, fi->fib_prefsrc))
+ goto err_inval;
change_nexthops(fi) {
fib_info_update_nh_saddr(net, nexthop_nh);
@@ -1055,6 +1207,8 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
goto nla_put_failure;
#endif
+ if (fi->fib_nh->nh_lwtstate)
+ lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate);
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (fi->fib_nhs > 1) {
@@ -1090,6 +1244,8 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
goto nla_put_failure;
#endif
+ if (nh->nh_lwtstate)
+ lwtunnel_fill_encap(skb, nh->nh_lwtstate);
/* length of rtnetlink header + attributes */
rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
} endfor_nexthops(fi);
@@ -1132,7 +1288,13 @@ int fib_sync_down_addr(struct net *net, __be32 local)
return ret;
}
-int fib_sync_down_dev(struct net_device *dev, unsigned long event)
+/* Event              force  Flags           Description
+ * NETDEV_CHANGE      0      LINKDOWN        Carrier OFF, not for scope host
+ * NETDEV_DOWN        0      LINKDOWN|DEAD   Link down, not for scope host
+ * NETDEV_DOWN        1      LINKDOWN|DEAD   Last address removed
+ * NETDEV_UNREGISTER  1      LINKDOWN|DEAD   Device removed
+ */
+int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force)
{
int ret = 0;
int scope = RT_SCOPE_NOWHERE;
@@ -1141,8 +1303,7 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event)
struct hlist_head *head = &fib_info_devhash[hash];
struct fib_nh *nh;
- if (event == NETDEV_UNREGISTER ||
- event == NETDEV_DOWN)
+ if (force)
scope = -1;
hlist_for_each_entry(nh, head, nh_hash) {
@@ -1291,6 +1452,13 @@ int fib_sync_up(struct net_device *dev, unsigned int nh_flags)
if (!(dev->flags & IFF_UP))
return 0;
+ if (nh_flags & RTNH_F_DEAD) {
+ unsigned int flags = dev_get_flags(dev);
+
+ if (flags & (IFF_RUNNING | IFF_LOWER_UP))
+ nh_flags |= RTNH_F_LINKDOWN;
+ }
+
prev_fi = NULL;
hash = fib_devindex_hashfn(dev->ifindex);
head = &fib_info_devhash[hash];
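The comment table above pins down the new force argument: NETDEV_UNREGISTER always forces the DEAD state, NETDEV_DOWN forces it only when the last address was removed, and NETDEV_CHANGE never does. A minimal userspace sketch of that dispatch; the event names are reused for illustration only, and the real callers (in fib_frontend.c) are not part of this hunk:

#include <stdbool.h>
#include <stdio.h>

enum netdev_event { NETDEV_CHANGE, NETDEV_DOWN, NETDEV_UNREGISTER };

/* Mirrors the force/flags table documented above fib_sync_down_dev(). */
static bool fib_sync_force(enum netdev_event ev, bool last_addr_removed)
{
	switch (ev) {
	case NETDEV_UNREGISTER:
		return true;              /* device removed: always force */
	case NETDEV_DOWN:
		return last_addr_removed; /* force only on last address */
	case NETDEV_CHANGE:
	default:
		return false;             /* carrier change: LINKDOWN only */
	}
}

int main(void)
{
	printf("NETDEV_DOWN, last addr: force=%d\n",
	       fib_sync_force(NETDEV_DOWN, true));
	printf("NETDEV_CHANGE:          force=%d\n",
	       fib_sync_force(NETDEV_CHANGE, false));
	return 0;
}

Inside fib_sync_down_dev() itself, force simply widens the scope test to -1 so that even host-scope nexthops are marked dead, replacing the old event == NETDEV_UNREGISTER || event == NETDEV_DOWN check.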
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index b0c6258ff..744e5936c 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -81,6 +81,7 @@
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/switchdev.h>
+#include <trace/events/fib.h>
#include "fib_lookup.h"
#define MAX_STAT_DEPTH 32
@@ -1278,6 +1279,8 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
unsigned long index;
t_key cindex;
+ trace_fib_table_lookup(tb->tb_id, flp);
+
pn = t->kv;
cindex = 0;
@@ -1423,8 +1426,11 @@ found:
nh->nh_flags & RTNH_F_LINKDOWN &&
!(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
continue;
- if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif)
- continue;
+ if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) {
+ if (flp->flowi4_oif &&
+ flp->flowi4_oif != nh->nh_oif)
+ continue;
+ }
if (!(fib_flags & FIB_LOOKUP_NOREF))
atomic_inc(&fi->fib_clntref);
@@ -1439,6 +1445,8 @@ found:
#ifdef CONFIG_IP_FIB_TRIE_STATS
this_cpu_inc(stats->semantic_match_passed);
#endif
+ trace_fib_table_lookup_nh(nh);
+
return err;
}
}
@@ -1561,7 +1569,7 @@ static struct key_vector *leaf_walk_rcu(struct key_vector **tn, t_key key)
do {
/* record parent and next child index */
pn = n;
- cindex = key ? get_index(key, pn) : 0;
+ cindex = (key > pn->key) ? get_index(key, pn) : 0;
if (cindex >> pn->bits)
break;
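The one-line change in leaf_walk_rcu() is subtle. get_index(), defined earlier in fib_trie.c and not shown in this hunk, is assumed to compute (key ^ pn->key) >> pn->pos; when the resume key does not lie under the node's prefix, that value can point past the node's child array, so the walk must restart at slot 0, and key > pn->key is the right guard rather than key != 0. A standalone demonstration of the overflow, under that assumption about get_index():

#include <stdio.h>
#include <stdint.h>

typedef uint32_t t_key;

struct key_vector {
	t_key key;          /* prefix bits of this node */
	unsigned char pos;  /* bit offset of the child index */
	unsigned char bits; /* log2(number of children) */
};

/* Assumed shape of fib_trie's get_index(): XOR out the shared prefix,
 * then shift the differing bits down to a child slot. */
static unsigned long get_index(t_key key, const struct key_vector *kv)
{
	return (unsigned long)(key ^ kv->key) >> kv->pos;
}

int main(void)
{
	struct key_vector pn = { .key = 0x80000000u, .pos = 28, .bits = 2 };
	t_key small = 0x10000000u; /* sorts below pn->key */

	/* Raw index overflows the 4 child slots; the patched walk
	 * detects key <= pn->key and restarts at slot 0 instead. */
	printf("raw index = %lu (valid slots: 0..%u)\n",
	       get_index(small, &pn), (1u << pn.bits) - 1);
	return 0;
}

With the old key ? ... : 0 test, a nonzero key below pn->key produced index 9 here, tripping the cindex >> pn->bits bailout and aborting the walk early; the new comparison starts the descent at the first child instead.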
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 34968cd5c..e0fcbbbcf 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -79,7 +79,11 @@ static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
__be16 *pd = data;
size_t start = ntohs(pd[0]);
size_t offset = ntohs(pd[1]);
- size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
+ size_t plen = sizeof(struct udphdr) + hdrlen +
+ max_t(size_t, offset + sizeof(u16), start);
+
+ if (skb->remcsum_offload)
+ return guehdr;
if (!pskb_may_pull(skb, plen))
return NULL;
@@ -221,29 +225,21 @@ out_unlock:
static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
struct guehdr *guehdr, void *data,
- size_t hdrlen, u8 ipproto,
- struct gro_remcsum *grc, bool nopartial)
+ size_t hdrlen, struct gro_remcsum *grc,
+ bool nopartial)
{
__be16 *pd = data;
size_t start = ntohs(pd[0]);
size_t offset = ntohs(pd[1]);
- size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
if (skb->remcsum_offload)
- return NULL;
+ return guehdr;
if (!NAPI_GRO_CB(skb)->csum_valid)
return NULL;
- /* Pull checksum that will be written */
- if (skb_gro_header_hard(skb, off + plen)) {
- guehdr = skb_gro_header_slow(skb, off + plen, off);
- if (!guehdr)
- return NULL;
- }
-
- skb_gro_remcsum_process(skb, (void *)guehdr + hdrlen,
- start, offset, grc, nopartial);
+ guehdr = skb_gro_remcsum_process(skb, (void *)guehdr, off, hdrlen,
+ start, offset, grc, nopartial);
skb->remcsum_offload = 1;
@@ -307,10 +303,10 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
if (flags & GUE_PFLAG_REMCSUM) {
guehdr = gue_gro_remcsum(skb, off, guehdr,
- data + doffset, hdrlen,
- guehdr->proto_ctype, &grc,
+ data + doffset, hdrlen, &grc,
!!(fou->flags &
FOU_F_REMCSUM_NOPARTIAL));
+
if (!guehdr)
goto out;
@@ -351,7 +347,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
rcu_read_lock();
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
ops = rcu_dereference(offloads[guehdr->proto_ctype]);
- if (WARN_ON(!ops || !ops->callbacks.gro_receive))
+ if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
goto out_unlock;
pp = ops->callbacks.gro_receive(head, skb);
@@ -570,7 +566,7 @@ static int parse_nl_config(struct genl_info *info,
if (info->attrs[FOU_ATTR_AF]) {
u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]);
- if (family != AF_INET && family != AF_INET6)
+ if (family != AF_INET)
return -EINVAL;
cfg->udp_config.family = family;
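The first fou.c hunk widens gue_remcsum()'s pull length to cover the outer UDP header as well: the remote-checksum data carries a start offset and a checksum offset measured past the GUE header, and the skb must be linear up to whichever of the two ends later. A worked, self-contained restatement of the arithmetic, with sizeof(struct udphdr) written out as 8:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Mirrors the plen computation in the patched gue_remcsum(). */
static size_t gue_pull_len(size_t hdrlen, uint16_t start, uint16_t offset)
{
	size_t udphdr_len = 8; /* sizeof(struct udphdr) */
	size_t csum_end = (size_t)offset + sizeof(uint16_t);

	return udphdr_len + hdrlen + (csum_end > start ? csum_end : start);
}

int main(void)
{
	/* GUE header of 8 bytes, checksum starts at 0, lives at offset 16:
	 * must be able to pull 8 (UDP) + 8 (GUE) + 18 = 34 bytes. */
	printf("plen = %zu\n", gue_pull_len(8, 0, 16));
	return 0;
}

The early skb->remcsum_offload test added alongside it mirrors the GRO path below: once offload has already remapped the checksum, the helper must not apply it a second time.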
diff --git a/net/ipv4/geneve_core.c b/net/ipv4/geneve_core.c
deleted file mode 100644
index 311a4ba69..000000000
--- a/net/ipv4/geneve_core.c
+++ /dev/null
@@ -1,447 +0,0 @@
-/*
- * Geneve: Generic Network Virtualization Encapsulation
- *
- * Copyright (c) 2014 Nicira, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/skbuff.h>
-#include <linux/list.h>
-#include <linux/netdevice.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-#include <linux/igmp.h>
-#include <linux/etherdevice.h>
-#include <linux/if_ether.h>
-#include <linux/if_vlan.h>
-#include <linux/ethtool.h>
-#include <linux/mutex.h>
-#include <net/arp.h>
-#include <net/ndisc.h>
-#include <net/ip.h>
-#include <net/ip_tunnels.h>
-#include <net/icmp.h>
-#include <net/udp.h>
-#include <net/rtnetlink.h>
-#include <net/route.h>
-#include <net/dsfield.h>
-#include <net/inet_ecn.h>
-#include <net/net_namespace.h>
-#include <net/netns/generic.h>
-#include <net/geneve.h>
-#include <net/protocol.h>
-#include <net/udp_tunnel.h>
-#if IS_ENABLED(CONFIG_IPV6)
-#include <net/ipv6.h>
-#include <net/addrconf.h>
-#include <net/ip6_tunnel.h>
-#include <net/ip6_checksum.h>
-#endif
-
-/* Protects sock_list and refcounts. */
-static DEFINE_MUTEX(geneve_mutex);
-
-/* per-network namespace private data for this module */
-struct geneve_net {
- struct list_head sock_list;
-};
-
-static int geneve_net_id;
-
-static struct geneve_sock *geneve_find_sock(struct net *net,
- sa_family_t family, __be16 port)
-{
- struct geneve_net *gn = net_generic(net, geneve_net_id);
- struct geneve_sock *gs;
-
- list_for_each_entry(gs, &gn->sock_list, list) {
- if (inet_sk(gs->sock->sk)->inet_sport == port &&
- inet_sk(gs->sock->sk)->sk.sk_family == family)
- return gs;
- }
-
- return NULL;
-}
-
-static void geneve_build_header(struct genevehdr *geneveh,
- __be16 tun_flags, u8 vni[3],
- u8 options_len, u8 *options)
-{
- geneveh->ver = GENEVE_VER;
- geneveh->opt_len = options_len / 4;
- geneveh->oam = !!(tun_flags & TUNNEL_OAM);
- geneveh->critical = !!(tun_flags & TUNNEL_CRIT_OPT);
- geneveh->rsvd1 = 0;
- memcpy(geneveh->vni, vni, 3);
- geneveh->proto_type = htons(ETH_P_TEB);
- geneveh->rsvd2 = 0;
-
- memcpy(geneveh->options, options, options_len);
-}
-
-/* Transmit a fully formatted Geneve frame.
- *
- * When calling this function, skb->data should point
- * to the fully formed Geneve header.
- *
- * This function will add other UDP tunnel headers.
- */
-int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
- struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
- __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
- __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
- bool csum, bool xnet)
-{
- struct genevehdr *gnvh;
- int min_headroom;
- int err;
-
- min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
- + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
- + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
-
- err = skb_cow_head(skb, min_headroom);
- if (unlikely(err)) {
- kfree_skb(skb);
- return err;
- }
-
- skb = vlan_hwaccel_push_inside(skb);
- if (unlikely(!skb))
- return -ENOMEM;
-
- skb = udp_tunnel_handle_offloads(skb, csum);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
- geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
-
- skb_set_inner_protocol(skb, htons(ETH_P_TEB));
-
- return udp_tunnel_xmit_skb(rt, gs->sock->sk, skb, src, dst,
- tos, ttl, df, src_port, dst_port, xnet,
- !csum);
-}
-EXPORT_SYMBOL_GPL(geneve_xmit_skb);
-
-static int geneve_hlen(struct genevehdr *gh)
-{
- return sizeof(*gh) + gh->opt_len * 4;
-}
-
-static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
- struct sk_buff *skb,
- struct udp_offload *uoff)
-{
- struct sk_buff *p, **pp = NULL;
- struct genevehdr *gh, *gh2;
- unsigned int hlen, gh_len, off_gnv;
- const struct packet_offload *ptype;
- __be16 type;
- int flush = 1;
-
- off_gnv = skb_gro_offset(skb);
- hlen = off_gnv + sizeof(*gh);
- gh = skb_gro_header_fast(skb, off_gnv);
- if (skb_gro_header_hard(skb, hlen)) {
- gh = skb_gro_header_slow(skb, hlen, off_gnv);
- if (unlikely(!gh))
- goto out;
- }
-
- if (gh->ver != GENEVE_VER || gh->oam)
- goto out;
- gh_len = geneve_hlen(gh);
-
- hlen = off_gnv + gh_len;
- if (skb_gro_header_hard(skb, hlen)) {
- gh = skb_gro_header_slow(skb, hlen, off_gnv);
- if (unlikely(!gh))
- goto out;
- }
-
- flush = 0;
-
- for (p = *head; p; p = p->next) {
- if (!NAPI_GRO_CB(p)->same_flow)
- continue;
-
- gh2 = (struct genevehdr *)(p->data + off_gnv);
- if (gh->opt_len != gh2->opt_len ||
- memcmp(gh, gh2, gh_len)) {
- NAPI_GRO_CB(p)->same_flow = 0;
- continue;
- }
- }
-
- type = gh->proto_type;
-
- rcu_read_lock();
- ptype = gro_find_receive_by_type(type);
- if (!ptype) {
- flush = 1;
- goto out_unlock;
- }
-
- skb_gro_pull(skb, gh_len);
- skb_gro_postpull_rcsum(skb, gh, gh_len);
- pp = ptype->callbacks.gro_receive(head, skb);
-
-out_unlock:
- rcu_read_unlock();
-out:
- NAPI_GRO_CB(skb)->flush |= flush;
-
- return pp;
-}
-
-static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
- struct udp_offload *uoff)
-{
- struct genevehdr *gh;
- struct packet_offload *ptype;
- __be16 type;
- int gh_len;
- int err = -ENOSYS;
-
- udp_tunnel_gro_complete(skb, nhoff);
-
- gh = (struct genevehdr *)(skb->data + nhoff);
- gh_len = geneve_hlen(gh);
- type = gh->proto_type;
-
- rcu_read_lock();
- ptype = gro_find_complete_by_type(type);
- if (ptype)
- err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
-
- rcu_read_unlock();
- return err;
-}
-
-static void geneve_notify_add_rx_port(struct geneve_sock *gs)
-{
- struct sock *sk = gs->sock->sk;
- sa_family_t sa_family = sk->sk_family;
- int err;
-
- if (sa_family == AF_INET) {
- err = udp_add_offload(&gs->udp_offloads);
- if (err)
- pr_warn("geneve: udp_add_offload failed with status %d\n",
- err);
- }
-}
-
-static void geneve_notify_del_rx_port(struct geneve_sock *gs)
-{
- struct sock *sk = gs->sock->sk;
- sa_family_t sa_family = sk->sk_family;
-
- if (sa_family == AF_INET)
- udp_del_offload(&gs->udp_offloads);
-}
-
-/* Callback from net/ipv4/udp.c to receive packets */
-static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
-{
- struct genevehdr *geneveh;
- struct geneve_sock *gs;
- int opts_len;
-
- /* Need Geneve and inner Ethernet header to be present */
- if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
- goto error;
-
- /* Return packets with reserved bits set */
- geneveh = geneve_hdr(skb);
-
- if (unlikely(geneveh->ver != GENEVE_VER))
- goto error;
-
- if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
- goto error;
-
- opts_len = geneveh->opt_len * 4;
- if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
- htons(ETH_P_TEB)))
- goto drop;
-
- gs = rcu_dereference_sk_user_data(sk);
- if (!gs)
- goto drop;
-
- gs->rcv(gs, skb);
- return 0;
-
-drop:
- /* Consume bad packet */
- kfree_skb(skb);
- return 0;
-
-error:
- /* Let the UDP layer deal with the skb */
- return 1;
-}
-
-static struct socket *geneve_create_sock(struct net *net, bool ipv6,
- __be16 port)
-{
- struct socket *sock;
- struct udp_port_cfg udp_conf;
- int err;
-
- memset(&udp_conf, 0, sizeof(udp_conf));
-
- if (ipv6) {
- udp_conf.family = AF_INET6;
- } else {
- udp_conf.family = AF_INET;
- udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
- }
-
- udp_conf.local_udp_port = port;
-
- /* Open UDP socket */
- err = udp_sock_create(net, &udp_conf, &sock);
- if (err < 0)
- return ERR_PTR(err);
-
- return sock;
-}
-
-/* Create new listen socket if needed */
-static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
- geneve_rcv_t *rcv, void *data,
- bool ipv6)
-{
- struct geneve_net *gn = net_generic(net, geneve_net_id);
- struct geneve_sock *gs;
- struct socket *sock;
- struct udp_tunnel_sock_cfg tunnel_cfg;
-
- gs = kzalloc(sizeof(*gs), GFP_KERNEL);
- if (!gs)
- return ERR_PTR(-ENOMEM);
-
- sock = geneve_create_sock(net, ipv6, port);
- if (IS_ERR(sock)) {
- kfree(gs);
- return ERR_CAST(sock);
- }
-
- gs->sock = sock;
- gs->refcnt = 1;
- gs->rcv = rcv;
- gs->rcv_data = data;
-
- /* Initialize the geneve udp offloads structure */
- gs->udp_offloads.port = port;
- gs->udp_offloads.callbacks.gro_receive = geneve_gro_receive;
- gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete;
- geneve_notify_add_rx_port(gs);
-
- /* Mark socket as an encapsulation socket */
- tunnel_cfg.sk_user_data = gs;
- tunnel_cfg.encap_type = 1;
- tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
- tunnel_cfg.encap_destroy = NULL;
- setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
-
- list_add(&gs->list, &gn->sock_list);
-
- return gs;
-}
-
-struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
- geneve_rcv_t *rcv, void *data,
- bool no_share, bool ipv6)
-{
- struct geneve_sock *gs;
-
- mutex_lock(&geneve_mutex);
-
- gs = geneve_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
- if (gs) {
- if (!no_share && gs->rcv == rcv)
- gs->refcnt++;
- else
- gs = ERR_PTR(-EBUSY);
- } else {
- gs = geneve_socket_create(net, port, rcv, data, ipv6);
- }
-
- mutex_unlock(&geneve_mutex);
-
- return gs;
-}
-EXPORT_SYMBOL_GPL(geneve_sock_add);
-
-void geneve_sock_release(struct geneve_sock *gs)
-{
- mutex_lock(&geneve_mutex);
-
- if (--gs->refcnt)
- goto unlock;
-
- list_del(&gs->list);
- geneve_notify_del_rx_port(gs);
- udp_tunnel_sock_release(gs->sock);
- kfree_rcu(gs, rcu);
-
-unlock:
- mutex_unlock(&geneve_mutex);
-}
-EXPORT_SYMBOL_GPL(geneve_sock_release);
-
-static __net_init int geneve_init_net(struct net *net)
-{
- struct geneve_net *gn = net_generic(net, geneve_net_id);
-
- INIT_LIST_HEAD(&gn->sock_list);
-
- return 0;
-}
-
-static struct pernet_operations geneve_net_ops = {
- .init = geneve_init_net,
- .id = &geneve_net_id,
- .size = sizeof(struct geneve_net),
-};
-
-static int __init geneve_init_module(void)
-{
- int rc;
-
- rc = register_pernet_subsys(&geneve_net_ops);
- if (rc)
- return rc;
-
- pr_info("Geneve core logic\n");
-
- return 0;
-}
-module_init(geneve_init_module);
-
-static void __exit geneve_cleanup_module(void)
-{
- unregister_pernet_subsys(&geneve_net_ops);
-}
-module_exit(geneve_cleanup_module);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jesse Gross <jesse@nicira.com>");
-MODULE_DESCRIPTION("Driver library for GENEVE encapsulated traffic");
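For later reference, the deleted geneve_build_header()/geneve_hlen() pair encodes the option length in 4-byte words: the builder stores options_len / 4 in the opt_len field and the parser recovers sizeof(struct genevehdr) + opt_len * 4. A standalone round-trip of that arithmetic; the 8-byte fixed header size is written out as a local constant:

#include <stdio.h>
#include <assert.h>

#define GENEVE_HDR_FIXED 8 /* struct genevehdr without options */

static unsigned char encode_opt_len(unsigned int options_len)
{
	assert(options_len % 4 == 0); /* options are 4-byte multiples */
	return (unsigned char)(options_len / 4);
}

static unsigned int geneve_hlen(unsigned char opt_len)
{
	return GENEVE_HDR_FIXED + opt_len * 4;
}

int main(void)
{
	unsigned char w = encode_opt_len(8); /* 8 bytes of options */

	printf("opt_len field = %u words, total hlen = %u bytes\n",
	       w, geneve_hlen(w)); /* 2 words, 16 bytes */
	return 0;
}

The flow-based Geneve driver that replaces this file lives under drivers/net in this kernel and keeps the same wire format.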
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 4a7b5b2a1..d9c552a72 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -31,7 +31,6 @@
#include <net/xfrm.h>
static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly;
-static struct gre_cisco_protocol __rcu *gre_cisco_proto_list[GRE_IP_PROTO_MAX];
int gre_add_protocol(const struct gre_protocol *proto, u8 version)
{
@@ -61,197 +60,6 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version)
}
EXPORT_SYMBOL_GPL(gre_del_protocol);
-void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
- int hdr_len)
-{
- struct gre_base_hdr *greh;
-
- skb_push(skb, hdr_len);
-
- skb_reset_transport_header(skb);
- greh = (struct gre_base_hdr *)skb->data;
- greh->flags = tnl_flags_to_gre_flags(tpi->flags);
- greh->protocol = tpi->proto;
-
- if (tpi->flags&(TUNNEL_KEY|TUNNEL_CSUM|TUNNEL_SEQ)) {
- __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
-
- if (tpi->flags&TUNNEL_SEQ) {
- *ptr = tpi->seq;
- ptr--;
- }
- if (tpi->flags&TUNNEL_KEY) {
- *ptr = tpi->key;
- ptr--;
- }
- if (tpi->flags&TUNNEL_CSUM &&
- !(skb_shinfo(skb)->gso_type &
- (SKB_GSO_GRE|SKB_GSO_GRE_CSUM))) {
- *ptr = 0;
- *(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0,
- skb->len, 0));
- }
- }
-}
-EXPORT_SYMBOL_GPL(gre_build_header);
-
-static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
- bool *csum_err)
-{
- const struct gre_base_hdr *greh;
- __be32 *options;
- int hdr_len;
-
- if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
- return -EINVAL;
-
- greh = (struct gre_base_hdr *)skb_transport_header(skb);
- if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
- return -EINVAL;
-
- tpi->flags = gre_flags_to_tnl_flags(greh->flags);
- hdr_len = ip_gre_calc_hlen(tpi->flags);
-
- if (!pskb_may_pull(skb, hdr_len))
- return -EINVAL;
-
- greh = (struct gre_base_hdr *)skb_transport_header(skb);
- tpi->proto = greh->protocol;
-
- options = (__be32 *)(greh + 1);
- if (greh->flags & GRE_CSUM) {
- if (skb_checksum_simple_validate(skb)) {
- *csum_err = true;
- return -EINVAL;
- }
-
- skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
- null_compute_pseudo);
-
- options++;
- }
-
- if (greh->flags & GRE_KEY) {
- tpi->key = *options;
- options++;
- } else
- tpi->key = 0;
-
- if (unlikely(greh->flags & GRE_SEQ)) {
- tpi->seq = *options;
- options++;
- } else
- tpi->seq = 0;
-
- /* WCCP version 1 and 2 protocol decoding.
- * - Change protocol to IP
- * - When dealing with WCCPv2, skip the extra 4 bytes in the GRE header
- */
- if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
- tpi->proto = htons(ETH_P_IP);
- if ((*(u8 *)options & 0xF0) != 0x40) {
- hdr_len += 4;
- if (!pskb_may_pull(skb, hdr_len))
- return -EINVAL;
- }
- }
-
- return iptunnel_pull_header(skb, hdr_len, tpi->proto);
-}
-
-static int gre_cisco_rcv(struct sk_buff *skb)
-{
- struct tnl_ptk_info tpi;
- int i;
- bool csum_err = false;
-
-#ifdef CONFIG_NET_IPGRE_BROADCAST
- if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
- /* Looped back packet, drop it! */
- if (rt_is_output_route(skb_rtable(skb)))
- goto drop;
- }
-#endif
-
- if (parse_gre_header(skb, &tpi, &csum_err) < 0)
- goto drop;
-
- rcu_read_lock();
- for (i = 0; i < GRE_IP_PROTO_MAX; i++) {
- struct gre_cisco_protocol *proto;
- int ret;
-
- proto = rcu_dereference(gre_cisco_proto_list[i]);
- if (!proto)
- continue;
- ret = proto->handler(skb, &tpi);
- if (ret == PACKET_RCVD) {
- rcu_read_unlock();
- return 0;
- }
- }
- rcu_read_unlock();
-
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
-drop:
- kfree_skb(skb);
- return 0;
-}
-
-static void gre_cisco_err(struct sk_buff *skb, u32 info)
-{
- /* All the routers (except for Linux) return only
- * 8 bytes of packet payload. This means that precise relaying of
- * ICMP in the real Internet is absolutely infeasible.
- *
- * Moreover, Cisco "wise men" put the GRE key in the third word
- * of the GRE header. That makes it impossible to maintain even
- * soft state for keyed GRE tunnels with checksums enabled.
- * Tell them "thank you".
- *
- * Well, I wonder, RFC 1812 was written by a Cisco employee;
- * why the hell do these idiots break standards established
- * by themselves?
- */
-
- const int type = icmp_hdr(skb)->type;
- const int code = icmp_hdr(skb)->code;
- struct tnl_ptk_info tpi;
- bool csum_err = false;
- int i;
-
- if (parse_gre_header(skb, &tpi, &csum_err)) {
- if (!csum_err) /* ignore csum errors. */
- return;
- }
-
- if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
- ipv4_update_pmtu(skb, dev_net(skb->dev), info,
- skb->dev->ifindex, 0, IPPROTO_GRE, 0);
- return;
- }
- if (type == ICMP_REDIRECT) {
- ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
- IPPROTO_GRE, 0);
- return;
- }
-
- rcu_read_lock();
- for (i = 0; i < GRE_IP_PROTO_MAX; i++) {
- struct gre_cisco_protocol *proto;
-
- proto = rcu_dereference(gre_cisco_proto_list[i]);
- if (!proto)
- continue;
-
- if (proto->err_handler(skb, info, &tpi) == PACKET_RCVD)
- goto out;
-
- }
-out:
- rcu_read_unlock();
-}
-
static int gre_rcv(struct sk_buff *skb)
{
const struct gre_protocol *proto;
@@ -302,60 +110,19 @@ static const struct net_protocol net_gre_protocol = {
.netns_ok = 1,
};
-static const struct gre_protocol ipgre_protocol = {
- .handler = gre_cisco_rcv,
- .err_handler = gre_cisco_err,
-};
-
-int gre_cisco_register(struct gre_cisco_protocol *newp)
-{
- struct gre_cisco_protocol **proto = (struct gre_cisco_protocol **)
- &gre_cisco_proto_list[newp->priority];
-
- return (cmpxchg(proto, NULL, newp) == NULL) ? 0 : -EBUSY;
-}
-EXPORT_SYMBOL_GPL(gre_cisco_register);
-
-int gre_cisco_unregister(struct gre_cisco_protocol *del_proto)
-{
- struct gre_cisco_protocol **proto = (struct gre_cisco_protocol **)
- &gre_cisco_proto_list[del_proto->priority];
- int ret;
-
- ret = (cmpxchg(proto, del_proto, NULL) == del_proto) ? 0 : -EINVAL;
-
- if (ret)
- return ret;
-
- synchronize_net();
- return 0;
-}
-EXPORT_SYMBOL_GPL(gre_cisco_unregister);
-
static int __init gre_init(void)
{
pr_info("GRE over IPv4 demultiplexor driver\n");
if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) {
pr_err("can't add protocol\n");
- goto err;
- }
-
- if (gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0) {
- pr_info("%s: can't add ipgre handler\n", __func__);
- goto err_gre;
+ return -EAGAIN;
}
-
return 0;
-err_gre:
- inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
-err:
- return -EAGAIN;
}
static void __exit gre_exit(void)
{
- gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
}
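The removed parse_gre_header() (re-added to ip_gre.c later in this diff) walks the optional fields in their wire order: when GRE_CSUM is set, the first 32-bit word holds the checksum and reserved half-words, then comes the key, then the sequence number. A self-contained sketch of that walk over a raw option buffer; the flag constants are local stand-ins, since the kernel keeps the GRE_* bits in network byte order:

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins for the GRE_* flag bits. */
#define F_CSUM 0x8000
#define F_KEY  0x2000
#define F_SEQ  0x1000

struct tnl_info { uint32_t key, seq; };

/* Walks the optional 32-bit words in wire order: csum, key, seq. */
static size_t parse_gre_options(uint16_t flags, const uint32_t *opts,
				struct tnl_info *tpi)
{
	const uint32_t *p = opts;

	if (flags & F_CSUM)
		p++;             /* checksum + reserved word: skip over */
	tpi->key = (flags & F_KEY) ? *p++ : 0;
	tpi->seq = (flags & F_SEQ) ? *p++ : 0;
	return (size_t)(p - opts) * 4; /* option bytes consumed */
}

int main(void)
{
	uint32_t opts[] = { 0xdeadbeef, 0x00000042, 0x00000007 };
	struct tnl_info tpi;
	size_t n = parse_gre_options(F_CSUM | F_KEY | F_SEQ, opts, &tpi);

	printf("key=0x%x seq=%u, %zu option bytes\n",
	       (unsigned)tpi.key, (unsigned)tpi.seq, n);
	return 0;
}

ip_gre_calc_hlen() in the re-added copy computes the same layout from the other direction: 4 bytes of base header plus 4 for each set flag.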
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 5aa46d4b4..5a8ee3282 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -36,7 +36,8 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
SKB_GSO_TCP_ECN |
SKB_GSO_GRE |
SKB_GSO_GRE_CSUM |
- SKB_GSO_IPIP)))
+ SKB_GSO_IPIP |
+ SKB_GSO_SIT)))
goto out;
if (!skb->encapsulation)
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index f5203fba6..e5eb8ac40 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -96,6 +96,7 @@
#include <net/xfrm.h>
#include <net/inet_common.h>
#include <net/ip_fib.h>
+#include <net/vrf.h>
/*
* Build xmit assembly blocks
@@ -308,9 +309,10 @@ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
rc = false;
if (icmp_global_allow()) {
+ int vif = vrf_master_ifindex(dst->dev);
struct inet_peer *peer;
- peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, 1);
+ peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1);
rc = inet_peer_xrlim_allow(peer,
net->ipv4.sysctl_icmp_ratelimit);
if (peer)
@@ -425,6 +427,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
fl4.flowi4_mark = mark;
fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
fl4.flowi4_proto = IPPROTO_ICMP;
+ fl4.flowi4_oif = vrf_master_ifindex(skb->dev);
security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
@@ -458,6 +461,8 @@ static struct rtable *icmp_route_lookup(struct net *net,
fl4->flowi4_proto = IPPROTO_ICMP;
fl4->fl4_icmp_type = type;
fl4->fl4_icmp_code = code;
+ fl4->flowi4_oif = vrf_master_ifindex(skb_in->dev);
+
security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
rt = __ip_route_output_key(net, fl4);
if (IS_ERR(rt))
@@ -480,7 +485,8 @@ static struct rtable *icmp_route_lookup(struct net *net,
if (err)
goto relookup_failed;
- if (inet_addr_type(net, fl4_dec.saddr) == RTN_LOCAL) {
+ if (inet_addr_type_dev_table(net, skb_in->dev,
+ fl4_dec.saddr) == RTN_LOCAL) {
rt2 = __ip_route_output_key(net, &fl4_dec);
if (IS_ERR(rt2))
err = PTR_ERR(rt2);
@@ -496,6 +502,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
}
/* Ugh! */
orefdst = skb_in->_skb_refdst; /* save old refdst */
+ skb_dst_set(skb_in, NULL);
err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr,
RT_TOS(tos), rt2->dst.dev);
@@ -828,7 +835,7 @@ static bool icmp_unreach(struct sk_buff *skb)
*/
if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses &&
- inet_addr_type(net, iph->daddr) == RTN_BROADCAST) {
+ inet_addr_type_dev_table(net, skb->dev, iph->daddr) == RTN_BROADCAST) {
net_warn_ratelimited("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n",
&ip_hdr(skb)->saddr,
icmph->type, icmph->code,
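The vif argument threaded into inet_getpeer_v4() makes ICMP rate limiting VRF-aware: with overlapping address space, the same daddr can name different hosts in different VRFs, so the peer cache must key on the pair rather than the address alone. A minimal illustration of why the tuple has to be the key; the struct fields are invented for the sketch:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Hypothetical peer-cache key: address alone no longer suffices. */
struct peer_key {
	uint32_t daddr; /* IPv4 address */
	int vif;        /* VRF master ifindex, 0 = default VRF */
};

static bool peer_key_eq(const struct peer_key *a, const struct peer_key *b)
{
	return a->daddr == b->daddr && a->vif == b->vif;
}

int main(void)
{
	struct peer_key blue = { 0x0a000001u, 4 }; /* 10.0.0.1 in VRF 4 */
	struct peer_key red  = { 0x0a000001u, 7 }; /* same addr, VRF 7 */

	/* Distinct peers: throttling one VRF must not throttle the other. */
	printf("same peer? %d\n", peer_key_eq(&blue, &red));
	return 0;
}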
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 9fdfd9dea..d38b8b61e 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -110,6 +110,9 @@
#define IP_MAX_MEMBERSHIPS 20
#define IP_MAX_MSF 10
+/* IGMP reports for link-local multicast groups are enabled by default */
+int sysctl_igmp_llm_reports __read_mostly = 1;
+
#ifdef CONFIG_IP_MULTICAST
/* Parameter names and values are taken from igmp-v2-06 draft */
@@ -437,6 +440,8 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
if (pmc->multiaddr == IGMP_ALL_HOSTS)
return skb;
+ if (ipv4_is_local_multicast(pmc->multiaddr) && !sysctl_igmp_llm_reports)
+ return skb;
isquery = type == IGMPV3_MODE_IS_INCLUDE ||
type == IGMPV3_MODE_IS_EXCLUDE;
@@ -545,6 +550,9 @@ static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
for_each_pmc_rcu(in_dev, pmc) {
if (pmc->multiaddr == IGMP_ALL_HOSTS)
continue;
+ if (ipv4_is_local_multicast(pmc->multiaddr) &&
+ !sysctl_igmp_llm_reports)
+ continue;
spin_lock_bh(&pmc->lock);
if (pmc->sfcount[MCAST_EXCLUDE])
type = IGMPV3_MODE_IS_EXCLUDE;
@@ -678,7 +686,11 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
return igmpv3_send_report(in_dev, pmc);
- else if (type == IGMP_HOST_LEAVE_MESSAGE)
+
+ if (ipv4_is_local_multicast(group) && !sysctl_igmp_llm_reports)
+ return 0;
+
+ if (type == IGMP_HOST_LEAVE_MESSAGE)
dst = IGMP_ALL_ROUTER;
else
dst = group;
@@ -851,6 +863,8 @@ static bool igmp_heard_report(struct in_device *in_dev, __be32 group)
if (group == IGMP_ALL_HOSTS)
return false;
+ if (ipv4_is_local_multicast(group) && !sysctl_igmp_llm_reports)
+ return false;
rcu_read_lock();
for_each_pmc_rcu(in_dev, im) {
@@ -957,6 +971,9 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
continue;
if (im->multiaddr == IGMP_ALL_HOSTS)
continue;
+ if (ipv4_is_local_multicast(im->multiaddr) &&
+ !sysctl_igmp_llm_reports)
+ continue;
spin_lock_bh(&im->lock);
if (im->tm_running)
im->gsquery = im->gsquery && mark;
@@ -1181,6 +1198,8 @@ static void igmp_group_dropped(struct ip_mc_list *im)
#ifdef CONFIG_IP_MULTICAST
if (im->multiaddr == IGMP_ALL_HOSTS)
return;
+ if (ipv4_is_local_multicast(im->multiaddr) && !sysctl_igmp_llm_reports)
+ return;
reporter = im->reporter;
igmp_stop_timer(im);
@@ -1213,6 +1232,8 @@ static void igmp_group_added(struct ip_mc_list *im)
#ifdef CONFIG_IP_MULTICAST
if (im->multiaddr == IGMP_ALL_HOSTS)
return;
+ if (ipv4_is_local_multicast(im->multiaddr) && !sysctl_igmp_llm_reports)
+ return;
if (in_dev->dead)
return;
@@ -1518,6 +1539,9 @@ static void ip_mc_rejoin_groups(struct in_device *in_dev)
for_each_pmc_rtnl(in_dev, im) {
if (im->multiaddr == IGMP_ALL_HOSTS)
continue;
+ if (ipv4_is_local_multicast(im->multiaddr) &&
+ !sysctl_igmp_llm_reports)
+ continue;
/* a failover is happening and switches
* must be notified immediately
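All of the igmp.c hunks gate on the same predicate: the new sysctl suppresses IGMP state for link-local groups, i.e. 224.0.0.0/24. ipv4_is_local_multicast() comes from include/linux/in.h and is assumed to mask off the low octet and compare against 224.0.0.0; a standalone restatement with two probe addresses:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Assumed shape of ipv4_is_local_multicast(): 224.0.0.0/24. */
static int ipv4_is_local_multicast(uint32_t addr_be)
{
	return (addr_be & htonl(0xffffff00)) == htonl(0xe0000000);
}

int main(void)
{
	printf("224.0.0.1: %d\n",
	       ipv4_is_local_multicast(inet_addr("224.0.0.1"))); /* 1 */
	printf("239.1.2.3: %d\n",
	       ipv4_is_local_multicast(inet_addr("239.1.2.3"))); /* 0 */
	return 0;
}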
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 0cb916542..89120196a 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -343,7 +343,6 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
struct sock *sk2;
const struct hlist_nulls_node *node;
struct inet_timewait_sock *tw = NULL;
- int twrefcnt = 0;
spin_lock(lock);
@@ -371,21 +370,17 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
WARN_ON(!sk_unhashed(sk));
__sk_nulls_add_node_rcu(sk, &head->chain);
if (tw) {
- twrefcnt = inet_twsk_unhash(tw);
+ sk_nulls_del_node_init_rcu((struct sock *)tw);
NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
}
spin_unlock(lock);
- if (twrefcnt)
- inet_twsk_put(tw);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
if (twp) {
*twp = tw;
} else if (tw) {
/* Silly. Should hash-dance instead... */
- inet_twsk_deschedule(tw);
-
- inet_twsk_put(tw);
+ inet_twsk_deschedule_put(tw);
}
return 0;
@@ -403,13 +398,12 @@ static u32 inet_sk_port_offset(const struct sock *sk)
inet->inet_dport);
}
-int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
+void __inet_hash_nolisten(struct sock *sk, struct sock *osk)
{
struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
struct hlist_nulls_head *list;
struct inet_ehash_bucket *head;
spinlock_t *lock;
- int twrefcnt = 0;
WARN_ON(!sk_unhashed(sk));
@@ -420,23 +414,22 @@ int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
spin_lock(lock);
__sk_nulls_add_node_rcu(sk, list);
- if (tw) {
- WARN_ON(sk->sk_hash != tw->tw_hash);
- twrefcnt = inet_twsk_unhash(tw);
+ if (osk) {
+ WARN_ON(sk->sk_hash != osk->sk_hash);
+ sk_nulls_del_node_init_rcu(osk);
}
spin_unlock(lock);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
- return twrefcnt;
}
EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
-int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw)
+void __inet_hash(struct sock *sk, struct sock *osk)
{
struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
struct inet_listen_hashbucket *ilb;
if (sk->sk_state != TCP_LISTEN)
- return __inet_hash_nolisten(sk, tw);
+ return __inet_hash_nolisten(sk, osk);
WARN_ON(!sk_unhashed(sk));
ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
@@ -445,7 +438,6 @@ int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw)
__sk_nulls_add_node_rcu(sk, &ilb->head);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
spin_unlock(&ilb->lock);
- return 0;
}
EXPORT_SYMBOL(__inet_hash);
@@ -492,7 +484,6 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
struct inet_bind_bucket *tb;
int ret;
struct net *net = sock_net(sk);
- int twrefcnt = 1;
if (!snum) {
int i, remaining, low, high, port;
@@ -560,19 +551,14 @@ ok:
inet_bind_hash(sk, tb, port);
if (sk_unhashed(sk)) {
inet_sk(sk)->inet_sport = htons(port);
- twrefcnt += __inet_hash_nolisten(sk, tw);
+ __inet_hash_nolisten(sk, (struct sock *)tw);
}
if (tw)
- twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
+ inet_twsk_bind_unhash(tw, hinfo);
spin_unlock(&head->lock);
- if (tw) {
- inet_twsk_deschedule(tw);
- while (twrefcnt) {
- twrefcnt--;
- inet_twsk_put(tw);
- }
- }
+ if (tw)
+ inet_twsk_deschedule_put(tw);
ret = 0;
goto out;
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 2ffbd16b7..c67f9bd76 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -18,28 +18,6 @@
/**
- * inet_twsk_unhash - unhash a timewait socket from established hash
- * @tw: timewait socket
- *
- * unhash a timewait socket from established hash, if hashed.
- * ehash lock must be held by caller.
- * Returns 1 if caller should call inet_twsk_put() after lock release.
- */
-int inet_twsk_unhash(struct inet_timewait_sock *tw)
-{
- if (hlist_nulls_unhashed(&tw->tw_node))
- return 0;
-
- hlist_nulls_del_rcu(&tw->tw_node);
- sk_nulls_node_init(&tw->tw_node);
- /*
- * We cannot call inet_twsk_put() ourself under lock,
- * caller must call it for us.
- */
- return 1;
-}
-
-/**
* inet_twsk_bind_unhash - unhash a timewait socket from bind hash
* @tw: timewait socket
* @hashinfo: hashinfo pointer
@@ -48,35 +26,29 @@ int inet_twsk_unhash(struct inet_timewait_sock *tw)
* bind hash lock must be held by caller.
 * The timewait reference is dropped here via __sock_put(); the caller
 * no longer needs to call inet_twsk_put() after lock release.
*/
-int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
struct inet_hashinfo *hashinfo)
{
struct inet_bind_bucket *tb = tw->tw_tb;
if (!tb)
- return 0;
+ return;
__hlist_del(&tw->tw_bind_node);
tw->tw_tb = NULL;
inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
- /*
- * We cannot call inet_twsk_put() ourself under lock,
- * caller must call it for us.
- */
- return 1;
+ __sock_put((struct sock *)tw);
}
/* Must be called with locally disabled BHs. */
static void inet_twsk_kill(struct inet_timewait_sock *tw)
{
struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
- struct inet_bind_hashbucket *bhead;
- int refcnt;
- /* Unlink from established hashes. */
spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
+ struct inet_bind_hashbucket *bhead;
spin_lock(lock);
- refcnt = inet_twsk_unhash(tw);
+ sk_nulls_del_node_init_rcu((struct sock *)tw);
spin_unlock(lock);
/* Disassociate with bind bucket. */
@@ -84,11 +56,9 @@ static void inet_twsk_kill(struct inet_timewait_sock *tw)
hashinfo->bhash_size)];
spin_lock(&bhead->lock);
- refcnt += inet_twsk_bind_unhash(tw, hashinfo);
+ inet_twsk_bind_unhash(tw, hashinfo);
spin_unlock(&bhead->lock);
- BUG_ON(refcnt >= atomic_read(&tw->tw_refcnt));
- atomic_sub(refcnt, &tw->tw_refcnt);
atomic_dec(&tw->tw_dr->tw_count);
inet_twsk_put(tw);
}
@@ -153,13 +123,15 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
/*
* Step 2: Hash TW into tcp ehash chain.
* Notes :
- * - tw_refcnt is set to 3 because :
+ * - tw_refcnt is set to 4 because :
* - We have one reference from bhash chain.
* - We have one reference from ehash chain.
+ * - We have one reference from timer.
+ * - One reference for ourself (our caller will release it).
* We can use atomic_set() because prior spin_lock()/spin_unlock()
* committed into memory all tw fields.
*/
- atomic_set(&tw->tw_refcnt, 1 + 1 + 1);
+ atomic_set(&tw->tw_refcnt, 4);
inet_twsk_add_node_rcu(tw, &ehead->chain);
/* Step 3: Remove SK from hash chain */
@@ -235,15 +207,19 @@ EXPORT_SYMBOL_GPL(inet_twsk_alloc);
* tcp_input.c to verify this.
*/
-/* This is for handling early-kills of TIME_WAIT sockets. */
-void inet_twsk_deschedule(struct inet_timewait_sock *tw)
+/* This is for handling early-kills of TIME_WAIT sockets.
+ * Warning: consumes the reference.
+ * Caller must not access tw afterwards.
+ */
+void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
{
if (del_timer_sync(&tw->tw_timer))
inet_twsk_kill(tw);
+ inet_twsk_put(tw);
}
-EXPORT_SYMBOL(inet_twsk_deschedule);
+EXPORT_SYMBOL(inet_twsk_deschedule_put);
-void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo)
+void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
{
/* timeout := RTO * 3.5
*
@@ -271,12 +247,14 @@ void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo)
*/
tw->tw_kill = timeo <= 4*HZ;
- if (!mod_timer_pinned(&tw->tw_timer, jiffies + timeo)) {
- atomic_inc(&tw->tw_refcnt);
+ if (!rearm) {
+ BUG_ON(mod_timer_pinned(&tw->tw_timer, jiffies + timeo));
atomic_inc(&tw->tw_dr->tw_count);
+ } else {
+ mod_timer_pending(&tw->tw_timer, jiffies + timeo);
}
}
-EXPORT_SYMBOL_GPL(inet_twsk_schedule);
+EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
void inet_twsk_purge(struct inet_hashinfo *hashinfo,
struct inet_timewait_death_row *twdr, int family)
@@ -311,9 +289,8 @@ restart:
rcu_read_unlock();
local_bh_disable();
- inet_twsk_deschedule(tw);
+ inet_twsk_deschedule_put(tw);
local_bh_enable();
- inet_twsk_put(tw);
goto restart_rcu;
}
/* If the nulls value we got at the end of this lookup is
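The timewait refcount rework is easiest to audit as a ledger: __inet_twsk_hashdance() now starts tw_refcnt at 4 (ehash, bhash, timer, caller), each unhash path drops exactly the reference it owns, and inet_twsk_deschedule_put() consumes the caller's on the way out. A toy version of that accounting, with a plain int in place of the atomic and drop sites named after the functions above (ordering is illustrative):

#include <assert.h>
#include <stdio.h>

static int tw_refcnt;

static void drop(const char *site)
{
	tw_refcnt--;
	printf("%-40s -> refcnt %d\n", site, tw_refcnt);
}

int main(void)
{
	tw_refcnt = 4; /* hashdance: ehash + bhash + timer + caller */

	drop("caller's inet_twsk_put()");
	/* inet_twsk_deschedule_put(): timer cancelled, kill runs */
	drop("ehash unlink (sk_nulls_del_node_init_rcu)");
	drop("bhash unlink (inet_twsk_bind_unhash)");
	drop("timer ref (inet_twsk_kill)");

	assert(tw_refcnt == 0); /* freed exactly once, never early */
	return 0;
}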
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 241afd743..86fa45809 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -157,22 +157,6 @@ void __init inet_initpeers(void)
INIT_DEFERRABLE_WORK(&gc_work, inetpeer_gc_worker);
}
-static int addr_compare(const struct inetpeer_addr *a,
- const struct inetpeer_addr *b)
-{
- int i, n = (a->family == AF_INET ? 1 : 4);
-
- for (i = 0; i < n; i++) {
- if (a->addr.a6[i] == b->addr.a6[i])
- continue;
- if ((__force u32)a->addr.a6[i] < (__force u32)b->addr.a6[i])
- return -1;
- return 1;
- }
-
- return 0;
-}
-
#define rcu_deref_locked(X, BASE) \
rcu_dereference_protected(X, lockdep_is_held(&(BASE)->lock.lock))
@@ -188,7 +172,7 @@ static int addr_compare(const struct inetpeer_addr *a,
*stackptr++ = &_base->root; \
for (u = rcu_deref_locked(_base->root, _base); \
u != peer_avl_empty;) { \
- int cmp = addr_compare(_daddr, &u->daddr); \
+ int cmp = inetpeer_addr_cmp(_daddr, &u->daddr); \
if (cmp == 0) \
break; \
if (cmp == -1) \
@@ -215,7 +199,7 @@ static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
int count = 0;
while (u != peer_avl_empty) {
- int cmp = addr_compare(daddr, &u->daddr);
+ int cmp = inetpeer_addr_cmp(daddr, &u->daddr);
if (cmp == 0) {
/* Before taking a reference, check if this entry was
* deleted (refcnt=-1)
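addr_compare() is removed in favour of inetpeer_addr_cmp(), which lives in include/net/inetpeer.h and presumably keeps the same contract: a word-wise lexicographic comparison over one 32-bit word for IPv4 and four for IPv6, returning -1, 0, or 1. A standalone restatement of that contract:

#include <stdio.h>
#include <stdint.h>

struct peer_addr {
	int family;     /* 4 or 6, standing in for AF_INET/AF_INET6 */
	uint32_t a6[4]; /* IPv4 uses a6[0] only */
};

/* Same contract as the removed addr_compare(): -1/0/1, word-wise. */
static int peer_addr_cmp(const struct peer_addr *a, const struct peer_addr *b)
{
	int i, n = (a->family == 4) ? 1 : 4;

	for (i = 0; i < n; i++) {
		if (a->a6[i] == b->a6[i])
			continue;
		return a->a6[i] < b->a6[i] ? -1 : 1;
	}
	return 0;
}

int main(void)
{
	struct peer_addr x = { 4, { 0x0a000001u } };
	struct peer_addr y = { 4, { 0x0a000002u } };

	printf("cmp = %d\n", peer_addr_cmp(&x, &y)); /* -1 */
	return 0;
}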
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 921138f6c..fa7f15305 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -48,6 +48,7 @@
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>
+#include <net/vrf.h>
/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
* code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
@@ -77,6 +78,7 @@ struct ipq {
u8 ecn; /* RFC3168 support */
u16 max_df_size; /* largest frag with DF set seen */
int iif;
+ int vif; /* VRF device index */
unsigned int rid;
struct inet_peer *peer;
};
@@ -99,6 +101,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
struct ip4_create_arg {
struct iphdr *iph;
u32 user;
+ int vif;
};
static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
@@ -127,7 +130,8 @@ static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a)
qp->saddr == arg->iph->saddr &&
qp->daddr == arg->iph->daddr &&
qp->protocol == arg->iph->protocol &&
- qp->user == arg->user;
+ qp->user == arg->user &&
+ qp->vif == arg->vif;
}
static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
@@ -144,9 +148,11 @@ static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
qp->ecn = ip4_frag_ecn(arg->iph->tos);
qp->saddr = arg->iph->saddr;
qp->daddr = arg->iph->daddr;
+ qp->vif = arg->vif;
qp->user = arg->user;
qp->peer = sysctl_ipfrag_max_dist ?
- inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, 1) : NULL;
+ inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, arg->vif, 1) :
+ NULL;
}
static void ip4_frag_free(struct inet_frag_queue *q)
@@ -244,7 +250,8 @@ out:
/* Find the correct entry in the "incomplete datagrams" queue for
* this IP datagram, and create new one, if nothing is found.
*/
-static struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
+static struct ipq *ip_find(struct net *net, struct iphdr *iph,
+ u32 user, int vif)
{
struct inet_frag_queue *q;
struct ip4_create_arg arg;
@@ -252,6 +259,7 @@ static struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
arg.iph = iph;
arg.user = user;
+ arg.vif = vif;
hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
@@ -522,7 +530,6 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
int len;
int ihlen;
int err;
- int sum_truesize;
u8 ecn;
ipq_kill(qp);
@@ -590,32 +597,19 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
add_frag_mem_limit(qp->q.net, clone->truesize);
}
+ skb_shinfo(head)->frag_list = head->next;
skb_push(head, head->data - skb_network_header(head));
- sum_truesize = head->truesize;
- for (fp = head->next; fp;) {
- bool headstolen;
- int delta;
- struct sk_buff *next = fp->next;
-
- sum_truesize += fp->truesize;
+ for (fp=head->next; fp; fp = fp->next) {
+ head->data_len += fp->len;
+ head->len += fp->len;
if (head->ip_summed != fp->ip_summed)
head->ip_summed = CHECKSUM_NONE;
else if (head->ip_summed == CHECKSUM_COMPLETE)
head->csum = csum_add(head->csum, fp->csum);
-
- if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
- kfree_skb_partial(fp, headstolen);
- } else {
- if (!skb_shinfo(head)->frag_list)
- skb_shinfo(head)->frag_list = fp;
- head->data_len += fp->len;
- head->len += fp->len;
- head->truesize += fp->truesize;
- }
- fp = next;
+ head->truesize += fp->truesize;
}
- sub_frag_mem_limit(qp->q.net, sum_truesize);
+ sub_frag_mem_limit(qp->q.net, head->truesize);
head->next = NULL;
head->dev = dev;
@@ -662,14 +656,15 @@ out_fail:
/* Process an incoming IP datagram fragment. */
int ip_defrag(struct sk_buff *skb, u32 user)
{
+ struct net_device *dev = skb->dev ? : skb_dst(skb)->dev;
+ int vif = vrf_master_ifindex_rcu(dev);
+ struct net *net = dev_net(dev);
struct ipq *qp;
- struct net *net;
- net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
/* Lookup (or create) queue header */
- qp = ip_find(net, ip_hdr(skb), user);
+ qp = ip_find(net, ip_hdr(skb), user, vif);
if (qp) {
int ret;
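Reassembly gets the same VRF treatment as the peer cache: ip4_frag_match() now also compares the queue's vif, so fragments that agree on (id, saddr, daddr, protocol, user) but arrive in different VRFs land in different queues. A compact sketch of the widened match tuple; field names follow struct ipq and the hash step is elided:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Reassembly key, mirroring the fields ip4_frag_match() compares. */
struct frag_key {
	uint16_t id;
	uint32_t saddr, daddr;
	uint8_t  protocol;
	uint32_t user;
	int      vif; /* new: VRF master device index */
};

static bool frag_match(const struct frag_key *q, const struct frag_key *a)
{
	return q->id == a->id && q->saddr == a->saddr &&
	       q->daddr == a->daddr && q->protocol == a->protocol &&
	       q->user == a->user && q->vif == a->vif;
}

int main(void)
{
	struct frag_key q = { 7, 1, 2, 17, 0, 4 };
	struct frag_key a = q;

	a.vif = 9; /* same 5-tuple, different VRF: distinct queues */
	printf("match = %d\n", frag_match(&q, &a));
	return 0;
}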
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 5fd706473..614521437 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -25,6 +25,7 @@
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
+#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
@@ -47,6 +48,7 @@
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>
+#include <net/dst_metadata.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
@@ -121,8 +123,127 @@ static int ipgre_tunnel_init(struct net_device *dev);
static int ipgre_net_id __read_mostly;
static int gre_tap_net_id __read_mostly;
-static int ipgre_err(struct sk_buff *skb, u32 info,
- const struct tnl_ptk_info *tpi)
+static int ip_gre_calc_hlen(__be16 o_flags)
+{
+ int addend = 4;
+
+ if (o_flags & TUNNEL_CSUM)
+ addend += 4;
+ if (o_flags & TUNNEL_KEY)
+ addend += 4;
+ if (o_flags & TUNNEL_SEQ)
+ addend += 4;
+ return addend;
+}
+
+static __be16 gre_flags_to_tnl_flags(__be16 flags)
+{
+ __be16 tflags = 0;
+
+ if (flags & GRE_CSUM)
+ tflags |= TUNNEL_CSUM;
+ if (flags & GRE_ROUTING)
+ tflags |= TUNNEL_ROUTING;
+ if (flags & GRE_KEY)
+ tflags |= TUNNEL_KEY;
+ if (flags & GRE_SEQ)
+ tflags |= TUNNEL_SEQ;
+ if (flags & GRE_STRICT)
+ tflags |= TUNNEL_STRICT;
+ if (flags & GRE_REC)
+ tflags |= TUNNEL_REC;
+ if (flags & GRE_VERSION)
+ tflags |= TUNNEL_VERSION;
+
+ return tflags;
+}
+
+static __be16 tnl_flags_to_gre_flags(__be16 tflags)
+{
+ __be16 flags = 0;
+
+ if (tflags & TUNNEL_CSUM)
+ flags |= GRE_CSUM;
+ if (tflags & TUNNEL_ROUTING)
+ flags |= GRE_ROUTING;
+ if (tflags & TUNNEL_KEY)
+ flags |= GRE_KEY;
+ if (tflags & TUNNEL_SEQ)
+ flags |= GRE_SEQ;
+ if (tflags & TUNNEL_STRICT)
+ flags |= GRE_STRICT;
+ if (tflags & TUNNEL_REC)
+ flags |= GRE_REC;
+ if (tflags & TUNNEL_VERSION)
+ flags |= GRE_VERSION;
+
+ return flags;
+}
+
+static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+ bool *csum_err)
+{
+ const struct gre_base_hdr *greh;
+ __be32 *options;
+ int hdr_len;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
+ return -EINVAL;
+
+ greh = (struct gre_base_hdr *)skb_transport_header(skb);
+ if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
+ return -EINVAL;
+
+ tpi->flags = gre_flags_to_tnl_flags(greh->flags);
+ hdr_len = ip_gre_calc_hlen(tpi->flags);
+
+ if (!pskb_may_pull(skb, hdr_len))
+ return -EINVAL;
+
+ greh = (struct gre_base_hdr *)skb_transport_header(skb);
+ tpi->proto = greh->protocol;
+
+ options = (__be32 *)(greh + 1);
+ if (greh->flags & GRE_CSUM) {
+ if (skb_checksum_simple_validate(skb)) {
+ *csum_err = true;
+ return -EINVAL;
+ }
+
+ skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
+ null_compute_pseudo);
+ options++;
+ }
+
+ if (greh->flags & GRE_KEY) {
+ tpi->key = *options;
+ options++;
+ } else {
+ tpi->key = 0;
+ }
+ if (unlikely(greh->flags & GRE_SEQ)) {
+ tpi->seq = *options;
+ options++;
+ } else {
+ tpi->seq = 0;
+ }
+ /* WCCP version 1 and 2 protocol decoding.
+ * - Change protocol to IP
+ * - When dealing with WCCPv2, skip the extra 4 bytes in the GRE header
+ */
+ if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
+ tpi->proto = htons(ETH_P_IP);
+ if ((*(u8 *)options & 0xF0) != 0x40) {
+ hdr_len += 4;
+ if (!pskb_may_pull(skb, hdr_len))
+ return -EINVAL;
+ }
+ }
+ return iptunnel_pull_header(skb, hdr_len, tpi->proto);
+}
+
+static void ipgre_err(struct sk_buff *skb, u32 info,
+ const struct tnl_ptk_info *tpi)
{
/* All the routers (except for Linux) return only
@@ -148,14 +269,14 @@ static int ipgre_err(struct sk_buff *skb, u32 info,
switch (type) {
default:
case ICMP_PARAMETERPROB:
- return PACKET_RCVD;
+ return;
case ICMP_DEST_UNREACH:
switch (code) {
case ICMP_SR_FAILED:
case ICMP_PORT_UNREACH:
/* Impossible event. */
- return PACKET_RCVD;
+ return;
default:
/* All others are translated to HOST_UNREACH.
rfc2003 contains "deep thoughts" about NET_UNREACH,
@@ -164,9 +285,10 @@ static int ipgre_err(struct sk_buff *skb, u32 info,
break;
}
break;
+
case ICMP_TIME_EXCEEDED:
if (code != ICMP_EXC_TTL)
- return PACKET_RCVD;
+ return;
break;
case ICMP_REDIRECT:
@@ -183,26 +305,85 @@ static int ipgre_err(struct sk_buff *skb, u32 info,
iph->daddr, iph->saddr, tpi->key);
if (!t)
- return PACKET_REJECT;
+ return;
if (t->parms.iph.daddr == 0 ||
ipv4_is_multicast(t->parms.iph.daddr))
- return PACKET_RCVD;
+ return;
if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
- return PACKET_RCVD;
+ return;
if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
t->err_count++;
else
t->err_count = 1;
t->err_time = jiffies;
- return PACKET_RCVD;
+}
+
+static void gre_err(struct sk_buff *skb, u32 info)
+{
+ /* All the routers (except for Linux) return only
+ * 8 bytes of packet payload. This means that precise relaying of
+ * ICMP in the real Internet is absolutely infeasible.
+ *
+ * Moreover, Cisco "wise men" put the GRE key in the third word
+ * of the GRE header. That makes it impossible to maintain even
+ * soft state for keyed GRE tunnels with checksums enabled.
+ * Tell them "thank you".
+ *
+ * Well, I wonder, RFC 1812 was written by a Cisco employee;
+ * why the hell do these idiots break standards established
+ * by themselves?
+ */
+
+ const int type = icmp_hdr(skb)->type;
+ const int code = icmp_hdr(skb)->code;
+ struct tnl_ptk_info tpi;
+ bool csum_err = false;
+
+ if (parse_gre_header(skb, &tpi, &csum_err)) {
+ if (!csum_err) /* ignore csum errors. */
+ return;
+ }
+
+ if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
+ ipv4_update_pmtu(skb, dev_net(skb->dev), info,
+ skb->dev->ifindex, 0, IPPROTO_GRE, 0);
+ return;
+ }
+ if (type == ICMP_REDIRECT) {
+ ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
+ IPPROTO_GRE, 0);
+ return;
+ }
+
+ ipgre_err(skb, info, &tpi);
+}
+
+static __be64 key_to_tunnel_id(__be32 key)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __be64)((__force u32)key);
+#else
+ return (__force __be64)((__force u64)key << 32);
+#endif
+}
+
+/* Returns the least-significant 32 bits of a __be64. */
+static __be32 tunnel_id_to_key(__be64 x)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __be32)x;
+#else
+ return (__force __be32)((__force u64)x >> 32);
+#endif
}
static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
{
struct net *net = dev_net(skb->dev);
+ struct metadata_dst *tun_dst = NULL;
struct ip_tunnel_net *itn;
const struct iphdr *iph;
struct ip_tunnel *tunnel;
@@ -218,40 +399,211 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
if (tunnel) {
skb_pop_mac_header(skb);
- ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error);
+ if (tunnel->collect_md) {
+ __be16 flags;
+ __be64 tun_id;
+
+ flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
+ tun_id = key_to_tunnel_id(tpi->key);
+ tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
+ if (!tun_dst)
+ return PACKET_REJECT;
+ }
+
+ ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
return PACKET_RCVD;
}
return PACKET_REJECT;
}
+static int gre_rcv(struct sk_buff *skb)
+{
+ struct tnl_ptk_info tpi;
+ bool csum_err = false;
+
+#ifdef CONFIG_NET_IPGRE_BROADCAST
+ if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
+ /* Looped back packet, drop it! */
+ if (rt_is_output_route(skb_rtable(skb)))
+ goto drop;
+ }
+#endif
+
+ if (parse_gre_header(skb, &tpi, &csum_err) < 0)
+ goto drop;
+
+ if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
+ return 0;
+
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+drop:
+ kfree_skb(skb);
+ return 0;
+}
+
+static void build_header(struct sk_buff *skb, int hdr_len, __be16 flags,
+ __be16 proto, __be32 key, __be32 seq)
+{
+ struct gre_base_hdr *greh;
+
+ skb_push(skb, hdr_len);
+
+ skb_reset_transport_header(skb);
+ greh = (struct gre_base_hdr *)skb->data;
+ greh->flags = tnl_flags_to_gre_flags(flags);
+ greh->protocol = proto;
+
+ if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
+ __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
+
+ if (flags & TUNNEL_SEQ) {
+ *ptr = seq;
+ ptr--;
+ }
+ if (flags & TUNNEL_KEY) {
+ *ptr = key;
+ ptr--;
+ }
+ if (flags & TUNNEL_CSUM &&
+ !(skb_shinfo(skb)->gso_type &
+ (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
+ *ptr = 0;
+ *(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0,
+ skb->len, 0));
+ }
+ }
+}
+
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params,
__be16 proto)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- struct tnl_ptk_info tpi;
- tpi.flags = tunnel->parms.o_flags;
- tpi.proto = proto;
- tpi.key = tunnel->parms.o_key;
if (tunnel->parms.o_flags & TUNNEL_SEQ)
tunnel->o_seqno++;
- tpi.seq = htonl(tunnel->o_seqno);
/* Push GRE header. */
- gre_build_header(skb, &tpi, tunnel->tun_hlen);
-
- skb_set_inner_protocol(skb, tpi.proto);
+ build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
+ proto, tunnel->parms.o_key, htonl(tunnel->o_seqno));
+ skb_set_inner_protocol(skb, proto);
ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}
+static struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
+ bool csum)
+{
+ return iptunnel_handle_offloads(skb, csum,
+ csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
+}
+
+static struct rtable *gre_get_rt(struct sk_buff *skb,
+ struct net_device *dev,
+ struct flowi4 *fl,
+ const struct ip_tunnel_key *key)
+{
+ struct net *net = dev_net(dev);
+
+ memset(fl, 0, sizeof(*fl));
+ fl->daddr = key->u.ipv4.dst;
+ fl->saddr = key->u.ipv4.src;
+ fl->flowi4_tos = RT_TOS(key->tos);
+ fl->flowi4_mark = skb->mark;
+ fl->flowi4_proto = IPPROTO_GRE;
+
+ return ip_route_output_key(net, fl);
+}
+
+static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ip_tunnel_info *tun_info;
+ const struct ip_tunnel_key *key;
+ struct flowi4 fl;
+ struct rtable *rt;
+ int min_headroom;
+ int tunnel_hlen;
+ __be16 df, flags;
+ int err;
+
+ tun_info = skb_tunnel_info(skb);
+ if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
+ ip_tunnel_info_af(tun_info) != AF_INET))
+ goto err_free_skb;
+
+ key = &tun_info->key;
+ rt = gre_get_rt(skb, dev, &fl, key);
+ if (IS_ERR(rt))
+ goto err_free_skb;
+
+ tunnel_hlen = ip_gre_calc_hlen(key->tun_flags);
+
+ min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ + tunnel_hlen + sizeof(struct iphdr);
+ if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
+ int head_delta = SKB_DATA_ALIGN(min_headroom -
+ skb_headroom(skb) +
+ 16);
+ err = pskb_expand_head(skb, max_t(int, head_delta, 0),
+ 0, GFP_ATOMIC);
+ if (unlikely(err))
+ goto err_free_rt;
+ }
+
+ /* Push Tunnel header. */
+ skb = gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM));
+ if (IS_ERR(skb)) {
+ skb = NULL;
+ goto err_free_rt;
+ }
+
+ flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
+ build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
+ tunnel_id_to_key(tun_info->key.tun_id), 0);
+
+ df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
+ err = iptunnel_xmit(skb->sk, rt, skb, fl.saddr,
+ key->u.ipv4.dst, IPPROTO_GRE,
+ key->tos, key->ttl, df, false);
+ iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
+ return;
+
+err_free_rt:
+ ip_rt_put(rt);
+err_free_skb:
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+}
+
+static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+{
+ struct ip_tunnel_info *info = skb_tunnel_info(skb);
+ struct rtable *rt;
+ struct flowi4 fl4;
+
+ if (ip_tunnel_info_af(info) != AF_INET)
+ return -EINVAL;
+
+ rt = gre_get_rt(skb, dev, &fl4, &info->key);
+ if (IS_ERR(rt))
+ return PTR_ERR(rt);
+
+ ip_rt_put(rt);
+ info->key.u.ipv4.src = fl4.saddr;
+ return 0;
+}
+
static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
const struct iphdr *tnl_params;
+ if (tunnel->collect_md) {
+ gre_fb_xmit(skb, dev);
+ return NETDEV_TX_OK;
+ }
+
if (dev->header_ops) {
/* Need space for new headers */
if (skb_cow_head(skb, dev->needed_headroom -
@@ -277,7 +629,6 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
goto out;
__gre_xmit(skb, dev, tnl_params, skb->protocol);
-
return NETDEV_TX_OK;
free_skb:
@@ -292,6 +643,11 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
{
struct ip_tunnel *tunnel = netdev_priv(dev);
+ if (tunnel->collect_md) {
+ gre_fb_xmit(skb, dev);
+ return NETDEV_TX_OK;
+ }
+
skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
if (IS_ERR(skb))
goto out;
@@ -300,7 +656,6 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
goto free_skb;
__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
-
return NETDEV_TX_OK;
free_skb:
@@ -530,10 +885,9 @@ static int ipgre_tunnel_init(struct net_device *dev)
return ip_tunnel_init(dev);
}
-static struct gre_cisco_protocol ipgre_protocol = {
- .handler = ipgre_rcv,
- .err_handler = ipgre_err,
- .priority = 0,
+static const struct gre_protocol ipgre_protocol = {
+ .handler = gre_rcv,
+ .err_handler = gre_err,
};
static int __net_init ipgre_init_net(struct net *net)
@@ -596,8 +950,10 @@ out:
return ipgre_tunnel_validate(tb, data);
}
-static void ipgre_netlink_parms(struct nlattr *data[], struct nlattr *tb[],
- struct ip_tunnel_parm *parms)
+static void ipgre_netlink_parms(struct net_device *dev,
+ struct nlattr *data[],
+ struct nlattr *tb[],
+ struct ip_tunnel_parm *parms)
{
memset(parms, 0, sizeof(*parms));
@@ -635,6 +991,12 @@ static void ipgre_netlink_parms(struct nlattr *data[], struct nlattr *tb[],
if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
parms->iph.frag_off = htons(IP_DF);
+
+ if (data[IFLA_GRE_COLLECT_METADATA]) {
+ struct ip_tunnel *t = netdev_priv(dev);
+
+ t->collect_md = true;
+ }
}
/* This function returns true when ENCAP attributes are present in the nl msg */
@@ -688,6 +1050,7 @@ static const struct net_device_ops gre_tap_netdev_ops = {
.ndo_change_mtu = ip_tunnel_change_mtu,
.ndo_get_stats64 = ip_tunnel_get_stats64,
.ndo_get_iflink = ip_tunnel_get_iflink,
+ .ndo_fill_metadata_dst = gre_fill_metadata_dst,
};
static void ipgre_tap_setup(struct net_device *dev)
@@ -712,7 +1075,7 @@ static int ipgre_newlink(struct net *src_net, struct net_device *dev,
return err;
}
- ipgre_netlink_parms(data, tb, &p);
+ ipgre_netlink_parms(dev, data, tb, &p);
return ip_tunnel_newlink(dev, tb, &p);
}
@@ -730,7 +1093,7 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
return err;
}
- ipgre_netlink_parms(data, tb, &p);
+ ipgre_netlink_parms(dev, data, tb, &p);
return ip_tunnel_changelink(dev, tb, &p);
}
@@ -765,6 +1128,8 @@ static size_t ipgre_get_size(const struct net_device *dev)
nla_total_size(2) +
/* IFLA_GRE_ENCAP_DPORT */
nla_total_size(2) +
+ /* IFLA_GRE_COLLECT_METADATA */
+ nla_total_size(0) +
0;
}
@@ -796,6 +1161,11 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
t->encap.flags))
goto nla_put_failure;
+ if (t->collect_md) {
+ if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
+ goto nla_put_failure;
+ }
+
return 0;
nla_put_failure:
@@ -817,6 +1187,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
[IFLA_GRE_ENCAP_FLAGS] = { .type = NLA_U16 },
[IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 },
[IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
+ [IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
};
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
@@ -849,9 +1220,38 @@ static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
.get_link_net = ip_tunnel_get_link_net,
};
+struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
+ u8 name_assign_type)
+{
+ struct nlattr *tb[IFLA_MAX + 1];
+ struct net_device *dev;
+ struct ip_tunnel *t;
+ int err;
+
+ memset(&tb, 0, sizeof(tb));
+
+ dev = rtnl_create_link(net, name, name_assign_type,
+ &ipgre_tap_ops, tb);
+ if (IS_ERR(dev))
+ return dev;
+
+ /* Configure a flow-based GRE device. */
+ t = netdev_priv(dev);
+ t->collect_md = true;
+
+ err = ipgre_newlink(net, dev, tb, NULL);
+ if (err < 0)
+ goto out;
+ return dev;
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
+
static int __net_init ipgre_tap_init_net(struct net *net)
{
- return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, NULL);
+ return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}
static void __net_exit ipgre_tap_exit_net(struct net *net)
@@ -881,7 +1281,7 @@ static int __init ipgre_init(void)
if (err < 0)
goto pnet_tap_failed;
- err = gre_cisco_register(&ipgre_protocol);
+ err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
if (err < 0) {
pr_info("%s: can't add protocol\n", __func__);
goto add_proto_failed;
@@ -900,7 +1300,7 @@ static int __init ipgre_init(void)
tap_ops_failed:
rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
- gre_cisco_unregister(&ipgre_protocol);
+ gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
@@ -912,7 +1312,7 @@ static void __exit ipgre_fini(void)
{
rtnl_link_unregister(&ipgre_tap_ops);
rtnl_link_unregister(&ipgre_link_ops);
- gre_cisco_unregister(&ipgre_protocol);
+ gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
unregister_pernet_device(&ipgre_tap_net_ops);
unregister_pernet_device(&ipgre_net_ops);
}
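
Reviewer note: the exported gretap_fb_dev_create() above exists so another kernel subsystem can create a flow-based ("fallback") gretap device; at this point the in-tree caller is openvswitch's GRE vport. A minimal caller sketch, assuming rtnl locking as the export requires — the device name and the dev_open() step are illustrative, not taken from this patch:

/* Hypothetical in-kernel caller (illustrative, not from this patch). */
struct net_device *dev;
int err;

rtnl_lock();
dev = gretap_fb_dev_create(net, "gretap-flow", NET_NAME_USER);
if (IS_ERR(dev)) {
	rtnl_unlock();
	return PTR_ERR(dev);
}
err = dev_open(dev);	/* bring the flow-based device up before use */
rtnl_unlock();
if (err)
	return err;
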
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 2db4c8773..f4fc8a77a 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -146,6 +146,7 @@
#include <net/xfrm.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
+#include <net/dst_metadata.h>
/*
* Process Router Attention IP option (RFC 2113)
@@ -331,7 +332,7 @@ static int ip_rcv_finish(struct sock *sk, struct sk_buff *skb)
* Initialise the virtual path cache for the packet. It describes
* how the packet travels inside Linux networking.
*/
- if (!skb_dst(skb)) {
+ if (!skb_valid_dst(skb)) {
int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
iph->tos, skb->dev);
if (unlikely(err)) {
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 6bf89a631..0138fada0 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1542,6 +1542,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
struct net *net = sock_net(sk);
struct sk_buff *nskb;
int err;
+ int oif;
if (__ip_options_echo(&replyopts.opt.opt, skb, sopt))
return;
@@ -1559,7 +1560,11 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
daddr = replyopts.opt.opt.faddr;
}
- flowi4_init_output(&fl4, arg->bound_dev_if,
+ oif = arg->bound_dev_if;
+ if (!oif && netif_index_is_vrf(net, skb->skb_iif))
+ oif = skb->skb_iif;
+
+ flowi4_init_output(&fl4, oif,
IP4_REPLY_MARK(net, skb->mark),
RT_TOS(arg->tos),
RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 626d9e56a..cbb51f3fa 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -230,10 +230,13 @@ skip_key_lookup:
if (cand)
return cand;
+ t = rcu_dereference(itn->collect_md_tun);
+ if (t)
+ return t;
+
if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
return netdev_priv(itn->fb_tunnel_dev);
-
return NULL;
}
EXPORT_SYMBOL_GPL(ip_tunnel_lookup);
@@ -261,11 +264,15 @@ static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
struct hlist_head *head = ip_bucket(itn, &t->parms);
+ if (t->collect_md)
+ rcu_assign_pointer(itn->collect_md_tun, t);
hlist_add_head_rcu(&t->hash_node, head);
}
-static void ip_tunnel_del(struct ip_tunnel *t)
+static void ip_tunnel_del(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
+ if (t->collect_md)
+ rcu_assign_pointer(itn->collect_md_tun, NULL);
hlist_del_init_rcu(&t->hash_node);
}
@@ -419,7 +426,8 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
}
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
- const struct tnl_ptk_info *tpi, bool log_ecn_error)
+ const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
+ bool log_ecn_error)
{
struct pcpu_sw_netstats *tstats;
const struct iphdr *iph = ip_hdr(skb);
@@ -478,6 +486,9 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
skb->dev = tunnel->dev;
}
+ if (tun_dst)
+ skb_dst_set(skb, (struct dst_entry *)tun_dst);
+
gro_cells_receive(&tunnel->gro_cells, skb);
return 0;
@@ -806,7 +817,7 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn,
struct ip_tunnel_parm *p,
bool set_mtu)
{
- ip_tunnel_del(t);
+ ip_tunnel_del(itn, t);
t->parms.iph.saddr = p->iph.saddr;
t->parms.iph.daddr = p->iph.daddr;
t->parms.i_key = p->i_key;
@@ -967,7 +978,7 @@ void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);
if (itn->fb_tunnel_dev != dev) {
- ip_tunnel_del(netdev_priv(dev));
+ ip_tunnel_del(itn, netdev_priv(dev));
unregister_netdevice_queue(dev, head);
}
}
@@ -1072,8 +1083,13 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
nt = netdev_priv(dev);
itn = net_generic(net, nt->ip_tnl_net_id);
- if (ip_tunnel_find(itn, p, dev->type))
- return -EEXIST;
+ if (nt->collect_md) {
+ if (rtnl_dereference(itn->collect_md_tun))
+ return -EEXIST;
+ } else {
+ if (ip_tunnel_find(itn, p, dev->type))
+ return -EEXIST;
+ }
nt->net = net;
nt->parms = *p;
@@ -1089,7 +1105,6 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
dev->mtu = mtu;
ip_tunnel_add(itn, nt);
-
out:
return err;
}
@@ -1163,6 +1178,10 @@ int ip_tunnel_init(struct net_device *dev)
iph->version = 4;
iph->ihl = 5;
+ if (tunnel->collect_md) {
+ dev->features |= NETIF_F_NETNS_LOCAL;
+ netif_keep_dst(dev);
+ }
return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);
@@ -1176,7 +1195,7 @@ void ip_tunnel_uninit(struct net_device *dev)
itn = net_generic(net, tunnel->ip_tnl_net_id);
/* fb_tunnel_dev will be unregistered in net-exit call. */
if (itn->fb_tunnel_dev != dev)
- ip_tunnel_del(netdev_priv(dev));
+ ip_tunnel_del(itn, netdev_priv(dev));
ip_tunnel_dst_reset_all(tunnel);
}
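
Reviewer note: ip_tunnel_rcv() now takes an optional metadata_dst so a collect_md device can expose per-packet tunnel keys to upper layers. A hedged receive-path fragment showing the intended use — the tunnel/tpi/key variables and the drop label are assumed from surrounding header parsing, and real code must initialize the whole tunnel key (addresses, tos, ttl, flags), not just tun_id:

/* Hypothetical receive-path fragment (assumes tpi/key already parsed). */
struct metadata_dst *tun_dst;

tun_dst = metadata_dst_alloc(0, GFP_ATOMIC);
if (!tun_dst)
	goto drop;
tun_dst->u.tun_info.key.tun_id = key;	/* e.g. the GRE key field */
tun_dst->u.tun_info.mode = 0;		/* RX: IP_TUNNEL_INFO_TX clear */
return ip_tunnel_rcv(tunnel, skb, &tpi, tun_dst, log_ecn_error);
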
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 6a51a71a6..84dce6a92 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -32,6 +32,7 @@
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
+#include <linux/static_key.h>
#include <net/ip.h>
#include <net/icmp.h>
@@ -45,12 +46,13 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
+#include <net/dst_metadata.h>
int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 proto,
__u8 tos, __u8 ttl, __be16 df, bool xnet)
{
- int pkt_len = skb->len;
+ int pkt_len = skb->len - skb_inner_network_offset(skb);
struct iphdr *iph;
int err;
@@ -118,6 +120,33 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
}
EXPORT_SYMBOL_GPL(iptunnel_pull_header);
+struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
+ gfp_t flags)
+{
+ struct metadata_dst *res;
+ struct ip_tunnel_info *dst, *src;
+
+ if (!md || md->u.tun_info.mode & IP_TUNNEL_INFO_TX)
+ return NULL;
+
+ res = metadata_dst_alloc(0, flags);
+ if (!res)
+ return NULL;
+
+ dst = &res->u.tun_info;
+ src = &md->u.tun_info;
+ dst->key.tun_id = src->key.tun_id;
+ if (src->mode & IP_TUNNEL_INFO_IPV6)
+ memcpy(&dst->key.u.ipv6.dst, &src->key.u.ipv6.src,
+ sizeof(struct in6_addr));
+ else
+ dst->key.u.ipv4.dst = src->key.u.ipv4.src;
+ dst->mode = src->mode | IP_TUNNEL_INFO_TX;
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(iptunnel_metadata_reply);
+
struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
bool csum_help,
int gso_type_mask)
@@ -190,3 +219,208 @@ struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
return tot;
}
EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
+
+static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = {
+ [LWTUNNEL_IP_ID] = { .type = NLA_U64 },
+ [LWTUNNEL_IP_DST] = { .type = NLA_U32 },
+ [LWTUNNEL_IP_SRC] = { .type = NLA_U32 },
+ [LWTUNNEL_IP_TTL] = { .type = NLA_U8 },
+ [LWTUNNEL_IP_TOS] = { .type = NLA_U8 },
+ [LWTUNNEL_IP_FLAGS] = { .type = NLA_U16 },
+};
+
+static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
+ unsigned int family, const void *cfg,
+ struct lwtunnel_state **ts)
+{
+ struct ip_tunnel_info *tun_info;
+ struct lwtunnel_state *new_state;
+ struct nlattr *tb[LWTUNNEL_IP_MAX + 1];
+ int err;
+
+ err = nla_parse_nested(tb, LWTUNNEL_IP_MAX, attr, ip_tun_policy);
+ if (err < 0)
+ return err;
+
+ new_state = lwtunnel_state_alloc(sizeof(*tun_info));
+ if (!new_state)
+ return -ENOMEM;
+
+ new_state->type = LWTUNNEL_ENCAP_IP;
+
+ tun_info = lwt_tun_info(new_state);
+
+ if (tb[LWTUNNEL_IP_ID])
+ tun_info->key.tun_id = nla_get_u64(tb[LWTUNNEL_IP_ID]);
+
+ if (tb[LWTUNNEL_IP_DST])
+ tun_info->key.u.ipv4.dst = nla_get_be32(tb[LWTUNNEL_IP_DST]);
+
+ if (tb[LWTUNNEL_IP_SRC])
+ tun_info->key.u.ipv4.src = nla_get_be32(tb[LWTUNNEL_IP_SRC]);
+
+ if (tb[LWTUNNEL_IP_TTL])
+ tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]);
+
+ if (tb[LWTUNNEL_IP_TOS])
+ tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);
+
+ if (tb[LWTUNNEL_IP_FLAGS])
+ tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP_FLAGS]);
+
+ tun_info->mode = IP_TUNNEL_INFO_TX;
+ tun_info->options_len = 0;
+
+ *ts = new_state;
+
+ return 0;
+}
+
+static int ip_tun_fill_encap_info(struct sk_buff *skb,
+ struct lwtunnel_state *lwtstate)
+{
+ struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
+
+ if (nla_put_u64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id) ||
+ nla_put_be32(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) ||
+ nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
+ nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
+ nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
+ nla_put_u16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+ return nla_total_size(8) /* LWTUNNEL_IP_ID */
+ + nla_total_size(4) /* LWTUNNEL_IP_DST */
+ + nla_total_size(4) /* LWTUNNEL_IP_SRC */
+ + nla_total_size(1) /* LWTUNNEL_IP_TOS */
+ + nla_total_size(1) /* LWTUNNEL_IP_TTL */
+ + nla_total_size(2); /* LWTUNNEL_IP_FLAGS */
+}
+
+static int ip_tun_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+ return memcmp(lwt_tun_info(a), lwt_tun_info(b),
+ sizeof(struct ip_tunnel_info));
+}
+
+static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
+ .build_state = ip_tun_build_state,
+ .fill_encap = ip_tun_fill_encap_info,
+ .get_encap_size = ip_tun_encap_nlsize,
+ .cmp_encap = ip_tun_cmp_encap,
+};
+
+static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
+ [LWTUNNEL_IP6_ID] = { .type = NLA_U64 },
+ [LWTUNNEL_IP6_DST] = { .len = sizeof(struct in6_addr) },
+ [LWTUNNEL_IP6_SRC] = { .len = sizeof(struct in6_addr) },
+ [LWTUNNEL_IP6_HOPLIMIT] = { .type = NLA_U8 },
+ [LWTUNNEL_IP6_TC] = { .type = NLA_U8 },
+ [LWTUNNEL_IP6_FLAGS] = { .type = NLA_U16 },
+};
+
+static int ip6_tun_build_state(struct net_device *dev, struct nlattr *attr,
+ unsigned int family, const void *cfg,
+ struct lwtunnel_state **ts)
+{
+ struct ip_tunnel_info *tun_info;
+ struct lwtunnel_state *new_state;
+ struct nlattr *tb[LWTUNNEL_IP6_MAX + 1];
+ int err;
+
+ err = nla_parse_nested(tb, LWTUNNEL_IP6_MAX, attr, ip6_tun_policy);
+ if (err < 0)
+ return err;
+
+ new_state = lwtunnel_state_alloc(sizeof(*tun_info));
+ if (!new_state)
+ return -ENOMEM;
+
+ new_state->type = LWTUNNEL_ENCAP_IP6;
+
+ tun_info = lwt_tun_info(new_state);
+
+ if (tb[LWTUNNEL_IP6_ID])
+ tun_info->key.tun_id = nla_get_u64(tb[LWTUNNEL_IP6_ID]);
+
+ if (tb[LWTUNNEL_IP6_DST])
+ tun_info->key.u.ipv6.dst = nla_get_in6_addr(tb[LWTUNNEL_IP6_DST]);
+
+ if (tb[LWTUNNEL_IP6_SRC])
+ tun_info->key.u.ipv6.src = nla_get_in6_addr(tb[LWTUNNEL_IP6_SRC]);
+
+ if (tb[LWTUNNEL_IP6_HOPLIMIT])
+ tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP6_HOPLIMIT]);
+
+ if (tb[LWTUNNEL_IP6_TC])
+ tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]);
+
+ if (tb[LWTUNNEL_IP6_FLAGS])
+ tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP6_FLAGS]);
+
+ tun_info->mode = IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_IPV6;
+ tun_info->options_len = 0;
+
+ *ts = new_state;
+
+ return 0;
+}
+
+static int ip6_tun_fill_encap_info(struct sk_buff *skb,
+ struct lwtunnel_state *lwtstate)
+{
+ struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
+
+ if (nla_put_u64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id) ||
+ nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) ||
+ nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
+ nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.ttl) ||
+ nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.tos) ||
+ nla_put_u16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+ return nla_total_size(8) /* LWTUNNEL_IP6_ID */
+ + nla_total_size(16) /* LWTUNNEL_IP6_DST */
+ + nla_total_size(16) /* LWTUNNEL_IP6_SRC */
+ + nla_total_size(1) /* LWTUNNEL_IP6_HOPLIMIT */
+ + nla_total_size(1) /* LWTUNNEL_IP6_TC */
+ + nla_total_size(2); /* LWTUNNEL_IP6_FLAGS */
+}
+
+static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
+ .build_state = ip6_tun_build_state,
+ .fill_encap = ip6_tun_fill_encap_info,
+ .get_encap_size = ip6_tun_encap_nlsize,
+ .cmp_encap = ip_tun_cmp_encap,
+};
+
+void __init ip_tunnel_core_init(void)
+{
+ lwtunnel_encap_add_ops(&ip_tun_lwt_ops, LWTUNNEL_ENCAP_IP);
+ lwtunnel_encap_add_ops(&ip6_tun_lwt_ops, LWTUNNEL_ENCAP_IP6);
+}
+
+struct static_key ip_tunnel_metadata_cnt = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL(ip_tunnel_metadata_cnt);
+
+void ip_tunnel_need_metadata(void)
+{
+ static_key_slow_inc(&ip_tunnel_metadata_cnt);
+}
+EXPORT_SYMBOL_GPL(ip_tunnel_need_metadata);
+
+void ip_tunnel_unneed_metadata(void)
+{
+ static_key_slow_dec(&ip_tunnel_metadata_cnt);
+}
+EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata);
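
Reviewer note: iptunnel_metadata_reply() above builds a TX key for answering a packet received on a metadata tunnel by mirroring the RX key — the received source address becomes the reply destination and the TX bit is set. A self-contained userspace model of that swap (the flag value is illustrative, not copied from ip_tunnels.h):

#include <stdint.h>
#include <stdio.h>

#define IP_TUNNEL_INFO_TX 0x01	/* flag value illustrative only */

struct toy_key { uint32_t src, dst; uint64_t tun_id; };

int main(void)
{
	struct toy_key rx = { .src = 0x0a000001, .dst = 0x0a000002,
			      .tun_id = 42 };
	uint8_t rx_mode = 0;			/* RX: TX bit clear */
	struct toy_key tx = { .dst = rx.src, .tun_id = rx.tun_id };
	uint8_t tx_mode = rx_mode | IP_TUNNEL_INFO_TX;

	printf("reply goes to 0x%08x, id %llu, mode 0x%x\n",
	       tx.dst, (unsigned long long)tx.tun_id, tx_mode);
	return 0;
}

The LWT encap ops registered in ip_tunnel_core_init() are what let individual routes carry tunnel keys; with a userspace iproute2 new enough to speak the LWTUNNEL_IP_* attributes, a route using this encap would look roughly like "ip route add 10.1.1.0/30 encap ip id 100 dst 192.168.7.2 dev tun0" (command hedged, not part of this patch).
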
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 8e7328c6a..ed4ef09c2 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -94,7 +94,7 @@
/* Define the timeout for waiting for a DHCP/BOOTP/RARP reply */
#define CONF_OPEN_RETRIES 2 /* (Re)open devices twice */
#define CONF_SEND_RETRIES 6 /* Send six requests per open */
-#define CONF_INTER_TIMEOUT (HZ/2) /* Inter-device timeout: 1/2 second */
+#define CONF_INTER_TIMEOUT (HZ) /* Inter-device timeout: 1 second */
#define CONF_BASE_TIMEOUT (HZ*2) /* Initial timeout: 2 seconds */
#define CONF_TIMEOUT_RANDOM (HZ) /* Maximum amount of randomization */
#define CONF_TIMEOUT_MULT *7/4 /* Rate of timeout growth */
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 254238daf..f34c31def 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -198,7 +198,7 @@ static int ipip_rcv(struct sk_buff *skb)
goto drop;
if (iptunnel_pull_header(skb, 0, tpi.proto))
goto drop;
- return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
+ return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, log_ecn_error);
}
return -1;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 3a2c0162c..8e8203d5c 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -233,7 +233,6 @@ static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
.match = ipmr_rule_match,
.configure = ipmr_rule_configure,
.compare = ipmr_rule_compare,
- .default_pref = fib_default_rule_pref,
.fill = ipmr_rule_fill,
.nlgroup = RTNLGRP_IPV4_RULE,
.policy = ipmr_rule_policy,
@@ -1683,8 +1682,8 @@ static inline int ipmr_forward_finish(struct sock *sk, struct sk_buff *skb)
{
struct ip_options *opt = &(IPCB(skb)->opt);
- IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
- IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
+ IP_INC_STATS(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
+ IP_ADD_STATS(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
if (unlikely(opt->optlen))
ip_forward_options(skb);
@@ -1746,7 +1745,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
* to blackhole.
*/
- IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
+ IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
ip_rt_put(rt);
goto out_free;
}
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 2199a5db2..a35584176 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -58,6 +58,12 @@ config NFT_REJECT_IPV4
default NFT_REJECT
tristate
+config NFT_DUP_IPV4
+ tristate "IPv4 nf_tables packet duplication support"
+ select NF_DUP_IPV4
+ help
+ This module enables IPv4 packet duplication support for nf_tables.
+
endif # NF_TABLES_IPV4
config NF_TABLES_ARP
@@ -67,6 +73,13 @@ config NF_TABLES_ARP
endif # NF_TABLES
+config NF_DUP_IPV4
+ tristate "Netfilter IPv4 packet duplication to alternate destination"
+ depends on !NF_CONNTRACK || NF_CONNTRACK
+ help
+ This option enables the nf_dup_ipv4 core, which duplicates an IPv4
+ packet to be rerouted to another destination.
+
config NF_LOG_ARP
tristate "ARP packet logging"
default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 7fe6c7035..87b073da1 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_NFT_CHAIN_NAT_IPV4) += nft_chain_nat_ipv4.o
obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
obj-$(CONFIG_NFT_MASQ_IPV4) += nft_masq_ipv4.o
obj-$(CONFIG_NFT_REDIR_IPV4) += nft_redir_ipv4.o
+obj-$(CONFIG_NFT_DUP_IPV4) += nft_dup_ipv4.o
obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o
# generic IP tables
@@ -70,3 +71,5 @@ obj-$(CONFIG_IP_NF_ARP_MANGLE) += arpt_mangle.o
# just filtering instance of ARP tables for now
obj-$(CONFIG_IP_NF_ARPFILTER) += arptable_filter.o
+
+obj-$(CONFIG_NF_DUP_IPV4) += nf_dup_ipv4.o
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 92305a1a0..8f87fc38c 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -240,7 +240,7 @@ get_entry(const void *base, unsigned int offset)
return (struct arpt_entry *)(base + offset);
}
-static inline __pure
+static inline
struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry)
{
return (void *)entry + entry->next_offset;
@@ -280,6 +280,9 @@ unsigned int arpt_do_table(struct sk_buff *skb,
table_base = private->entries;
jumpstack = (struct arpt_entry **)private->jumpstack[cpu];
+ /* No TEE support for arptables, so no need to switch to alternate
+ * stack. All targets that reenter must return absolute verdicts.
+ */
e = get_entry(table_base, private->hook_entry[hook]);
acpar.in = state->in;
@@ -325,11 +328,6 @@ unsigned int arpt_do_table(struct sk_buff *skb,
}
if (table_base + v
!= arpt_next_entry(e)) {
-
- if (stackidx >= private->stacksize) {
- verdict = NF_DROP;
- break;
- }
jumpstack[stackidx++] = e;
}
@@ -337,9 +335,6 @@ unsigned int arpt_do_table(struct sk_buff *skb,
continue;
}
- /* Targets which reenter must return
- * abs. verdicts
- */
acpar.target = t->u.kernel.target;
acpar.targinfo = t->data;
verdict = t->u.kernel.target->target(skb, &acpar);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 6c72fbb7b..b0a86e734 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -276,7 +276,7 @@ static void trace_packet(const struct sk_buff *skb,
}
#endif
-static inline __pure
+static inline
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
return (void *)entry + entry->next_offset;
@@ -296,12 +296,13 @@ ipt_do_table(struct sk_buff *skb,
const char *indev, *outdev;
const void *table_base;
struct ipt_entry *e, **jumpstack;
- unsigned int *stackptr, origptr, cpu;
+ unsigned int stackidx, cpu;
const struct xt_table_info *private;
struct xt_action_param acpar;
unsigned int addend;
/* Initialization */
+ stackidx = 0;
ip = ip_hdr(skb);
indev = state->in ? state->in->name : nulldevname;
outdev = state->out ? state->out->name : nulldevname;
@@ -331,13 +332,21 @@ ipt_do_table(struct sk_buff *skb,
smp_read_barrier_depends();
table_base = private->entries;
jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
- stackptr = per_cpu_ptr(private->stackptr, cpu);
- origptr = *stackptr;
+
+ /* Switch to alternate jumpstack if we're being invoked via TEE.
+ * TEE issues XT_CONTINUE verdict on original skb so we must not
+ * clobber the jumpstack.
+ *
+ * For recursion via REJECT or SYNPROXY the stack will be clobbered
+ * but it is no problem since absolute verdict is issued by these.
+ */
+ if (static_key_false(&xt_tee_enabled))
+ jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
e = get_entry(table_base, private->hook_entry[hook]);
- pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
- table->name, hook, origptr,
+ pr_debug("Entering %s(hook %u), UF %p\n",
+ table->name, hook,
get_entry(table_base, private->underflow[hook]));
do {
@@ -383,28 +392,24 @@ ipt_do_table(struct sk_buff *skb,
verdict = (unsigned int)(-v) - 1;
break;
}
- if (*stackptr <= origptr) {
+ if (stackidx == 0) {
e = get_entry(table_base,
private->underflow[hook]);
pr_debug("Underflow (this is normal) "
"to %p\n", e);
} else {
- e = jumpstack[--*stackptr];
+ e = jumpstack[--stackidx];
pr_debug("Pulled %p out from pos %u\n",
- e, *stackptr);
+ e, stackidx);
e = ipt_next_entry(e);
}
continue;
}
if (table_base + v != ipt_next_entry(e) &&
!(e->ip.flags & IPT_F_GOTO)) {
- if (*stackptr >= private->stacksize) {
- verdict = NF_DROP;
- break;
- }
- jumpstack[(*stackptr)++] = e;
+ jumpstack[stackidx++] = e;
pr_debug("Pushed %p into pos %u\n",
- e, *stackptr - 1);
+ e, stackidx - 1);
}
e = get_entry(table_base, v);
@@ -423,9 +428,8 @@ ipt_do_table(struct sk_buff *skb,
/* Verdict */
break;
} while (!acpar.hotdrop);
- pr_debug("Exiting %s; resetting sp from %u to %u\n",
- __func__, *stackptr, origptr);
- *stackptr = origptr;
+ pr_debug("Exiting %s; sp at %u\n", __func__, stackidx);
+
xt_write_recseq_end(addend);
local_bh_enable();
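
Reviewer note: the TEE handling above relies on each per-cpu jumpstack allocation being twice stacksize entries. The duplicated packet, flagged through the per-cpu nf_skb_duplicated variable, indexes into the upper half, so it cannot clobber the original traversal's stack. A standalone model of the pointer arithmetic (names mirror the kernel's, but this is not kernel code):

#include <stdio.h>

int main(void)
{
	void *jumpstack_base[2 * 16];	/* one allocation, 2 * stacksize slots */
	int stacksize = 16;
	int nf_skb_duplicated = 1;	/* set while the clone is processed */
	void **jumpstack = jumpstack_base + stacksize * nf_skb_duplicated;

	printf("clone's stack begins at slot %td\n",
	       jumpstack - jumpstack_base);	/* slot 16: the upper half */
	return 0;
}
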
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c
index 4bf3dc49a..270765236 100644
--- a/net/ipv4/netfilter/ipt_ECN.c
+++ b/net/ipv4/netfilter/ipt_ECN.c
@@ -72,7 +72,7 @@ set_ect_tcp(struct sk_buff *skb, const struct ipt_ECN_info *einfo)
tcph->cwr = einfo->proto.tcp.cwr;
inet_proto_csum_replace2(&tcph->check, skb,
- oldval, ((__be16 *)tcph)[6], 0);
+ oldval, ((__be16 *)tcph)[6], false);
return true;
}
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index 8618fd150..c4ffc9de1 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -61,9 +61,7 @@ static bool rpfilter_lookup_reverse(struct flowi4 *fl4,
if (FIB_RES_DEV(res) == dev)
dev_match = true;
#endif
- if (dev_match || flags & XT_RPFILTER_LOOSE)
- return FIB_RES_NH(res).nh_scope <= RT_SCOPE_HOST;
- return dev_match;
+ return dev_match || flags & XT_RPFILTER_LOOSE;
}
static bool rpfilter_is_local(const struct sk_buff *skb)
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 30ad9554b..8a2caaf39 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -280,7 +280,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
return -EINVAL;
}
- h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple);
+ h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
if (h) {
struct sockaddr_in sin;
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 80d5554b9..cdde3ec49 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -134,9 +134,11 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
struct nf_conntrack_tuple innertuple, origtuple;
const struct nf_conntrack_l4proto *innerproto;
const struct nf_conntrack_tuple_hash *h;
- u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
+ const struct nf_conntrack_zone *zone;
+ struct nf_conntrack_zone tmp;
NF_CT_ASSERT(skb->nfct == NULL);
+ zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
/* Are they talking about one of our connections? */
if (!nf_ct_get_tuplepr(skb,
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index c88b7d434..9306ec4fa 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -43,22 +43,22 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
struct sk_buff *skb)
{
- u16 zone = NF_CT_DEFAULT_ZONE;
-
+ u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
- if (skb->nfct)
- zone = nf_ct_zone((struct nf_conn *)skb->nfct);
-#endif
+ if (skb->nfct) {
+ enum ip_conntrack_info ctinfo;
+ const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- if (skb->nf_bridge &&
- skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
- return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
+ zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo));
+ }
#endif
+ if (nf_bridge_in_prerouting(skb))
+ return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id;
+
if (hooknum == NF_INET_PRE_ROUTING)
- return IP_DEFRAG_CONNTRACK_IN + zone;
+ return IP_DEFRAG_CONNTRACK_IN + zone_id;
else
- return IP_DEFRAG_CONNTRACK_OUT + zone;
+ return IP_DEFRAG_CONNTRACK_OUT + zone_id;
}
static unsigned int ipv4_conntrack_defrag(const struct nf_hook_ops *ops,
diff --git a/net/ipv4/netfilter/nf_dup_ipv4.c b/net/ipv4/netfilter/nf_dup_ipv4.c
new file mode 100644
index 000000000..2d79e6e8d
--- /dev/null
+++ b/net/ipv4/netfilter/nf_dup_ipv4.c
@@ -0,0 +1,121 @@
+/*
+ * (C) 2007 by Sebastian Claßen <sebastian.classen@freenet.ag>
+ * (C) 2007-2010 by Jan Engelhardt <jengelh@medozas.de>
+ *
+ * Extracted from xt_TEE.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 or later, as
+ * published by the Free Software Foundation.
+ */
+#include <linux/ip.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/route.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter.h>
+#include <net/checksum.h>
+#include <net/icmp.h>
+#include <net/ip.h>
+#include <net/route.h>
+#include <net/netfilter/ipv4/nf_dup_ipv4.h>
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
+static struct net *pick_net(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+ const struct dst_entry *dst;
+
+ if (skb->dev != NULL)
+ return dev_net(skb->dev);
+ dst = skb_dst(skb);
+ if (dst != NULL && dst->dev != NULL)
+ return dev_net(dst->dev);
+#endif
+ return &init_net;
+}
+
+static bool nf_dup_ipv4_route(struct sk_buff *skb, const struct in_addr *gw,
+ int oif)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ struct net *net = pick_net(skb);
+ struct rtable *rt;
+ struct flowi4 fl4;
+
+ memset(&fl4, 0, sizeof(fl4));
+ if (oif != -1)
+ fl4.flowi4_oif = oif;
+
+ fl4.daddr = gw->s_addr;
+ fl4.flowi4_tos = RT_TOS(iph->tos);
+ fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
+ fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
+ rt = ip_route_output_key(net, &fl4);
+ if (IS_ERR(rt))
+ return false;
+
+ skb_dst_drop(skb);
+ skb_dst_set(skb, &rt->dst);
+ skb->dev = rt->dst.dev;
+ skb->protocol = htons(ETH_P_IP);
+
+ return true;
+}
+
+void nf_dup_ipv4(struct sk_buff *skb, unsigned int hooknum,
+ const struct in_addr *gw, int oif)
+{
+ struct iphdr *iph;
+
+ if (this_cpu_read(nf_skb_duplicated))
+ return;
+ /*
+ * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
+ * the original skb, which should continue on its way as if nothing has
+ * happened. The copy should be independently delivered to the gateway.
+ */
+ skb = pskb_copy(skb, GFP_ATOMIC);
+ if (skb == NULL)
+ return;
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ /* Avoid counting cloned packets towards the original connection. */
+ nf_conntrack_put(skb->nfct);
+ skb->nfct = &nf_ct_untracked_get()->ct_general;
+ skb->nfctinfo = IP_CT_NEW;
+ nf_conntrack_get(skb->nfct);
+#endif
+ /*
+ * If we are in PREROUTING/INPUT, the checksum must be recalculated
+ * since the length could have changed as a result of defragmentation.
+ *
+ * We also decrease the TTL to mitigate potential loops between two
+ * hosts.
+ *
+ * Set %IP_DF so that the original source is notified of a potentially
+ * decreased MTU on the clone route. IPv6 does this too.
+ */
+ iph = ip_hdr(skb);
+ iph->frag_off |= htons(IP_DF);
+ if (hooknum == NF_INET_PRE_ROUTING ||
+ hooknum == NF_INET_LOCAL_IN)
+ --iph->ttl;
+ ip_send_check(iph);
+
+ if (nf_dup_ipv4_route(skb, gw, oif)) {
+ __this_cpu_write(nf_skb_duplicated, true);
+ ip_local_out(skb);
+ __this_cpu_write(nf_skb_duplicated, false);
+ } else {
+ kfree_skb(skb);
+ }
+}
+EXPORT_SYMBOL_GPL(nf_dup_ipv4);
+
+MODULE_AUTHOR("Sebastian ClaĂŸen <sebastian.classen@freenet.ag>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_DESCRIPTION("nf_dup_ipv4: Duplicate IPv4 packet");
+MODULE_LICENSE("GPL");
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index e59cc05c0..22f4579b0 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -120,7 +120,7 @@ static void nf_nat_ipv4_csum_update(struct sk_buff *skb,
oldip = iph->daddr;
newip = t->dst.u3.ip;
}
- inet_proto_csum_replace4(check, skb, oldip, newip, 1);
+ inet_proto_csum_replace4(check, skb, oldip, newip, true);
}
static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
@@ -151,7 +151,7 @@ static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
}
} else
inet_proto_csum_replace2(check, skb,
- htons(oldlen), htons(datalen), 1);
+ htons(oldlen), htons(datalen), true);
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
diff --git a/net/ipv4/netfilter/nf_nat_proto_icmp.c b/net/ipv4/netfilter/nf_nat_proto_icmp.c
index 4557b4ab8..7b98baa13 100644
--- a/net/ipv4/netfilter/nf_nat_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_icmp.c
@@ -67,7 +67,7 @@ icmp_manip_pkt(struct sk_buff *skb,
hdr = (struct icmphdr *)(skb->data + hdroff);
inet_proto_csum_replace2(&hdr->checksum, skb,
- hdr->un.echo.id, tuple->src.u.icmp.id, 0);
+ hdr->un.echo.id, tuple->src.u.icmp.id, false);
hdr->un.echo.id = tuple->src.u.icmp.id;
return true;
}
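
Reviewer note: the last argument of inet_proto_csum_replace2() is now a bool (the pseudo-header flag), false here because the ICMP checksum covers no pseudo header. The arithmetic underneath is the RFC 1624 incremental update HC' = ~(~HC + ~m + m'); a standalone model, not kernel code:

#include <stdint.h>
#include <stdio.h>

/* Incremental 16-bit checksum update per RFC 1624, equation 3. */
static uint16_t csum_update(uint16_t check, uint16_t old_word,
			    uint16_t new_word)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old_word;
	sum += new_word;
	sum = (sum & 0xffff) + (sum >> 16);	/* fold carries twice */
	sum = (sum & 0xffff) + (sum >> 16);
	return ~sum;
}

int main(void)
{
	uint16_t check = 0x1c46;	/* arbitrary initial checksum */
	uint16_t fixed = csum_update(check, 0x0001, 0x0002);

	printf("0x%04x -> 0x%04x\n", check, fixed);	/* -> 0x1c45 */
	return 0;
}
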
diff --git a/net/ipv4/netfilter/nft_dup_ipv4.c b/net/ipv4/netfilter/nft_dup_ipv4.c
new file mode 100644
index 000000000..b45932d43
--- /dev/null
+++ b/net/ipv4/netfilter/nft_dup_ipv4.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2015 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/ipv4/nf_dup_ipv4.h>
+
+struct nft_dup_ipv4 {
+ enum nft_registers sreg_addr:8;
+ enum nft_registers sreg_dev:8;
+};
+
+static void nft_dup_ipv4_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
+ struct in_addr gw = {
+ .s_addr = (__force __be32)regs->data[priv->sreg_addr],
+ };
+ int oif = regs->data[priv->sreg_dev];
+
+ nf_dup_ipv4(pkt->skb, pkt->ops->hooknum, &gw, oif);
+}
+
+static int nft_dup_ipv4_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
+ int err;
+
+ if (tb[NFTA_DUP_SREG_ADDR] == NULL)
+ return -EINVAL;
+
+ priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]);
+ err = nft_validate_register_load(priv->sreg_addr, sizeof(struct in_addr));
+ if (err < 0)
+ return err;
+
+ if (tb[NFTA_DUP_SREG_DEV] != NULL) {
+ priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
+ return nft_validate_register_load(priv->sreg_dev, sizeof(int));
+ }
+ return 0;
+}
+
+static int nft_dup_ipv4_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
+
+ if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) ||
+ nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_type nft_dup_ipv4_type;
+static const struct nft_expr_ops nft_dup_ipv4_ops = {
+ .type = &nft_dup_ipv4_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_dup_ipv4)),
+ .eval = nft_dup_ipv4_eval,
+ .init = nft_dup_ipv4_init,
+ .dump = nft_dup_ipv4_dump,
+};
+
+static const struct nla_policy nft_dup_ipv4_policy[NFTA_DUP_MAX + 1] = {
+ [NFTA_DUP_SREG_ADDR] = { .type = NLA_U32 },
+ [NFTA_DUP_SREG_DEV] = { .type = NLA_U32 },
+};
+
+static struct nft_expr_type nft_dup_ipv4_type __read_mostly = {
+ .family = NFPROTO_IPV4,
+ .name = "dup",
+ .ops = &nft_dup_ipv4_ops,
+ .policy = nft_dup_ipv4_policy,
+ .maxattr = NFTA_DUP_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_dup_ipv4_module_init(void)
+{
+ return nft_register_expr(&nft_dup_ipv4_type);
+}
+
+static void __exit nft_dup_ipv4_module_exit(void)
+{
+ nft_unregister_expr(&nft_dup_ipv4_type);
+}
+
+module_init(nft_dup_ipv4_module_init);
+module_exit(nft_dup_ipv4_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "dup");
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 05ff44b75..e89094ab5 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -363,7 +363,8 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
scoped);
rcu_read_unlock();
- if (!(isk->freebind || isk->transparent || has_addr ||
+ if (!(net->ipv6.sysctl.ip_nonlocal_bind ||
+ isk->freebind || isk->transparent || has_addr ||
addr_type == IPV6_ADDR_ANY))
return -EADDRNOTAVAIL;
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index da5d483e2..3abd9d7a3 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -300,6 +300,8 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPACKSkippedChallenge", LINUX_MIB_TCPACKSKIPPEDCHALLENGE),
SNMP_MIB_ITEM("TCPWinProbe", LINUX_MIB_TCPWINPROBE),
SNMP_MIB_ITEM("TCPKeepAlive", LINUX_MIB_TCPKEEPALIVE),
+ SNMP_MIB_ITEM("TCPMTUPFail", LINUX_MIB_TCPMTUPFAIL),
+ SNMP_MIB_ITEM("TCPMTUPSuccess", LINUX_MIB_TCPMTUPSUCCESS),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index e681b852c..c81deb85a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -91,6 +91,7 @@
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst.h>
+#include <net/dst_metadata.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
@@ -102,6 +103,7 @@
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
+#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
@@ -109,6 +111,8 @@
#include <linux/kmemleak.h>
#endif
#include <net/secure_seq.h>
+#include <net/ip_tunnels.h>
+#include <net/vrf.h>
#define RT_FL_TOS(oldflp4) \
((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
@@ -834,6 +838,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
struct inet_peer *peer;
struct net *net;
int log_martians;
+ int vif;
rcu_read_lock();
in_dev = __in_dev_get_rcu(rt->dst.dev);
@@ -842,10 +847,11 @@ void ip_rt_send_redirect(struct sk_buff *skb)
return;
}
log_martians = IN_DEV_LOG_MARTIANS(in_dev);
+ vif = vrf_master_ifindex_rcu(rt->dst.dev);
rcu_read_unlock();
net = dev_net(rt->dst.dev);
- peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
+ peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
if (!peer) {
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
rt_nexthop(rt, ip_hdr(skb)->daddr));
@@ -934,7 +940,8 @@ static int ip_error(struct sk_buff *skb)
break;
}
- peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
+ peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
+ vrf_master_ifindex(skb->dev), 1);
send = true;
if (peer) {
@@ -1403,6 +1410,7 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
#ifdef CONFIG_IP_ROUTE_CLASSID
rt->dst.tclassid = nh->nh_tclassid;
#endif
+ rt->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
if (unlikely(fnhe))
cached = rt_bind_exception(rt, fnhe, daddr);
else if (!(rt->dst.flags & DST_NOCACHE))
@@ -1546,7 +1554,6 @@ static int __mkroute_input(struct sk_buff *skb,
struct rtable *rth;
int err;
struct in_device *out_dev;
- unsigned int flags = 0;
bool do_cache;
u32 itag = 0;
@@ -1610,7 +1617,7 @@ static int __mkroute_input(struct sk_buff *skb,
}
rth->rt_genid = rt_genid_ipv4(dev_net(rth->dst.dev));
- rth->rt_flags = flags;
+ rth->rt_flags = 0;
rth->rt_type = res->type;
rth->rt_is_input = 1;
rth->rt_iif = 0;
@@ -1624,6 +1631,14 @@ static int __mkroute_input(struct sk_buff *skb,
rth->dst.output = ip_output;
rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag);
+ if (lwtunnel_output_redirect(rth->dst.lwtstate)) {
+ rth->dst.lwtstate->orig_output = rth->dst.output;
+ rth->dst.output = lwtunnel_output;
+ }
+ if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
+ rth->dst.lwtstate->orig_input = rth->dst.input;
+ rth->dst.input = lwtunnel_input;
+ }
skb_dst_set(skb, &rth->dst);
out:
err = 0;
@@ -1662,6 +1677,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
{
struct fib_result res;
struct in_device *in_dev = __in_dev_get_rcu(dev);
+ struct ip_tunnel_info *tun_info;
struct flowi4 fl4;
unsigned int flags = 0;
u32 itag = 0;
@@ -1679,6 +1695,13 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
by fib_lookup.
*/
+ tun_info = skb_tunnel_info(skb);
+ if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
+ fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
+ else
+ fl4.flowi4_tun_key.tun_id = 0;
+ skb_dst_drop(skb);
+
if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
goto martian_source;
@@ -1710,10 +1733,11 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
* Now we are ready to route packet.
*/
fl4.flowi4_oif = 0;
- fl4.flowi4_iif = dev->ifindex;
+ fl4.flowi4_iif = vrf_master_ifindex_rcu(dev) ? : dev->ifindex;
fl4.flowi4_mark = skb->mark;
fl4.flowi4_tos = tos;
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
+ fl4.flowi4_flags = 0;
fl4.daddr = daddr;
fl4.saddr = saddr;
err = fib_lookup(net, &fl4, &res, 0);
@@ -1792,6 +1816,7 @@ local_input:
rth->rt_gateway = 0;
rth->rt_uses_gateway = 0;
INIT_LIST_HEAD(&rth->rt_uncached);
+
RT_CACHE_STAT_INC(in_slow_tot);
if (res.type == RTN_UNREACHABLE) {
rth->dst.input= ip_error;
@@ -1981,7 +2006,6 @@ add:
rth->rt_gateway = 0;
rth->rt_uses_gateway = 0;
INIT_LIST_HEAD(&rth->rt_uncached);
-
RT_CACHE_STAT_INC(out_slow_tot);
if (flags & RTCF_LOCAL)
@@ -2004,6 +2028,8 @@ add:
}
rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);
+ if (lwtunnel_output_redirect(rth->dst.lwtstate))
+ rth->dst.output = lwtunnel_output;
return rth;
}
@@ -2020,6 +2046,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
struct fib_result res;
struct rtable *rth;
int orig_oif;
+ int err = -ENETUNREACH;
res.tclassid = 0;
res.fi = NULL;
@@ -2110,6 +2137,11 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
fl4->saddr = inet_select_addr(dev_out, 0,
RT_SCOPE_HOST);
}
+ if (netif_is_vrf(dev_out) &&
+ !(fl4->flowi4_flags & FLOWI_FLAG_VRFSRC)) {
+ rth = vrf_dev_get_rth(dev_out);
+ goto out;
+ }
}
if (!fl4->daddr) {
@@ -2123,7 +2155,8 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
goto make_route;
}
- if (fib_lookup(net, fl4, &res, 0)) {
+ err = fib_lookup(net, fl4, &res, 0);
+ if (err) {
res.fi = NULL;
res.table = NULL;
if (fl4->flowi4_oif) {
@@ -2151,7 +2184,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
res.type = RTN_UNICAST;
goto make_route;
}
- rth = ERR_PTR(-ENETUNREACH);
+ rth = ERR_PTR(err);
goto out;
}
@@ -2261,7 +2294,6 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
rt->rt_uses_gateway = ort->rt_uses_gateway;
INIT_LIST_HEAD(&rt->rt_uncached);
-
dst_free(new);
}
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 0330ab2e2..ade773744 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -29,6 +29,7 @@
static int zero;
static int one = 1;
static int four = 4;
+static int thousand = 1000;
static int gso_max_segs = GSO_MAX_SEGS;
static int tcp_retr1_max = 255;
static int ip_local_port_range_min[] = { 1, 1 };
@@ -47,14 +48,14 @@ static void set_local_port_range(struct net *net, int range[2])
{
bool same_parity = !((range[0] ^ range[1]) & 1);
- write_seqlock(&net->ipv4.ip_local_ports.lock);
+ write_seqlock_bh(&net->ipv4.ip_local_ports.lock);
if (same_parity && !net->ipv4.ip_local_ports.warned) {
net->ipv4.ip_local_ports.warned = true;
pr_err_ratelimited("ip_local_port_range: prefer different parity for start/end values.\n");
}
net->ipv4.ip_local_ports.range[0] = range[0];
net->ipv4.ip_local_ports.range[1] = range[1];
- write_sequnlock(&net->ipv4.ip_local_ports.lock);
+ write_sequnlock_bh(&net->ipv4.ip_local_ports.lock);
}
/* Validate changes from /proc interface. */
@@ -712,6 +713,24 @@ static struct ctl_table ipv4_table[] = {
.extra2 = &gso_max_segs,
},
{
+ .procname = "tcp_pacing_ss_ratio",
+ .data = &sysctl_tcp_pacing_ss_ratio,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &thousand,
+ },
+ {
+ .procname = "tcp_pacing_ca_ratio",
+ .data = &sysctl_tcp_pacing_ca_ratio,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &thousand,
+ },
+ {
.procname = "tcp_autocorking",
.data = &sysctl_tcp_autocorking,
.maxlen = sizeof(int),
@@ -910,6 +929,13 @@ static struct ctl_table ipv4_net_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "igmp_link_local_mcast_reports",
+ .data = &sysctl_igmp_llm_reports,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
{ }
};
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f6afbdc99..631540add 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -628,6 +628,8 @@ static void skb_entail(struct sock *sk, struct sk_buff *skb)
sk_mem_charge(sk, skb->truesize);
if (tp->nonagle & TCP_NAGLE_PUSH)
tp->nonagle &= ~TCP_NAGLE_PUSH;
+
+ tcp_slow_start_after_idle_check(sk);
}
static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index c037644ea..fd1405d37 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -146,7 +146,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
if (!tcp_is_cwnd_limited(sk))
return;
- if (tp->snd_cwnd <= tp->snd_ssthresh)
+ if (tcp_in_slow_start(tp))
tcp_slow_start(tp, acked);
else {
bictcp_update(ca, tp->snd_cwnd);
diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c
index 8c6fd3d5e..167b6a3e1 100644
--- a/net/ipv4/tcp_cdg.c
+++ b/net/ipv4/tcp_cdg.c
@@ -264,7 +264,7 @@ static void tcp_cdg_cong_avoid(struct sock *sk, u32 ack, u32 acked)
u32 prior_snd_cwnd;
u32 incr;
- if (tp->snd_cwnd < tp->snd_ssthresh && hystart_detect)
+ if (tcp_in_slow_start(tp) && hystart_detect)
tcp_cdg_hystart_update(sk);
if (after(ack, ca->rtt_seq) && ca->rtt.v64) {
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 84be008c9..93c4dc3ab 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -114,16 +114,19 @@ void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
-u32 tcp_ca_get_key_by_name(const char *name)
+u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca)
{
const struct tcp_congestion_ops *ca;
- u32 key;
+ u32 key = TCP_CA_UNSPEC;
might_sleep();
rcu_read_lock();
ca = __tcp_ca_find_autoload(name);
- key = ca ? ca->key : TCP_CA_UNSPEC;
+ if (ca) {
+ key = ca->key;
+ *ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN;
+ }
rcu_read_unlock();
return key;
@@ -365,10 +368,8 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
*/
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
- u32 cwnd = tp->snd_cwnd + acked;
+ u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);
- if (cwnd > tp->snd_ssthresh)
- cwnd = tp->snd_ssthresh + 1;
acked -= cwnd - tp->snd_cwnd;
tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
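
Reviewer note: tcp_slow_start() now clamps cwnd exactly at ssthresh instead of overshooting to ssthresh + 1, and hands the unused ACK credit back so the caller can continue in congestion avoidance on the same ACK. A standalone worked example of the new arithmetic (not kernel code):

#include <stdio.h>
typedef unsigned int u32;

static u32 min_u32(u32 a, u32 b) { return a < b ? a : b; }

int main(void)
{
	u32 snd_cwnd = 10, snd_ssthresh = 12, cwnd_clamp = 1000, acked = 5;
	u32 cwnd = min_u32(snd_cwnd + acked, snd_ssthresh);	/* 12, not 13 */

	acked -= cwnd - snd_cwnd;	/* 3 segments left over for CA growth */
	snd_cwnd = min_u32(cwnd, cwnd_clamp);
	printf("cwnd=%u leftover_acked=%u\n", snd_cwnd, acked);
	return 0;
}
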
@@ -413,7 +414,7 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
return;
/* In "safe" area, increase. */
- if (tp->snd_cwnd <= tp->snd_ssthresh) {
+ if (tcp_in_slow_start(tp)) {
acked = tcp_slow_start(tp, acked);
if (!acked)
return;
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 4f0c402a1..448c2615f 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -154,14 +154,20 @@ static void bictcp_init(struct sock *sk)
static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
if (event == CA_EVENT_TX_START) {
- s32 delta = tcp_time_stamp - tcp_sk(sk)->lsndtime;
struct bictcp *ca = inet_csk_ca(sk);
+ u32 now = tcp_time_stamp;
+ s32 delta;
+
+ delta = now - tcp_sk(sk)->lsndtime;
/* We were application limited (idle) for a while.
* Shift epoch_start to keep cwnd growth to cubic curve.
*/
- if (ca->epoch_start && delta > 0)
+ if (ca->epoch_start && delta > 0) {
ca->epoch_start += delta;
+ if (after(ca->epoch_start, now))
+ ca->epoch_start = now;
+ }
return;
}
}
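
Reviewer note: the CUBIC change shifts epoch_start forward after an application-limited idle period so cwnd keeps growing along the original cubic curve, and the new clamp stops epoch_start from landing in the future, which would stall growth. A standalone model using the usual jiffies-style wrapping comparison (not kernel code):

#include <stdint.h>
#include <stdio.h>

#define after(a, b) ((int32_t)((b) - (a)) < 0)	/* serial-number compare */

int main(void)
{
	uint32_t now = 1000, lsndtime = 400, epoch_start = 900;
	int32_t delta = now - lsndtime;		/* 600 ticks spent idle */

	if (epoch_start && delta > 0) {
		epoch_start += delta;		/* 1500: now in the future */
		if (after(epoch_start, now))
			epoch_start = now;	/* clamp back to now */
	}
	printf("epoch_start=%u\n", epoch_start);	/* 1000 */
	return 0;
}
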
@@ -335,7 +341,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
if (!tcp_is_cwnd_limited(sk))
return;
- if (tp->snd_cwnd <= tp->snd_ssthresh) {
+ if (tcp_in_slow_start(tp)) {
if (hystart && after(ack, ca->end_seq))
bictcp_hystart_reset(sk);
acked = tcp_slow_start(tp, acked);
@@ -454,7 +460,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
ca->delay_min = delay;
/* hystart triggers when cwnd is larger than some threshold */
- if (hystart && tp->snd_cwnd <= tp->snd_ssthresh &&
+ if (hystart && tcp_in_slow_start(tp) &&
tp->snd_cwnd >= hystart_low_window)
hystart_update(sk, delay);
}
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 7092a61c4..7e538f71f 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -209,7 +209,7 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
/* alpha = (1 - g) * alpha + g * F */
- alpha -= alpha >> dctcp_shift_g;
+ alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
if (bytes_ecn) {
/* If dctcp_shift_g == 1, a 32bit value would overflow
* after 8 Mbytes.
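
Reviewer note: the min_not_zero() form fixes a stall in the DCTCP alpha decay. When alpha is smaller than 2^dctcp_shift_g, the shifted term is 0 and the old code could never decay alpha to zero on ECN-free windows. A standalone model (not kernel code; min_not_zero() reimplemented with the kernel's semantics):

#include <stdio.h>
typedef unsigned int u32;

/* Minimum of x and y, treating zero as "absent" (kernel semantics). */
static u32 min_not_zero(u32 x, u32 y)
{
	if (x == 0)
		return y;
	if (y == 0)
		return x;
	return x < y ? x : y;
}

int main(void)
{
	u32 dctcp_shift_g = 4, alpha = 10;	/* alpha < 2^g */
	u32 old_way = alpha - (alpha >> dctcp_shift_g);	/* 10 - 0: stuck */
	u32 new_way = alpha - min_not_zero(alpha, alpha >> dctcp_shift_g);

	printf("old=%u new=%u\n", old_way, new_way);	/* old=10 new=0 */
	return 0;
}
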
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 882c08aae..db7842495 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -116,7 +116,7 @@ static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
if (!tcp_is_cwnd_limited(sk))
return;
- if (tp->snd_cwnd <= tp->snd_ssthresh)
+ if (tcp_in_slow_start(tp))
tcp_slow_start(tp, acked);
else {
/* Update AIMD parameters.
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 58469fff6..82f0d9ed6 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -236,7 +236,7 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
if (!tcp_is_cwnd_limited(sk))
return;
- if (tp->snd_cwnd <= tp->snd_ssthresh)
+ if (tcp_in_slow_start(tp))
tcp_slow_start(tp, acked);
else {
/* In dangerous area, increase slowly.
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index f963b274f..083831e35 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -112,7 +112,7 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)
rho_fractions = ca->rho_3ls - (ca->rho << 3);
- if (tp->snd_cwnd < tp->snd_ssthresh) {
+ if (tcp_in_slow_start(tp)) {
/*
* slow start
* INC = 2^RHO - 1
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index f71002e4d..2ab9bbb6f 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -268,7 +268,7 @@ static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
return;
/* In slow start */
- if (tp->snd_cwnd <= tp->snd_ssthresh)
+ if (tcp_in_slow_start(tp))
tcp_slow_start(tp, acked);
else {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index bf0636da8..3d12c5e94 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -112,6 +112,7 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
#define FLAG_SYN_ACKED 0x10 /* This ACK acknowledged SYN. */
#define FLAG_DATA_SACKED 0x20 /* New SACK. */
#define FLAG_ECE 0x40 /* ECE in this ACK */
+#define FLAG_LOST_RETRANS 0x80 /* This ACK marks some retransmission lost */
#define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/
#define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */
#define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
@@ -199,11 +200,13 @@ static void tcp_enter_quickack_mode(struct sock *sk)
* and the session is not interactive.
*/
-static inline bool tcp_in_quickack_mode(const struct sock *sk)
+static bool tcp_in_quickack_mode(struct sock *sk)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
+ const struct dst_entry *dst = __sk_dst_get(sk);
- return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
+ return (dst && dst_metric(dst, RTAX_QUICKACK)) ||
+ (icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong);
}
static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
@@ -753,13 +756,29 @@ static void tcp_rtt_estimator(struct sock *sk, long mrtt_us)
* TCP pacing, to smooth the burst on large writes when packets
* in flight is significantly lower than cwnd (or rwin)
*/
+int sysctl_tcp_pacing_ss_ratio __read_mostly = 200;
+int sysctl_tcp_pacing_ca_ratio __read_mostly = 120;
+
static void tcp_update_pacing_rate(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
u64 rate;
/* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */
- rate = (u64)tp->mss_cache * 2 * (USEC_PER_SEC << 3);
+ rate = (u64)tp->mss_cache * ((USEC_PER_SEC / 100) << 3);
+
+ /* current rate is (cwnd * mss) / srtt
+ * In Slow Start [1], set sk_pacing_rate to 200 % the current rate.
+ * In Congestion Avoidance phase, set it to 120 % the current rate.
+ *
+ * [1] : Normal Slow Start condition is (tp->snd_cwnd < tp->snd_ssthresh)
+ * If snd_cwnd >= (tp->snd_ssthresh / 2), we are approaching
+ * end of slow start and should slow down.
+ */
+ if (tp->snd_cwnd < tp->snd_ssthresh / 2)
+ rate *= sysctl_tcp_pacing_ss_ratio;
+ else
+ rate *= sysctl_tcp_pacing_ca_ratio;
rate *= max(tp->snd_cwnd, tp->packets_out);
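
Reviewer note: the pre-scale (USEC_PER_SEC / 100) << 3 folds the percentage ratios and the fact that srtt_us is stored left-shifted by 3 into one constant; the later do_div() by srtt_us is outside this hunk. A standalone worked example with illustrative numbers (srtt_us here is plain microseconds, since the model drops the << 3 on both sides):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mss = 1448, cwnd = 10, srtt_us = 10000;	/* 10 ms RTT */
	uint64_t ratio = 200;	/* slow start: pace at 200% of current rate */
	uint64_t rate = mss * cwnd * 1000000 / srtt_us * ratio / 100;

	printf("pacing rate: %llu bytes/sec\n",
	       (unsigned long long)rate);	/* 2896000 */
	return 0;
}
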
@@ -1040,7 +1059,7 @@ static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
* highest SACK block). Also calculate the lowest snd_nxt among the remaining
* retransmitted skbs to avoid some costly processing per ACKs.
*/
-static void tcp_mark_lost_retrans(struct sock *sk)
+static void tcp_mark_lost_retrans(struct sock *sk, int *flag)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -1081,7 +1100,7 @@ static void tcp_mark_lost_retrans(struct sock *sk)
if (after(received_upto, ack_seq)) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
tp->retrans_out -= tcp_skb_pcount(skb);
-
+ *flag |= FLAG_LOST_RETRANS;
tcp_skb_mark_lost_uncond_verify(tp, skb);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
} else {
@@ -1821,7 +1840,7 @@ advance_sp:
((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
tcp_update_reordering(sk, tp->fackets_out - state->reord, 0);
- tcp_mark_lost_retrans(sk);
+ tcp_mark_lost_retrans(sk, &state->flag);
tcp_verify_left_out(tp);
out:
@@ -2477,15 +2496,14 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
return false;
}
-/* The cwnd reduction in CWR and Recovery use the PRR algorithm
- * https://datatracker.ietf.org/doc/draft-ietf-tcpm-proportional-rate-reduction/
+/* The cwnd reduction in CWR and Recovery uses the PRR algorithm in RFC 6937.
* It computes the number of packets to send (sndcnt) based on packets newly
* delivered:
* 1) If the packets in flight is larger than ssthresh, PRR spreads the
* cwnd reductions across a full RTT.
- * 2) If packets in flight is lower than ssthresh (such as due to excess
- * losses and/or application stalls), do not perform any further cwnd
- * reductions, but instead slow start up to ssthresh.
+ * 2) Otherwise PRR uses packet conservation to send as much as delivered.
+ * But when the retransmits are acked without further losses, PRR
+ * slow starts cwnd up to ssthresh to speed up the recovery.
*/
static void tcp_init_cwnd_reduction(struct sock *sk)
{
@@ -2502,7 +2520,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk)
}
static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
- int fast_rexmit)
+ int fast_rexmit, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
int sndcnt = 0;
@@ -2511,16 +2529,18 @@ static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
(tp->packets_out - tp->sacked_out);
tp->prr_delivered += newly_acked_sacked;
- if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
+ if (delta < 0) {
u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
tp->prior_cwnd - 1;
sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
- } else {
+ } else if ((flag & FLAG_RETRANS_DATA_ACKED) &&
+ !(flag & FLAG_LOST_RETRANS)) {
sndcnt = min_t(int, delta,
max_t(int, tp->prr_delivered - tp->prr_out,
newly_acked_sacked) + 1);
+ } else {
+ sndcnt = min(delta, newly_acked_sacked);
}
-
sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
}
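
Reviewer note on the branch logic above: while packets in flight exceed ssthresh (delta < 0), PRR reduces cwnd proportionally across an RTT; once retransmits are ACKed without further losses it slow-starts back toward ssthresh; otherwise it falls back to plain packet conservation. A standalone model of the proportional branch's rounding-up division (not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ssthresh = 10, prior_cwnd = 20;
	uint64_t prr_delivered = 4, prr_out = 1;
	/* sndcnt = ceil(ssthresh * prr_delivered / prior_cwnd) - prr_out */
	uint64_t dividend = ssthresh * prr_delivered + prior_cwnd - 1;
	int64_t sndcnt = dividend / prior_cwnd - prr_out;

	/* dividend = 40 + 19 = 59; 59/20 = 2 = ceil(40/20); minus 1 sent */
	printf("sndcnt=%lld\n", (long long)sndcnt);	/* 1 */
	return 0;
}
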
@@ -2581,7 +2601,7 @@ static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked)
if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
tcp_try_keep_open(sk);
} else {
- tcp_cwnd_reduction(sk, prior_unsacked, 0);
+ tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
}
}
@@ -2591,6 +2611,7 @@ static void tcp_mtup_probe_failed(struct sock *sk)
icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
icsk->icsk_mtup.probe_size = 0;
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
}
static void tcp_mtup_probe_success(struct sock *sk)
@@ -2610,6 +2631,7 @@ static void tcp_mtup_probe_success(struct sock *sk)
icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
icsk->icsk_mtup.probe_size = 0;
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
}
/* Do a simple retransmit without using the backoff mechanisms in
@@ -2678,7 +2700,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
tp->prior_ssthresh = 0;
tcp_init_undo(tp);
- if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
+ if (!tcp_in_cwnd_reduction(sk)) {
if (!ece_ack)
tp->prior_ssthresh = tcp_current_ssthresh(sk);
tcp_init_cwnd_reduction(sk);
@@ -2738,7 +2760,7 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
/* Undo during fast recovery after partial ACK. */
static bool tcp_try_undo_partial(struct sock *sk, const int acked,
- const int prior_unsacked)
+ const int prior_unsacked, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -2754,7 +2776,7 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked,
* mark more packets lost or retransmit more.
*/
if (tp->retrans_out) {
- tcp_cwnd_reduction(sk, prior_unsacked, 0);
+ tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
return true;
}
@@ -2841,7 +2863,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
if (tcp_is_reno(tp) && is_dupack)
tcp_add_reno_sack(sk);
} else {
- if (tcp_try_undo_partial(sk, acked, prior_unsacked))
+ if (tcp_try_undo_partial(sk, acked, prior_unsacked, flag))
return;
/* Partial ACK arrived. Force fast retransmit. */
do_lost = tcp_is_reno(tp) ||
@@ -2854,9 +2876,10 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
break;
case TCP_CA_Loss:
tcp_process_loss(sk, flag, is_dupack);
- if (icsk->icsk_ca_state != TCP_CA_Open)
+ if (icsk->icsk_ca_state != TCP_CA_Open &&
+ !(flag & FLAG_LOST_RETRANS))
return;
- /* Fall through to processing in Open state. */
+ /* Change state if cwnd is undone or retransmits are lost */
default:
if (tcp_is_reno(tp)) {
if (flag & FLAG_SND_UNA_ADVANCED)
@@ -2891,7 +2914,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
if (do_lost)
tcp_update_scoreboard(sk, fast_rexmit);
- tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit);
+ tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit, flag);
tcp_xmit_retransmit_queue(sk);
}
@@ -3328,6 +3351,9 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
tp->pred_flags = 0;
tcp_fast_path_check(sk);
+ if (tcp_send_head(sk))
+ tcp_slow_start_after_idle_check(sk);
+
if (nwin > tp->max_window) {
tp->max_window = nwin;
tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie);
@@ -3565,10 +3591,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
&sack_state);
acked -= tp->packets_out;
- /* Advance cwnd if state allows */
- if (tcp_may_raise_cwnd(sk, flag))
- tcp_cong_avoid(sk, ack, acked);
-
if (tcp_ack_is_dubious(sk, flag)) {
is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
tcp_fastretrans_alert(sk, acked, prior_unsacked,
@@ -3577,6 +3599,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (tp->tlp_high_seq)
tcp_process_tlp_ack(sk, ack, flag);
+ /* Advance cwnd if state allows */
+ if (tcp_may_raise_cwnd(sk, flag))
+ tcp_cong_avoid(sk, ack, acked);
+
if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) {
struct dst_entry *dst = __sk_dst_get(sk);
if (dst)
@@ -3991,7 +4017,6 @@ void tcp_reset(struct sock *sk)
static void tcp_fin(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- const struct dst_entry *dst;
inet_csk_schedule_ack(sk);
@@ -4003,9 +4028,7 @@ static void tcp_fin(struct sock *sk)
case TCP_ESTABLISHED:
/* Move to CLOSE_WAIT */
tcp_set_state(sk, TCP_CLOSE_WAIT);
- dst = __sk_dst_get(sk);
- if (!dst || !dst_metric(dst, RTAX_QUICKACK))
- inet_csk(sk)->icsk_ack.pingpong = 1;
+ inet_csk(sk)->icsk_ack.pingpong = 1;
break;
case TCP_CLOSE_WAIT:
@@ -6066,14 +6089,17 @@ static void tcp_ecn_create_request(struct request_sock *req,
const struct net *net = sock_net(listen_sk);
bool th_ecn = th->ece && th->cwr;
bool ect, ecn_ok;
+ u32 ecn_ok_dst;
if (!th_ecn)
return;
ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield);
- ecn_ok = net->ipv4.sysctl_tcp_ecn || dst_feature(dst, RTAX_FEATURE_ECN);
+ ecn_ok_dst = dst_feature(dst, DST_FEATURE_ECN_MASK);
+ ecn_ok = net->ipv4.sysctl_tcp_ecn || ecn_ok_dst;
- if ((!ect && ecn_ok) || tcp_ca_needs_ecn(listen_sk))
+ if ((!ect && ecn_ok) || tcp_ca_needs_ecn(listen_sk) ||
+ (ecn_ok_dst & DST_FEATURE_ECN_CA))
inet_rsk(req)->ecn_ok = 1;
}
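The DST_FEATURE_ECN_CA bit used here lets a route mandate an ECN-capable congestion-control algorithm (e.g. DCTCP) in addition to plain ECN negotiation. A condensed sketch of the decision, with the dst.h mask definitions assumed (they are not part of this hunk):

#define DST_FEATURE_ECN_CA (1U << 31) /* assumed value from net/dst.h */

static bool ecn_ok_for_req(bool ect, u32 ecn_ok_dst, bool sysctl_ecn,
			   bool ca_needs_ecn)
{
	/* ECN is acceptable if allowed by sysctl or by the route bits ... */
	bool ecn_ok = sysctl_ecn || ecn_ok_dst;

	/* ... and enabled when the SYN was not ECT-marked, the listener's
	 * CA needs ECN, or the route explicitly asked for an ECN CA.
	 */
	return (!ect && ecn_ok) || ca_needs_ecn ||
	       (ecn_ok_dst & DST_FEATURE_ECN_CA);
}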
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1eef463f8..42562a7f8 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -223,7 +223,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (err)
goto failure;
- inet_set_txhash(sk);
+ sk_set_txhash(sk);
rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
inet->inet_sport, inet->inet_dport, sk);
@@ -1293,7 +1293,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newinet->mc_ttl = ip_hdr(skb)->ttl;
newinet->rcv_tos = ip_hdr(skb)->tos;
inet_csk(newsk)->icsk_ext_hdr_len = 0;
- inet_set_txhash(newsk);
+ sk_set_txhash(newsk);
if (inet_opt)
inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
newinet->inet_id = newtp->write_seq ^ jiffies;
@@ -1710,8 +1710,7 @@ do_time_wait:
iph->daddr, th->dest,
inet_iif(skb));
if (sk2) {
- inet_twsk_deschedule(inet_twsk(sk));
- inet_twsk_put(inet_twsk(sk));
+ inet_twsk_deschedule_put(inet_twsk(sk));
sk = sk2;
goto process;
}
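inet_twsk_deschedule_put() folds the former deschedule + put pair into a single call. A sketch of its expected shape (the real body lives in net/ipv4/inet_timewait_sock.c and is assumed here, not shown by this patch):

void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
{
	/* Cancel the timewait timer; only the canceller kills the socket. */
	if (del_timer_sync(&tw->tw_timer))
		inet_twsk_kill(tw);
	/* Drop the caller's reference in all cases. */
	inet_twsk_put(tw);
}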
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index a51d63a43..c8cbc2b4b 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -81,11 +81,7 @@ static void tcp_metric_set(struct tcp_metrics_block *tm,
static bool addr_same(const struct inetpeer_addr *a,
const struct inetpeer_addr *b)
{
- if (a->family != b->family)
- return false;
- if (a->family == AF_INET)
- return a->addr.a4 == b->addr.a4;
- return ipv6_addr_equal(&a->addr.in6, &b->addr.in6);
+ return inetpeer_addr_cmp(a, b) == 0;
}
struct tcpm_hash_bucket {
@@ -247,14 +243,14 @@ static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
daddr.family = req->rsk_ops->family;
switch (daddr.family) {
case AF_INET:
- saddr.addr.a4 = inet_rsk(req)->ir_loc_addr;
- daddr.addr.a4 = inet_rsk(req)->ir_rmt_addr;
- hash = (__force unsigned int) daddr.addr.a4;
+ inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
+ inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
+ hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- saddr.addr.in6 = inet_rsk(req)->ir_v6_loc_addr;
- daddr.addr.in6 = inet_rsk(req)->ir_v6_rmt_addr;
+ inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
+ inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
break;
#endif
@@ -285,25 +281,19 @@ static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock
struct net *net;
if (tw->tw_family == AF_INET) {
- saddr.family = AF_INET;
- saddr.addr.a4 = tw->tw_rcv_saddr;
- daddr.family = AF_INET;
- daddr.addr.a4 = tw->tw_daddr;
- hash = (__force unsigned int) daddr.addr.a4;
+ inetpeer_set_addr_v4(&saddr, tw->tw_rcv_saddr);
+ inetpeer_set_addr_v4(&daddr, tw->tw_daddr);
+ hash = ipv4_addr_hash(tw->tw_daddr);
}
#if IS_ENABLED(CONFIG_IPV6)
else if (tw->tw_family == AF_INET6) {
if (ipv6_addr_v4mapped(&tw->tw_v6_daddr)) {
- saddr.family = AF_INET;
- saddr.addr.a4 = tw->tw_rcv_saddr;
- daddr.family = AF_INET;
- daddr.addr.a4 = tw->tw_daddr;
- hash = (__force unsigned int) daddr.addr.a4;
+ inetpeer_set_addr_v4(&saddr, tw->tw_rcv_saddr);
+ inetpeer_set_addr_v4(&daddr, tw->tw_daddr);
+ hash = ipv4_addr_hash(tw->tw_daddr);
} else {
- saddr.family = AF_INET6;
- saddr.addr.in6 = tw->tw_v6_rcv_saddr;
- daddr.family = AF_INET6;
- daddr.addr.in6 = tw->tw_v6_daddr;
+ inetpeer_set_addr_v6(&saddr, &tw->tw_v6_rcv_saddr);
+ inetpeer_set_addr_v6(&daddr, &tw->tw_v6_daddr);
hash = ipv6_addr_hash(&tw->tw_v6_daddr);
}
}
@@ -335,25 +325,19 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
struct net *net;
if (sk->sk_family == AF_INET) {
- saddr.family = AF_INET;
- saddr.addr.a4 = inet_sk(sk)->inet_saddr;
- daddr.family = AF_INET;
- daddr.addr.a4 = inet_sk(sk)->inet_daddr;
- hash = (__force unsigned int) daddr.addr.a4;
+ inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
+ inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
+ hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
}
#if IS_ENABLED(CONFIG_IPV6)
else if (sk->sk_family == AF_INET6) {
if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
- saddr.family = AF_INET;
- saddr.addr.a4 = inet_sk(sk)->inet_saddr;
- daddr.family = AF_INET;
- daddr.addr.a4 = inet_sk(sk)->inet_daddr;
- hash = (__force unsigned int) daddr.addr.a4;
+ inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
+ inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
+ hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
} else {
- saddr.family = AF_INET6;
- saddr.addr.in6 = sk->sk_v6_rcv_saddr;
- daddr.family = AF_INET6;
- daddr.addr.in6 = sk->sk_v6_daddr;
+ inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
+ inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
hash = ipv6_addr_hash(&sk->sk_v6_daddr);
}
}
@@ -461,7 +445,7 @@ void tcp_update_metrics(struct sock *sk)
tcp_metric_set(tm, TCP_METRIC_CWND,
tp->snd_cwnd);
}
- } else if (tp->snd_cwnd > tp->snd_ssthresh &&
+ } else if (!tcp_in_slow_start(tp) &&
icsk->icsk_ca_state == TCP_CA_Open) {
/* Cong. avoidance phase, cwnd is reliable. */
if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
@@ -796,18 +780,18 @@ static int tcp_metrics_fill_info(struct sk_buff *msg,
switch (tm->tcpm_daddr.family) {
case AF_INET:
if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
- tm->tcpm_daddr.addr.a4) < 0)
+ inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
goto nla_put_failure;
if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
- tm->tcpm_saddr.addr.a4) < 0)
+ inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
goto nla_put_failure;
break;
case AF_INET6:
if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
- &tm->tcpm_daddr.addr.in6) < 0)
+ inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
goto nla_put_failure;
if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
- &tm->tcpm_saddr.addr.in6) < 0)
+ inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
goto nla_put_failure;
break;
default:
@@ -956,20 +940,21 @@ static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
a = info->attrs[v4];
if (a) {
- addr->family = AF_INET;
- addr->addr.a4 = nla_get_in_addr(a);
+ inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
if (hash)
- *hash = (__force unsigned int) addr->addr.a4;
+ *hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
return 0;
}
a = info->attrs[v6];
if (a) {
+ struct in6_addr in6;
+
if (nla_len(a) != sizeof(struct in6_addr))
return -EINVAL;
- addr->family = AF_INET6;
- addr->addr.in6 = nla_get_in6_addr(a);
+ in6 = nla_get_in6_addr(a);
+ inetpeer_set_addr_v6(addr, &in6);
if (hash)
- *hash = ipv6_addr_hash(&addr->addr.in6);
+ *hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
return 0;
}
return optional ? 1 : -EAFNOSUPPORT;
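Every open-coded family/address/hash triple above is replaced by small inetpeer accessors. A sketch consistent with the removed lines (exact struct layout assumed; the helpers themselves are defined outside this diff):

static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip)
{
	iaddr->addr.a4 = ip;      /* was: daddr.addr.a4 = ... */
	iaddr->family = AF_INET;  /* was: daddr.family = AF_INET */
}

static inline unsigned int ipv4_addr_hash(__be32 ip)
{
	/* was: (__force unsigned int) daddr.addr.a4 */
	return (__force unsigned int)ip;
}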
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 4bc00cb79..def765911 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -147,8 +147,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
if (!th->fin ||
TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
- inet_twsk_deschedule(tw);
- inet_twsk_put(tw);
+ inet_twsk_deschedule_put(tw);
return TCP_TW_RST;
}
@@ -163,9 +162,9 @@ kill_with_rst:
if (tcp_death_row.sysctl_tw_recycle &&
tcptw->tw_ts_recent_stamp &&
tcp_tw_remember_stamp(tw))
- inet_twsk_schedule(tw, tw->tw_timeout);
+ inet_twsk_reschedule(tw, tw->tw_timeout);
else
- inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
+ inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
return TCP_TW_ACK;
}
@@ -198,12 +197,11 @@ kill_with_rst:
*/
if (sysctl_tcp_rfc1337 == 0) {
kill:
- inet_twsk_deschedule(tw);
- inet_twsk_put(tw);
+ inet_twsk_deschedule_put(tw);
return TCP_TW_SUCCESS;
}
}
- inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
+ inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
if (tmp_opt.saw_tstamp) {
tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
@@ -253,7 +251,7 @@ kill:
* Do not reschedule in the last case.
*/
if (paws_reject || th->ack)
- inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
+ inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
return tcp_timewait_check_oow_rate_limit(
tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
@@ -324,9 +322,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
} while (0);
#endif
- /* Linkage updates. */
- __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
-
/* Get the TIME_WAIT timeout firing. */
if (timeo < rto)
timeo = rto;
@@ -340,6 +335,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
}
inet_twsk_schedule(tw, timeo);
+ /* Linkage updates. */
+ __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
inet_twsk_put(tw);
} else {
/* Sorry, if we're out of memory, just CLOSE this
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 81c640fe0..f5ecbbbc6 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -137,12 +137,12 @@ static __u16 tcp_advertise_mss(struct sock *sk)
}
/* RFC2861. Reset CWND after idle period longer than RTO to "restart window".
- * This is the first part of cwnd validation mechanism. */
-static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
+ * This is the first part of the cwnd validation mechanism.
+ */
+void tcp_cwnd_restart(struct sock *sk, s32 delta)
{
struct tcp_sock *tp = tcp_sk(sk);
- s32 delta = tcp_time_stamp - tp->lsndtime;
- u32 restart_cwnd = tcp_init_cwnd(tp, dst);
+ u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
u32 cwnd = tp->snd_cwnd;
tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
@@ -163,20 +163,17 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
{
struct inet_connection_sock *icsk = inet_csk(sk);
const u32 now = tcp_time_stamp;
- const struct dst_entry *dst = __sk_dst_get(sk);
- if (sysctl_tcp_slow_start_after_idle &&
- (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
- tcp_cwnd_restart(sk, __sk_dst_get(sk));
+ if (tcp_packets_in_flight(tp) == 0)
+ tcp_ca_event(sk, CA_EVENT_TX_START);
tp->lsndtime = now;
/* If it is a reply for ato after last received
* packet, enter pingpong mode.
*/
- if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato &&
- (!dst || !dst_metric(dst, RTAX_QUICKACK)))
- icsk->icsk_ack.pingpong = 1;
+ if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
+ icsk->icsk_ack.pingpong = 1;
}
/* Account for an ACK we sent. */
@@ -953,9 +950,6 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
&md5);
tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
- if (tcp_packets_in_flight(tp) == 0)
- tcp_ca_event(sk, CA_EVENT_TX_START);
-
/* if no packet is in qdisc/device queue, then allow XPS to select
* another queue. We can be called from tcp_tsq_handler()
* which holds one reference to sk_wmem_alloc.
@@ -1783,7 +1777,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
goto send_now;
- if (!((1 << icsk->icsk_ca_state) & (TCPF_CA_Open | TCPF_CA_CWR)))
+ if (icsk->icsk_ca_state >= TCP_CA_Recovery)
goto send_now;
/* Avoid bursty behavior by allowing defer
@@ -2158,7 +2152,7 @@ repair:
tcp_cwnd_validate(sk, is_cwnd_limited);
return false;
}
- return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
+ return !tp->packets_out && tcp_send_head(sk);
}
bool tcp_schedule_loss_probe(struct sock *sk)
@@ -2235,7 +2229,7 @@ static bool skb_still_in_host_queue(const struct sock *sk,
return false;
}
-/* When probe timeout (PTO) fires, send a new segment if one exists, else
+/* When probe timeout (PTO) fires, try to send a new segment if possible, else
* retransmit the last segment.
*/
void tcp_send_loss_probe(struct sock *sk)
@@ -2244,11 +2238,19 @@ void tcp_send_loss_probe(struct sock *sk)
struct sk_buff *skb;
int pcount;
int mss = tcp_current_mss(sk);
- int err = -1;
- if (tcp_send_head(sk)) {
- err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
- goto rearm_timer;
+ skb = tcp_send_head(sk);
+ if (skb) {
+ if (tcp_snd_wnd_test(tp, skb, mss)) {
+ pcount = tp->packets_out;
+ tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
+ if (tp->packets_out > pcount)
+ goto probe_sent;
+ goto rearm_timer;
+ }
+ skb = tcp_write_queue_prev(sk, skb);
+ } else {
+ skb = tcp_write_queue_tail(sk);
}
/* At most one outstanding TLP retransmission. */
@@ -2256,7 +2258,6 @@ void tcp_send_loss_probe(struct sock *sk)
goto rearm_timer;
/* Retransmit last segment. */
- skb = tcp_write_queue_tail(sk);
if (WARN_ON(!skb))
goto rearm_timer;
@@ -2271,26 +2272,24 @@ void tcp_send_loss_probe(struct sock *sk)
if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss,
GFP_ATOMIC)))
goto rearm_timer;
- skb = tcp_write_queue_tail(sk);
+ skb = tcp_write_queue_next(sk, skb);
}
if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
goto rearm_timer;
- err = __tcp_retransmit_skb(sk, skb);
+ if (__tcp_retransmit_skb(sk, skb))
+ goto rearm_timer;
/* Record snd_nxt for loss detection. */
- if (likely(!err))
- tp->tlp_high_seq = tp->snd_nxt;
+ tp->tlp_high_seq = tp->snd_nxt;
+probe_sent:
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
+ /* Reset so that tcp_rearm_rto will restart the timer from now */
+ inet_csk(sk)->icsk_pending = 0;
rearm_timer:
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- inet_csk(sk)->icsk_rto,
- TCP_RTO_MAX);
-
- if (likely(!err))
- NET_INC_STATS_BH(sock_net(sk),
- LINUX_MIB_TCPLOSSPROBES);
+ tcp_rearm_rto(sk);
}
/* Push out any pending frames which were held back due to
@@ -3421,7 +3420,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
*/
tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
skb_mstamp_get(&skb->skb_mstamp);
- NET_INC_STATS_BH(sock_net(sk), mib);
+ NET_INC_STATS(sock_net(sk), mib);
return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
}
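After this rework tcp_send_loss_probe() prefers fresh data over a retransmission and only accounts a probe that was actually sent. The control flow of the new version, condensed into comment form (names as in the hunk above):

/*
 * PTO fired:
 *   if unsent data exists and the receive window allows it:
 *       transmit one new segment; packets_out grew -> probe_sent
 *   else:
 *       retransmit the tail segment; on success    -> probe_sent
 *   probe_sent: bump LINUX_MIB_TCPLOSSPROBES and clear icsk_pending
 *   always:     tcp_rearm_rto(sk) instead of a fixed-RTO rearm
 */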
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 333bcb241..bf5ea9e9b 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -22,7 +22,7 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
if (!tcp_is_cwnd_limited(sk))
return;
- if (tp->snd_cwnd <= tp->snd_ssthresh)
+ if (tcp_in_slow_start(tp))
tcp_slow_start(tp, acked);
else
tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
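tcp_scalable above and tcp_vegas/tcp_veno below all drop their open-coded "snd_cwnd <= snd_ssthresh" tests for the tcp_in_slow_start() helper. The helper is believed to use a strict comparison, so the conversion also quietly fixes an off-by-one in the slow-start check (sketch, assumed from include/net/tcp.h):

static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
	return tp->snd_cwnd < tp->snd_ssthresh;
}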
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 5b752f58a..7149ebc82 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -649,4 +649,3 @@ void tcp_init_xmit_timers(struct sock *sk)
inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
&tcp_keepalive_timer);
}
-EXPORT_SYMBOL(tcp_init_xmit_timers);
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index a6cea1d5e..13951c408 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -225,7 +225,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
*/
diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
- if (diff > gamma && tp->snd_cwnd <= tp->snd_ssthresh) {
+ if (diff > gamma && tcp_in_slow_start(tp)) {
/* Going too fast. Time to slow down
* and switch to congestion avoidance.
*/
@@ -240,7 +240,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
tp->snd_ssthresh = tcp_vegas_ssthresh(tp);
- } else if (tp->snd_cwnd <= tp->snd_ssthresh) {
+ } else if (tcp_in_slow_start(tp)) {
/* Slow start. */
tcp_slow_start(tp, acked);
} else {
@@ -281,7 +281,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
vegas->minRTT = 0x7fffffff;
}
/* Use normal slow start */
- else if (tp->snd_cwnd <= tp->snd_ssthresh)
+ else if (tcp_in_slow_start(tp))
tcp_slow_start(tp, acked);
}
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index 112151eee..0d094b995 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -150,7 +150,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
veno->diff = (tp->snd_cwnd << V_PARAM_SHIFT) - target_cwnd;
- if (tp->snd_cwnd <= tp->snd_ssthresh) {
+ if (tcp_in_slow_start(tp)) {
/* Slow start. */
tcp_slow_start(tp, acked);
} else {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1b8c5ba7d..f7d1d5e19 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1013,11 +1013,32 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (!rt) {
struct net *net = sock_net(sk);
+ __u8 flow_flags = inet_sk_flowi_flags(sk);
fl4 = &fl4_stack;
+
+ /* Unconnected socket. If the output device is enslaved to a VRF
+ * device, look up the source address from the VRF table. This
+ * mimics the behavior of ip_route_connect{_init}.
+ */
+ if (netif_index_is_vrf(net, ipc.oif)) {
+ flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
+ RT_SCOPE_UNIVERSE, sk->sk_protocol,
+ (flow_flags | FLOWI_FLAG_VRFSRC |
+ FLOWI_FLAG_SKIP_NH_OIF),
+ faddr, saddr, dport,
+ inet->inet_sport);
+
+ rt = ip_route_output_flow(net, fl4, sk);
+ if (!IS_ERR(rt)) {
+ saddr = fl4->saddr;
+ ip_rt_put(rt);
+ }
+ }
+
flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE, sk->sk_protocol,
- inet_sk_flowi_flags(sk),
+ flow_flags,
faddr, saddr, dport, inet->inet_sport);
security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
index 933ea903f..aba428626 100644
--- a/net/ipv4/udp_tunnel.c
+++ b/net/ipv4/udp_tunnel.c
@@ -4,9 +4,10 @@
#include <linux/udp.h>
#include <linux/types.h>
#include <linux/kernel.h>
+#include <net/dst_metadata.h>
+#include <net/net_namespace.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
-#include <net/net_namespace.h>
int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
struct socket **sockp)
@@ -103,4 +104,26 @@ void udp_tunnel_sock_release(struct socket *sock)
}
EXPORT_SYMBOL_GPL(udp_tunnel_sock_release);
+struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
+ __be16 flags, __be64 tunnel_id, int md_size)
+{
+ struct metadata_dst *tun_dst;
+ struct ip_tunnel_info *info;
+
+ if (family == AF_INET)
+ tun_dst = ip_tun_rx_dst(skb, flags, tunnel_id, md_size);
+ else
+ tun_dst = ipv6_tun_rx_dst(skb, flags, tunnel_id, md_size);
+ if (!tun_dst)
+ return NULL;
+
+ info = &tun_dst->u.tun_info;
+ info->key.tp_src = udp_hdr(skb)->source;
+ info->key.tp_dst = udp_hdr(skb)->dest;
+ if (udp_hdr(skb)->check)
+ info->key.tun_flags |= TUNNEL_CSUM;
+ return tun_dst;
+}
+EXPORT_SYMBOL_GPL(udp_tun_rx_dst);
+
MODULE_LICENSE("GPL");
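udp_tun_rx_dst() is intended for a tunnel driver's encapsulated-receive path. A hypothetical caller (driver shape and VNI handling invented for illustration; only udp_tun_rx_dst() itself comes from this patch):

static int example_tun_rcv(struct sk_buff *skb, __be64 vni)
{
	struct metadata_dst *tun_dst;

	/* TUNNEL_KEY: a key/VNI accompanies the packet; 0: no option bytes */
	tun_dst = udp_tun_rx_dst(skb, AF_INET, TUNNEL_KEY, vni, 0);
	if (!tun_dst)
		return -ENOMEM;

	/* Attach the collected metadata so upper layers can read it. */
	skb_dst_set(skb, &tun_dst->dst);
	/* ... deliver skb up the stack ... */
	return 0;
}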
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 2878dbfff..41a261355 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -30,6 +30,8 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
mtu = dst_mtu(skb_dst(skb));
if (skb->len > mtu) {
+ skb->protocol = htons(ETH_P_IP);
+
if (skb->sk)
xfrm_local_error(skb, mtu);
else
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index bff69746e..c10a9ee68 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -15,11 +15,12 @@
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/ip.h>
+#include <net/vrf.h>
static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
- int tos,
+ int tos, int oif,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr)
{
@@ -28,9 +29,12 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
memset(fl4, 0, sizeof(*fl4));
fl4->daddr = daddr->a4;
fl4->flowi4_tos = tos;
+ fl4->flowi4_oif = oif;
if (saddr)
fl4->saddr = saddr->a4;
+ fl4->flowi4_flags = FLOWI_FLAG_SKIP_NH_OIF;
+
rt = __ip_route_output_key(net, fl4);
if (!IS_ERR(rt))
return &rt->dst;
@@ -38,22 +42,22 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
return ERR_CAST(rt);
}
-static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
+static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, int oif,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr)
{
struct flowi4 fl4;
- return __xfrm4_dst_lookup(net, &fl4, tos, saddr, daddr);
+ return __xfrm4_dst_lookup(net, &fl4, tos, oif, saddr, daddr);
}
-static int xfrm4_get_saddr(struct net *net,
+static int xfrm4_get_saddr(struct net *net, int oif,
xfrm_address_t *saddr, xfrm_address_t *daddr)
{
struct dst_entry *dst;
struct flowi4 fl4;
- dst = __xfrm4_dst_lookup(net, &fl4, 0, NULL, daddr);
+ dst = __xfrm4_dst_lookup(net, &fl4, 0, oif, NULL, daddr);
if (IS_ERR(dst))
return -EHOSTUNREACH;
@@ -106,8 +110,10 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
struct flowi4 *fl4 = &fl->u.ip4;
int oif = 0;
- if (skb_dst(skb))
- oif = skb_dst(skb)->dev->ifindex;
+ if (skb_dst(skb)) {
+ oif = vrf_master_ifindex(skb_dst(skb)->dev) ?
+ : skb_dst(skb)->dev->ifindex;
+ }
memset(fl4, 0, sizeof(struct flowi4));
fl4->flowi4_mark = skb->mark;
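The "vrf_master_ifindex(...) ? : ..." above uses the GNU C conditional with an omitted middle operand: "a ?: b" evaluates a once and yields it when non-zero, otherwise b. In plain C the new oif selection reads (assuming an int return from vrf_master_ifindex()):

int master = vrf_master_ifindex(skb_dst(skb)->dev);

oif = master ? master : skb_dst(skb)->dev->ifindex;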
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 438a73aa7..983bb9997 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -5,16 +5,15 @@
# IPv6 as module will cause a CRASH if you try to unload it
menuconfig IPV6
tristate "The IPv6 protocol"
- default m
+ default y
---help---
- This is complemental support for the IP version 6.
- You will still be able to do traditional IPv4 networking as well.
+ Support for IP version 6 (IPv6).
For general information about IPv6, see
<https://en.wikipedia.org/wiki/IPv6>.
- For Linux IPv6 development information, see <http://www.linux-ipv6.org>.
- For specific information about IPv6 under Linux, read the HOWTO at
- <http://www.bieringer.de/linux/IPv6/>.
+ For specific information about IPv6 under Linux, see
+ Documentation/networking/ipv6.txt and read the HOWTO at
+ <http://www.tldp.org/HOWTO/Linux+IPv6-HOWTO/>
To compile this protocol support as a module, choose M here: the
module will be called ipv6.
@@ -93,6 +92,25 @@ config IPV6_MIP6
If unsure, say N.
+config IPV6_ILA
+ tristate "IPv6: Identifier Locator Addressing (ILA)"
+ select LWTUNNEL
+ ---help---
+ Support for IPv6 Identifier Locator Addressing (ILA).
+
+ ILA is a mechanism to do network virtualization without
+ encapsulation. The basic concept of ILA is that we split an
+ IPv6 address into a 64-bit locator and a 64-bit identifier. The
+ identifier is the identity of an entity in communication
+ ("who") and the locator expresses the location of the
+ entity ("where").
+
+ ILA can be configured using the "encap ila" option with
+ "ip -6 route" command. ILA is described in
+ https://tools.ietf.org/html/draft-herbert-nvo3-ila-00.
+
+ If unsure, say N.
+
config INET6_XFRM_TUNNEL
tristate
select INET6_TUNNEL
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 0f3f19997..2c900c7b7 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_INET6_XFRM_MODE_TUNNEL) += xfrm6_mode_tunnel.o
obj-$(CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION) += xfrm6_mode_ro.o
obj-$(CONFIG_INET6_XFRM_MODE_BEET) += xfrm6_mode_beet.o
obj-$(CONFIG_IPV6_MIP6) += mip6.o
+obj-$(CONFIG_IPV6_ILA) += ila.o
obj-$(CONFIG_NETFILTER) += netfilter/
obj-$(CONFIG_IPV6_VTI) += ip6_vti.o
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 21c2c818d..dd0082886 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -195,6 +195,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.max_addresses = IPV6_MAX_ADDRESSES,
.accept_ra_defrtr = 1,
.accept_ra_from_local = 0,
+ .accept_ra_min_hop_limit= 1,
.accept_ra_pinfo = 1,
#ifdef CONFIG_IPV6_ROUTER_PREF
.accept_ra_rtr_pref = 1,
@@ -211,7 +212,9 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.accept_ra_mtu = 1,
.stable_secret = {
.initialized = false,
- }
+ },
+ .use_oif_addrs_only = 0,
+ .ignore_routes_with_linkdown = 0,
};
static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -236,6 +239,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.max_addresses = IPV6_MAX_ADDRESSES,
.accept_ra_defrtr = 1,
.accept_ra_from_local = 0,
+ .accept_ra_min_hop_limit= 1,
.accept_ra_pinfo = 1,
#ifdef CONFIG_IPV6_ROUTER_PREF
.accept_ra_rtr_pref = 1,
@@ -253,6 +257,8 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.stable_secret = {
.initialized = false,
},
+ .use_oif_addrs_only = 0,
+ .ignore_routes_with_linkdown = 0,
};
/* Check if a valid qdisc is available */
@@ -411,6 +417,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
if (err) {
ipv6_mc_destroy_dev(ndev);
del_timer(&ndev->regen_timer);
+ snmp6_unregister_dev(ndev);
goto err_release;
}
/* protected by rtnl_lock */
@@ -468,6 +475,9 @@ static int inet6_netconf_msgsize_devconf(int type)
if (type == -1 || type == NETCONFA_PROXY_NEIGH)
size += nla_total_size(4);
+ if (type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
+ size += nla_total_size(4);
+
return size;
}
@@ -504,6 +514,11 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
goto nla_put_failure;
+ if ((type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
+ nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
+ devconf->ignore_routes_with_linkdown) < 0)
+ goto nla_put_failure;
+
nlmsg_end(skb, nlh);
return 0;
@@ -540,6 +555,7 @@ static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
[NETCONFA_IFINDEX] = { .len = sizeof(int) },
[NETCONFA_FORWARDING] = { .len = sizeof(int) },
[NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
+ [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
};
static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
@@ -762,6 +778,63 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
rt6_purge_dflt_routers(net);
return 1;
}
+
+static void addrconf_linkdown_change(struct net *net, __s32 newf)
+{
+ struct net_device *dev;
+ struct inet6_dev *idev;
+
+ for_each_netdev(net, dev) {
+ idev = __in6_dev_get(dev);
+ if (idev) {
+ int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);
+
+ idev->cnf.ignore_routes_with_linkdown = newf;
+ if (changed)
+ inet6_netconf_notify_devconf(dev_net(dev),
+ NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
+ dev->ifindex,
+ &idev->cnf);
+ }
+ }
+}
+
+static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
+{
+ struct net *net;
+ int old;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ net = (struct net *)table->extra2;
+ old = *p;
+ *p = newf;
+
+ if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
+ if ((!newf) ^ (!old))
+ inet6_netconf_notify_devconf(net,
+ NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
+ NETCONFA_IFINDEX_DEFAULT,
+ net->ipv6.devconf_dflt);
+ rtnl_unlock();
+ return 0;
+ }
+
+ if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
+ net->ipv6.devconf_dflt->ignore_routes_with_linkdown = newf;
+ addrconf_linkdown_change(net, newf);
+ if ((!newf) ^ (!old))
+ inet6_netconf_notify_devconf(net,
+ NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
+ NETCONFA_IFINDEX_ALL,
+ net->ipv6.devconf_all);
+ }
+ rtnl_unlock();
+
+ return 1;
+}
+
#endif
/* Nobody refers to this ifaddr, destroy it */
@@ -1358,15 +1431,96 @@ out:
return ret;
}
+static int __ipv6_dev_get_saddr(struct net *net,
+ struct ipv6_saddr_dst *dst,
+ struct inet6_dev *idev,
+ struct ipv6_saddr_score *scores,
+ int hiscore_idx)
+{
+ struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx];
+
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(score->ifa, &idev->addr_list, if_list) {
+ int i;
+
+ /*
+ * - Tentative Address (RFC2462 section 5.4)
+ * - A tentative address is not considered
+ * "assigned to an interface" in the traditional
+ * sense, unless it is also flagged as optimistic.
+ * - Candidate Source Address (section 4)
+ * - In any case, anycast addresses, multicast
+ * addresses, and the unspecified address MUST
+ * NOT be included in a candidate set.
+ */
+ if ((score->ifa->flags & IFA_F_TENTATIVE) &&
+ (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
+ continue;
+
+ score->addr_type = __ipv6_addr_type(&score->ifa->addr);
+
+ if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
+ score->addr_type & IPV6_ADDR_MULTICAST)) {
+ net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
+ idev->dev->name);
+ continue;
+ }
+
+ score->rule = -1;
+ bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
+
+ for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
+ int minihiscore, miniscore;
+
+ minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i);
+ miniscore = ipv6_get_saddr_eval(net, score, dst, i);
+
+ if (minihiscore > miniscore) {
+ if (i == IPV6_SADDR_RULE_SCOPE &&
+ score->scopedist > 0) {
+ /*
+ * special case:
+ * each remaining entry
+ * has too small a scope,
+ * because ifa entries
+ * are sorted by their scope
+ * values.
+ */
+ goto out;
+ }
+ break;
+ } else if (minihiscore < miniscore) {
+ if (hiscore->ifa)
+ in6_ifa_put(hiscore->ifa);
+
+ in6_ifa_hold(score->ifa);
+
+ swap(hiscore, score);
+ hiscore_idx = 1 - hiscore_idx;
+
+ /* restore our iterator */
+ score->ifa = hiscore->ifa;
+
+ break;
+ }
+ }
+ }
+out:
+ read_unlock_bh(&idev->lock);
+ return hiscore_idx;
+}
+
int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
const struct in6_addr *daddr, unsigned int prefs,
struct in6_addr *saddr)
{
- struct ipv6_saddr_score scores[2],
- *score = &scores[0], *hiscore = &scores[1];
+ struct ipv6_saddr_score scores[2], *hiscore;
struct ipv6_saddr_dst dst;
+ struct inet6_dev *idev;
struct net_device *dev;
int dst_type;
+ bool use_oif_addr = false;
+ int hiscore_idx = 0;
dst_type = __ipv6_addr_type(daddr);
dst.addr = daddr;
@@ -1375,105 +1529,50 @@ int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
dst.prefs = prefs;
- hiscore->rule = -1;
- hiscore->ifa = NULL;
+ scores[hiscore_idx].rule = -1;
+ scores[hiscore_idx].ifa = NULL;
rcu_read_lock();
- for_each_netdev_rcu(net, dev) {
- struct inet6_dev *idev;
-
- /* Candidate Source Address (section 4)
- * - multicast and link-local destination address,
- * the set of candidate source address MUST only
- * include addresses assigned to interfaces
- * belonging to the same link as the outgoing
- * interface.
- * (- For site-local destination addresses, the
- * set of candidate source addresses MUST only
- * include addresses assigned to interfaces
- * belonging to the same site as the outgoing
- * interface.)
- */
- if (((dst_type & IPV6_ADDR_MULTICAST) ||
- dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL) &&
- dst.ifindex && dev->ifindex != dst.ifindex)
- continue;
-
- idev = __in6_dev_get(dev);
- if (!idev)
- continue;
-
- read_lock_bh(&idev->lock);
- list_for_each_entry(score->ifa, &idev->addr_list, if_list) {
- int i;
-
- /*
- * - Tentative Address (RFC2462 section 5.4)
- * - A tentative address is not considered
- * "assigned to an interface" in the traditional
- * sense, unless it is also flagged as optimistic.
- * - Candidate Source Address (section 4)
- * - In any case, anycast addresses, multicast
- * addresses, and the unspecified address MUST
- * NOT be included in a candidate set.
- */
- if ((score->ifa->flags & IFA_F_TENTATIVE) &&
- (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
- continue;
-
- score->addr_type = __ipv6_addr_type(&score->ifa->addr);
+ /* Candidate Source Address (section 4)
+ * - multicast and link-local destination address,
+ * the set of candidate source address MUST only
+ * include addresses assigned to interfaces
+ * belonging to the same link as the outgoing
+ * interface.
+ * (- For site-local destination addresses, the
+ * set of candidate source addresses MUST only
+ * include addresses assigned to interfaces
+ * belonging to the same site as the outgoing
+ * interface.)
+ * - "It is RECOMMENDED that the candidate source addresses
+ * be the set of unicast addresses assigned to the
+ * interface that will be used to send to the destination
+ * (the 'outgoing' interface)." (RFC 6724)
+ */
+ if (dst_dev) {
+ idev = __in6_dev_get(dst_dev);
+ if ((dst_type & IPV6_ADDR_MULTICAST) ||
+ dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
+ (idev && idev->cnf.use_oif_addrs_only)) {
+ use_oif_addr = true;
+ }
+ }
- if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
- score->addr_type & IPV6_ADDR_MULTICAST)) {
- net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
- dev->name);
+ if (use_oif_addr) {
+ if (idev)
+ hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
+ } else {
+ for_each_netdev_rcu(net, dev) {
+ idev = __in6_dev_get(dev);
+ if (!idev)
continue;
- }
-
- score->rule = -1;
- bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
-
- for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
- int minihiscore, miniscore;
-
- minihiscore = ipv6_get_saddr_eval(net, hiscore, &dst, i);
- miniscore = ipv6_get_saddr_eval(net, score, &dst, i);
-
- if (minihiscore > miniscore) {
- if (i == IPV6_SADDR_RULE_SCOPE &&
- score->scopedist > 0) {
- /*
- * special case:
- * each remaining entry
- * has too small (not enough)
- * scope, because ifa entries
- * are sorted by their scope
- * values.
- */
- goto try_nextdev;
- }
- break;
- } else if (minihiscore < miniscore) {
- if (hiscore->ifa)
- in6_ifa_put(hiscore->ifa);
-
- in6_ifa_hold(score->ifa);
-
- swap(hiscore, score);
-
- /* restore our iterator */
- score->ifa = hiscore->ifa;
-
- break;
- }
- }
+ hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
}
-try_nextdev:
- read_unlock_bh(&idev->lock);
}
rcu_read_unlock();
+ hiscore = &scores[hiscore_idx];
if (!hiscore->ifa)
return -EADDRNOTAVAIL;
@@ -1845,37 +1944,6 @@ static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
__ipv6_dev_ac_dec(ifp->idev, &addr);
}
-static int addrconf_ifid_eui48(u8 *eui, struct net_device *dev)
-{
- if (dev->addr_len != ETH_ALEN)
- return -1;
- memcpy(eui, dev->dev_addr, 3);
- memcpy(eui + 5, dev->dev_addr + 3, 3);
-
- /*
- * The zSeries OSA network cards can be shared among various
- * OS instances, but the OSA cards have only one MAC address.
- * This leads to duplicate address conflicts in conjunction
- * with IPv6 if more than one instance uses the same card.
- *
- * The driver for these cards can deliver a unique 16-bit
- * identifier for each instance sharing the same card. It is
- * placed instead of 0xFFFE in the interface identifier. The
- * "u" bit of the interface identifier is not inverted in this
- * case. Hence the resulting interface identifier has local
- * scope according to RFC2373.
- */
- if (dev->dev_id) {
- eui[3] = (dev->dev_id >> 8) & 0xFF;
- eui[4] = dev->dev_id & 0xFF;
- } else {
- eui[3] = 0xFF;
- eui[4] = 0xFE;
- eui[0] ^= 2;
- }
- return 0;
-}
-
static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)
{
if (dev->addr_len != IEEE802154_ADDR_LEN)
@@ -3052,6 +3120,8 @@ static void addrconf_gre_config(struct net_device *dev)
}
addrconf_addr_gen(idev, true);
+ if (dev->flags & IFF_POINTOPOINT)
+ addrconf_add_mroute(dev);
}
#endif
@@ -3558,7 +3628,7 @@ static void addrconf_dad_work(struct work_struct *w)
/* send a neighbour solicitation for our addr */
addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
- ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any);
+ ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any, NULL);
out:
in6_ifa_put(ifp);
rtnl_unlock();
@@ -4560,6 +4630,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
+ array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
#ifdef CONFIG_IPV6_ROUTER_PREF
array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
@@ -4585,7 +4656,9 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
+ array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = cnf->ignore_routes_with_linkdown;
/* we omit DEVCONF_STABLE_SECRET for now */
+ array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
}
static inline size_t inet6_ifla6_size(void)
@@ -4605,6 +4678,7 @@ static inline size_t inet6_if_nlmsg_size(void)
+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
+ nla_total_size(4) /* IFLA_MTU */
+ nla_total_size(4) /* IFLA_LINK */
+ + nla_total_size(1) /* IFLA_OPERSTATE */
+ nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
}
@@ -4624,18 +4698,24 @@ static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
}
static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
- int items, int bytes, size_t syncpoff)
+ int bytes, size_t syncpoff)
{
- int i;
- int pad = bytes - sizeof(u64) * items;
+ int i, c;
+ u64 buff[IPSTATS_MIB_MAX];
+ int pad = bytes - sizeof(u64) * IPSTATS_MIB_MAX;
+
BUG_ON(pad < 0);
- /* Use put_unaligned() because stats may not be aligned for u64. */
- put_unaligned(items, &stats[0]);
- for (i = 1; i < items; i++)
- put_unaligned(snmp_fold_field64(mib, i, syncpoff), &stats[i]);
+ memset(buff, 0, sizeof(buff));
+ buff[0] = IPSTATS_MIB_MAX;
- memset(&stats[items], 0, pad);
+ for_each_possible_cpu(c) {
+ for (i = 1; i < IPSTATS_MIB_MAX; i++)
+ buff[i] += snmp_get_cpu_field64(mib, c, i, syncpoff);
+ }
+
+ memcpy(stats, buff, IPSTATS_MIB_MAX * sizeof(u64));
+ memset(&stats[IPSTATS_MIB_MAX], 0, pad);
}
static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
@@ -4643,8 +4723,8 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
{
switch (attrtype) {
case IFLA_INET6_STATS:
- __snmp6_fill_stats64(stats, idev->stats.ipv6,
- IPSTATS_MIB_MAX, bytes, offsetof(struct ipstats_mib, syncp));
+ __snmp6_fill_stats64(stats, idev->stats.ipv6, bytes,
+ offsetof(struct ipstats_mib, syncp));
break;
case IFLA_INET6_ICMP6STATS:
__snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, ICMP6_MIB_MAX, bytes);
@@ -4861,7 +4941,9 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
(dev->ifindex != dev_get_iflink(dev) &&
- nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
+ nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
+ nla_put_u8(skb, IFLA_OPERSTATE,
+ netif_running(dev) ? dev->operstate : IF_OPER_DOWN))
goto nla_put_failure;
protoinfo = nla_nest_start(skb, IFLA_PROTINFO);
if (!protoinfo)
@@ -5048,13 +5130,12 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
ifp->idev->dev, 0, 0);
- if (rt && ip6_del_rt(rt))
- dst_free(&rt->dst);
+ if (rt)
+ ip6_del_rt(rt);
}
dst_hold(&ifp->rt->dst);
- if (ip6_del_rt(ifp->rt))
- dst_free(&ifp->rt->dst);
+ ip6_del_rt(ifp->rt);
rt_genid_bump_ipv6(net);
break;
@@ -5306,6 +5387,34 @@ out:
return err;
}
+static
+int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
+ int write,
+ void __user *buffer,
+ size_t *lenp,
+ loff_t *ppos)
+{
+ int *valp = ctl->data;
+ int val = *valp;
+ loff_t pos = *ppos;
+ struct ctl_table lctl;
+ int ret;
+
+ /* ctl->data points to idev->cnf.ignore_routes_with_linkdown;
+ * we should not modify it until we get the rtnl lock.
+ */
+ lctl = *ctl;
+ lctl.data = &val;
+
+ ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
+
+ if (write)
+ ret = addrconf_fixup_linkdown(ctl, valp, val);
+ if (ret)
+ *ppos = pos;
+ return ret;
+}
+
static struct addrconf_sysctl_table
{
struct ctl_table_header *sysctl_header;
@@ -5456,6 +5565,13 @@ static struct addrconf_sysctl_table
.proc_handler = proc_dointvec,
},
{
+ .procname = "accept_ra_min_hop_limit",
+ .data = &ipv6_devconf.accept_ra_min_hop_limit,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "accept_ra_pinfo",
.data = &ipv6_devconf.accept_ra_pinfo,
.maxlen = sizeof(int),
@@ -5585,6 +5701,20 @@ static struct addrconf_sysctl_table
.proc_handler = addrconf_sysctl_stable_secret,
},
{
+ .procname = "use_oif_addrs_only",
+ .data = &ipv6_devconf.use_oif_addrs_only,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "ignore_routes_with_linkdown",
+ .data = &ipv6_devconf.ignore_routes_with_linkdown,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = addrconf_sysctl_ignore_routes_with_linkdown,
+ },
+ {
/* sentinel */
}
},
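Both new entries register through the standard addrconf sysctl table, so each knob should surface under /proc/sys/net/ipv6/conf/{all,default,<ifname>}/ with the procname above. A minimal userspace toggle, assuming that conventional layout (helper name invented):

#include <stdio.h>

static int set_ipv6_conf(const char *ifname, const char *knob, int val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/proc/sys/net/ipv6/conf/%s/%s", ifname, knob);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", val);
	return fclose(f);
}

/* e.g. set_ipv6_conf("eth0", "ignore_routes_with_linkdown", 1); */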
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index ca09bf49a..bfa941fc1 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -107,7 +107,16 @@ int inet6addr_notifier_call_chain(unsigned long val, void *v)
}
EXPORT_SYMBOL(inet6addr_notifier_call_chain);
-const struct ipv6_stub *ipv6_stub __read_mostly;
+static int eafnosupport_ipv6_dst_lookup(struct net *net, struct sock *u1,
+ struct dst_entry **u2,
+ struct flowi6 *u3)
+{
+ return -EAFNOSUPPORT;
+}
+
+const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) {
+ .ipv6_dst_lookup = eafnosupport_ipv6_dst_lookup,
+};
EXPORT_SYMBOL_GPL(ipv6_stub);
/* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 7de52b651..44bb66bde 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -197,6 +197,7 @@ lookup_protocol:
np->mcast_hops = IPV6_DEFAULT_MCASTHOPS;
np->mc_loop = 1;
np->pmtudisc = IPV6_PMTUDISC_WANT;
+ np->autoflowlabel = ip6_default_np_autolabel(sock_net(sk));
sk->sk_ipv6only = net->ipv6.sysctl.bindv6only;
/* Init the ipv4 part of the socket since we can have sockets
@@ -342,7 +343,8 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
*/
v4addr = LOOPBACK4_IPV6;
if (!(addr_type & IPV6_ADDR_MULTICAST)) {
- if (!(inet->freebind || inet->transparent) &&
+ if (!net->ipv6.sysctl.ip_nonlocal_bind &&
+ !(inet->freebind || inet->transparent) &&
!ipv6_chk_addr(net, &addr->sin6_addr,
dev, 0)) {
err = -EADDRNOTAVAIL;
@@ -679,8 +681,8 @@ bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
const struct ipv6_pinfo *np = inet6_sk(sk);
if (np->rxopt.all) {
- if ((opt->hop && (np->rxopt.bits.hopopts ||
- np->rxopt.bits.ohopopts)) ||
+ if (((opt->flags & IP6SKB_HOPBYHOP) &&
+ (np->rxopt.bits.hopopts || np->rxopt.bits.ohopopts)) ||
(ip6_flowinfo((struct ipv6hdr *) skb_network_header(skb)) &&
np->rxopt.bits.rxflow) ||
(opt->srcrt && (np->rxopt.bits.srcrt ||
@@ -766,10 +768,10 @@ static int __net_init inet6_net_init(struct net *net)
net->ipv6.sysctl.bindv6only = 0;
net->ipv6.sysctl.icmpv6_time = 1*HZ;
net->ipv6.sysctl.flowlabel_consistency = 1;
- net->ipv6.sysctl.auto_flowlabels = 0;
+ net->ipv6.sysctl.auto_flowlabels = IP6_DEFAULT_AUTO_FLOW_LABELS;
net->ipv6.sysctl.idgen_retries = 3;
net->ipv6.sysctl.idgen_delay = 1 * HZ;
- net->ipv6.sysctl.flowlabel_state_ranges = 1;
+ net->ipv6.sysctl.flowlabel_state_ranges = 0;
atomic_set(&net->ipv6.fib6_sernum, 1);
err = ipv6_init_mibs(net);
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index ed7d4e3f9..0630a4d5d 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -577,8 +577,10 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
work_iph = ah_alloc_tmp(ahash, nfrags + sglists, hdr_len +
ahp->icv_trunc_len + seqhi_len);
- if (!work_iph)
+ if (!work_iph) {
+ err = -ENOMEM;
goto out;
+ }
auth_data = ah_tmp_auth((u8 *)work_iph, hdr_len);
seqhi = (__be32 *)(auth_data + ahp->icv_trunc_len);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index b10a88986..9aadd5780 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -199,7 +199,7 @@ ipv4_connected:
NULL);
sk->sk_state = TCP_ESTABLISHED;
- ip6_set_txhash(sk);
+ sk_set_txhash(sk);
out:
fl6_sock_release(flowlabel);
return err;
@@ -568,8 +568,8 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
}
/* HbH is allowed only once */
- if (np->rxopt.bits.hopopts && opt->hop) {
- u8 *ptr = nh + opt->hop;
+ if (np->rxopt.bits.hopopts && (opt->flags & IP6SKB_HOPBYHOP)) {
+ u8 *ptr = nh + sizeof(struct ipv6hdr);
put_cmsg(msg, SOL_IPV6, IPV6_HOPOPTS, (ptr[1]+1)<<3, ptr);
}
@@ -630,8 +630,8 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
int hlim = ipv6_hdr(skb)->hop_limit;
put_cmsg(msg, SOL_IPV6, IPV6_2292HOPLIMIT, sizeof(hlim), &hlim);
}
- if (np->rxopt.bits.ohopopts && opt->hop) {
- u8 *ptr = nh + opt->hop;
+ if (np->rxopt.bits.ohopopts && (opt->flags & IP6SKB_HOPBYHOP)) {
+ u8 *ptr = nh + sizeof(struct ipv6hdr);
put_cmsg(msg, SOL_IPV6, IPV6_2292HOPOPTS, (ptr[1]+1)<<3, ptr);
}
if (np->rxopt.bits.odstopts && opt->dst0) {
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index a7bbbe455..ce203b040 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -632,7 +632,7 @@ int ipv6_parse_hopopts(struct sk_buff *skb)
return -1;
}
- opt->hop = sizeof(struct ipv6hdr);
+ opt->flags |= IP6SKB_HOPBYHOP;
if (ip6_parse_tlv(tlvprochopopt_lst, skb)) {
skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
opt = IP6CB(skb);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 2367a16ea..ed33abf57 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -32,6 +32,7 @@ struct fib6_rule {
struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
int flags, pol_lookup_t lookup)
{
+ struct rt6_info *rt;
struct fib_lookup_arg arg = {
.lookup_ptr = lookup,
.flags = FIB_LOOKUP_NOREF,
@@ -40,11 +41,21 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
fib_rules_lookup(net->ipv6.fib6_rules_ops,
flowi6_to_flowi(fl6), flags, &arg);
- if (arg.result)
- return arg.result;
+ rt = arg.result;
- dst_hold(&net->ipv6.ip6_null_entry->dst);
- return &net->ipv6.ip6_null_entry->dst;
+ if (!rt) {
+ dst_hold(&net->ipv6.ip6_null_entry->dst);
+ return &net->ipv6.ip6_null_entry->dst;
+ }
+
+ if (rt->rt6i_flags & RTF_REJECT &&
+ rt->dst.error == -EAGAIN) {
+ ip6_rt_put(rt);
+ rt = net->ipv6.ip6_null_entry;
+ dst_hold(&rt->dst);
+ }
+
+ return &rt->dst;
}
static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
@@ -258,11 +269,6 @@ nla_put_failure:
return -ENOBUFS;
}
-static u32 fib6_rule_default_pref(struct fib_rules_ops *ops)
-{
- return 0x3FFF;
-}
-
static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule)
{
return nla_total_size(16) /* dst */
@@ -279,7 +285,6 @@ static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
.configure = fib6_rule_configure,
.compare = fib6_rule_compare,
.fill = fib6_rule_fill,
- .default_pref = fib6_rule_default_pref,
.nlmsg_payload = fib6_rule_nlmsg_payload,
.nlgroup = RTNLGRP_IPV6_RULE,
.policy = fib6_rule_policy,
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 713d7434c..6c2b2132c 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -329,7 +329,7 @@ static struct dst_entry *icmpv6_route_lookup(struct net *net,
struct flowi6 fl2;
int err;
- err = ip6_dst_lookup(sk, &dst, fl6);
+ err = ip6_dst_lookup(net, sk, &dst, fl6);
if (err)
return ERR_PTR(err);
@@ -361,7 +361,7 @@ static struct dst_entry *icmpv6_route_lookup(struct net *net,
if (err)
goto relookup_failed;
- err = ip6_dst_lookup(sk, &dst2, &fl2);
+ err = ip6_dst_lookup(net, sk, &dst2, &fl2);
if (err)
goto relookup_failed;
@@ -591,7 +591,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
- err = ip6_dst_lookup(sk, &dst, &fl6);
+ err = ip6_dst_lookup(net, sk, &dst, &fl6);
if (err)
goto out;
dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
diff --git a/net/ipv6/ila.c b/net/ipv6/ila.c
new file mode 100644
index 000000000..678d2df4b
--- /dev/null
+++ b/net/ipv6/ila.c
@@ -0,0 +1,229 @@
+#include <linux/errno.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/types.h>
+#include <net/checksum.h>
+#include <net/ip.h>
+#include <net/ip6_fib.h>
+#include <net/lwtunnel.h>
+#include <net/protocol.h>
+#include <uapi/linux/ila.h>
+
+struct ila_params {
+ __be64 locator;
+ __be64 locator_match;
+ __wsum csum_diff;
+};
+
+static inline struct ila_params *ila_params_lwtunnel(
+ struct lwtunnel_state *lwstate)
+{
+ return (struct ila_params *)lwstate->data;
+}
+
+static inline __wsum compute_csum_diff8(const __be32 *from, const __be32 *to)
+{
+ __be32 diff[] = {
+ ~from[0], ~from[1], to[0], to[1],
+ };
+
+ return csum_partial(diff, sizeof(diff), 0);
+}
+
+static inline __wsum get_csum_diff(struct ipv6hdr *ip6h, struct ila_params *p)
+{
+ if (*(__be64 *)&ip6h->daddr == p->locator_match)
+ return p->csum_diff;
+ else
+ return compute_csum_diff8((__be32 *)&ip6h->daddr,
+ (__be32 *)&p->locator);
+}
+
+static void update_ipv6_locator(struct sk_buff *skb, struct ila_params *p)
+{
+ __wsum diff;
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ size_t nhoff = sizeof(struct ipv6hdr);
+
+ /* First update checksum */
+ switch (ip6h->nexthdr) {
+ case NEXTHDR_TCP:
+ if (likely(pskb_may_pull(skb, nhoff + sizeof(struct tcphdr)))) {
+ struct tcphdr *th = (struct tcphdr *)
+ (skb_network_header(skb) + nhoff);
+
+ diff = get_csum_diff(ip6h, p);
+ inet_proto_csum_replace_by_diff(&th->check, skb,
+ diff, true);
+ }
+ break;
+ case NEXTHDR_UDP:
+ if (likely(pskb_may_pull(skb, nhoff + sizeof(struct udphdr)))) {
+ struct udphdr *uh = (struct udphdr *)
+ (skb_network_header(skb) + nhoff);
+
+ if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+ diff = get_csum_diff(ip6h, p);
+ inet_proto_csum_replace_by_diff(&uh->check, skb,
+ diff, true);
+ if (!uh->check)
+ uh->check = CSUM_MANGLED_0;
+ }
+ }
+ break;
+ case NEXTHDR_ICMP:
+ if (likely(pskb_may_pull(skb,
+ nhoff + sizeof(struct icmp6hdr)))) {
+ struct icmp6hdr *ih = (struct icmp6hdr *)
+ (skb_network_header(skb) + nhoff);
+
+ diff = get_csum_diff(ip6h, p);
+ inet_proto_csum_replace_by_diff(&ih->icmp6_cksum, skb,
+ diff, true);
+ }
+ break;
+ }
+
+ /* Now change destination address */
+ *(__be64 *)&ip6h->daddr = p->locator;
+}
+
+static int ila_output(struct sock *sk, struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+
+ if (skb->protocol != htons(ETH_P_IPV6))
+ goto drop;
+
+ update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
+
+ return dst->lwtstate->orig_output(sk, skb);
+
+drop:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+static int ila_input(struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+
+ if (skb->protocol != htons(ETH_P_IPV6))
+ goto drop;
+
+ update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
+
+ return dst->lwtstate->orig_input(skb);
+
+drop:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+static struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = {
+ [ILA_ATTR_LOCATOR] = { .type = NLA_U64, },
+};
+
+static int ila_build_state(struct net_device *dev, struct nlattr *nla,
+ unsigned int family, const void *cfg,
+ struct lwtunnel_state **ts)
+{
+ struct ila_params *p;
+ struct nlattr *tb[ILA_ATTR_MAX + 1];
+ size_t encap_len = sizeof(*p);
+ struct lwtunnel_state *newts;
+ const struct fib6_config *cfg6 = cfg;
+ int ret;
+
+ if (family != AF_INET6)
+ return -EINVAL;
+
+ ret = nla_parse_nested(tb, ILA_ATTR_MAX, nla,
+ ila_nl_policy);
+ if (ret < 0)
+ return ret;
+
+ if (!tb[ILA_ATTR_LOCATOR])
+ return -EINVAL;
+
+ newts = lwtunnel_state_alloc(encap_len);
+ if (!newts)
+ return -ENOMEM;
+
+ newts->len = encap_len;
+ p = ila_params_lwtunnel(newts);
+
+ p->locator = (__force __be64)nla_get_u64(tb[ILA_ATTR_LOCATOR]);
+
+ if (cfg6->fc_dst_len > sizeof(__be64)) {
+ /* Precompute checksum difference for translation since we
+ * know both the old locator and the new one.
+ */
+ p->locator_match = *(__be64 *)&cfg6->fc_dst;
+ p->csum_diff = compute_csum_diff8(
+ (__be32 *)&p->locator_match, (__be32 *)&p->locator);
+ }
+
+ newts->type = LWTUNNEL_ENCAP_ILA;
+ newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT |
+ LWTUNNEL_STATE_INPUT_REDIRECT;
+
+ *ts = newts;
+
+ return 0;
+}
+
+static int ila_fill_encap_info(struct sk_buff *skb,
+ struct lwtunnel_state *lwtstate)
+{
+ struct ila_params *p = ila_params_lwtunnel(lwtstate);
+
+ if (nla_put_u64(skb, ILA_ATTR_LOCATOR, (__force u64)p->locator))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int ila_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+ /* No encapsulation overhead */
+ return 0;
+}
+
+static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+ struct ila_params *a_p = ila_params_lwtunnel(a);
+ struct ila_params *b_p = ila_params_lwtunnel(b);
+
+ return (a_p->locator != b_p->locator);
+}
+
+static const struct lwtunnel_encap_ops ila_encap_ops = {
+ .build_state = ila_build_state,
+ .output = ila_output,
+ .input = ila_input,
+ .fill_encap = ila_fill_encap_info,
+ .get_encap_size = ila_encap_nlsize,
+ .cmp_encap = ila_encap_cmp,
+};
+
+static int __init ila_init(void)
+{
+ return lwtunnel_encap_add_ops(&ila_encap_ops, LWTUNNEL_ENCAP_ILA);
+}
+
+static void __exit ila_fini(void)
+{
+ lwtunnel_encap_del_ops(&ila_encap_ops, LWTUNNEL_ENCAP_ILA);
+}
+
+module_init(ila_init);
+module_exit(ila_fini);
+MODULE_AUTHOR("Tom Herbert <tom@herbertland.com>");
+MODULE_LICENSE("GPL");
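The csum_diff precomputation above is an instance of incremental checksum update (RFC 1624): summing ~old and new once gives a constant that can be folded into any transport checksum covering the rewritten 64-bit locator. A standalone demonstration in ordinary C (all names invented, independent of the kernel helpers):

#include <stdint.h>
#include <stdio.h>

/* One's-complement sum of 16-bit big-endian words; len must be even. */
static uint32_t ocsum(const uint8_t *p, int len, uint32_t sum)
{
	for (int i = 0; i < len; i += 2)
		sum += (uint32_t)(p[i] << 8 | p[i + 1]);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

int main(void)
{
	uint8_t old_loc[8] = { 0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 1 };
	uint8_t new_loc[8] = { 0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 2 };
	uint8_t not_old[8];
	int i;

	for (i = 0; i < 8; i++)
		not_old[i] = ~old_loc[i];

	/* diff = sum(~old) + sum(new): adding it to a checksum computed
	 * over the old locator yields the checksum over the new one.
	 */
	uint32_t diff = ocsum(new_loc, 8, ocsum(not_old, 8, 0));
	uint32_t patched = ocsum(old_loc, 8, 0) + diff;

	while (patched >> 16)
		patched = (patched & 0xffff) + (patched >> 16);
	printf("%04x == %04x\n", ocsum(new_loc, 8, 0), patched);
	return 0;
}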
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index b4fd96de9..6ac8dad01 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -207,7 +207,6 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
struct sock *sk2;
const struct hlist_nulls_node *node;
struct inet_timewait_sock *tw = NULL;
- int twrefcnt = 0;
spin_lock(lock);
@@ -234,21 +233,17 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
WARN_ON(!sk_unhashed(sk));
__sk_nulls_add_node_rcu(sk, &head->chain);
if (tw) {
- twrefcnt = inet_twsk_unhash(tw);
+ sk_nulls_del_node_init_rcu((struct sock *)tw);
NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
}
spin_unlock(lock);
- if (twrefcnt)
- inet_twsk_put(tw);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
if (twp) {
*twp = tw;
} else if (tw) {
/* Silly. Should hash-dance instead... */
- inet_twsk_deschedule(tw);
-
- inet_twsk_put(tw);
+ inet_twsk_deschedule_put(tw);
}
return 0;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 548c6237b..6cedc62b2 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -32,6 +32,7 @@
#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
+#include <net/lwtunnel.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
@@ -154,6 +155,11 @@ static void node_free(struct fib6_node *fn)
kmem_cache_free(fib6_node_kmem, fn);
}
+static void rt6_rcu_free(struct rt6_info *rt)
+{
+ call_rcu(&rt->dst.rcu_head, dst_rcu_free);
+}
+
static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
{
int cpu;
@@ -168,7 +174,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu);
pcpu_rt = *ppcpu_rt;
if (pcpu_rt) {
- dst_free(&pcpu_rt->dst);
+ rt6_rcu_free(pcpu_rt);
*ppcpu_rt = NULL;
}
}
@@ -180,7 +186,7 @@ static void rt6_release(struct rt6_info *rt)
{
if (atomic_dec_and_test(&rt->rt6i_ref)) {
rt6_free_pcpu(rt);
- dst_free(&rt->dst);
+ rt6_rcu_free(rt);
}
}
@@ -279,7 +285,17 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id)
struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
int flags, pol_lookup_t lookup)
{
- return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
+ struct rt6_info *rt;
+
+ rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
+ if (rt->rt6i_flags & RTF_REJECT &&
+ rt->dst.error == -EAGAIN) {
+ ip6_rt_put(rt);
+ rt = net->ipv6.ip6_null_entry;
+ dst_hold(&rt->dst);
+ }
+
+ return &rt->dst;
}
static void __net_init fib6_tables_init(struct net *net)
@@ -845,7 +861,7 @@ add:
*ins = rt;
rt->rt6i_node = fn;
atomic_inc(&rt->rt6i_ref);
- inet6_rt_notify(RTM_NEWROUTE, rt, info);
+ inet6_rt_notify(RTM_NEWROUTE, rt, info, 0);
info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
if (!(fn->fn_flags & RTN_RTINFO)) {
@@ -871,7 +887,7 @@ add:
rt->rt6i_node = fn;
rt->dst.rt6_next = iter->dst.rt6_next;
atomic_inc(&rt->rt6i_ref);
- inet6_rt_notify(RTM_NEWROUTE, rt, info);
+ inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
if (!(fn->fn_flags & RTN_RTINFO)) {
info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
fn->fn_flags |= RTN_RTINFO;
@@ -932,6 +948,10 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
int replace_required = 0;
int sernum = fib6_new_sernum(info->nl_net);
+ if (WARN_ON_ONCE((rt->dst.flags & DST_NOCACHE) &&
+ !atomic_read(&rt->dst.__refcnt)))
+ return -EINVAL;
+
if (info->nlh) {
if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
allow_create = 0;
@@ -1024,6 +1044,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
fib6_start_gc(info->nl_net, rt);
if (!(rt->rt6i_flags & RTF_CACHE))
fib6_prune_clones(info->nl_net, pn);
+ rt->dst.flags &= ~DST_NOCACHE;
}
out:
@@ -1048,7 +1069,8 @@ out:
atomic_inc(&pn->leaf->rt6i_ref);
}
#endif
- dst_free(&rt->dst);
+ if (!(rt->dst.flags & DST_NOCACHE))
+ dst_free(&rt->dst);
}
return err;
@@ -1059,7 +1081,8 @@ out:
st_failure:
if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
fib6_repair_tree(info->nl_net, fn);
- dst_free(&rt->dst);
+ if (!(rt->dst.flags & DST_NOCACHE))
+ dst_free(&rt->dst);
return err;
#endif
}
@@ -1409,7 +1432,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
fib6_purge_rt(rt, fn, net);
- inet6_rt_notify(RTM_DELROUTE, rt, info);
+ inet6_rt_notify(RTM_DELROUTE, rt, info, 0);
rt6_release(rt);
}
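
[note] The rt6_rcu_free() change above defers the free through call_rcu(), whose callback receives only the embedded rcu_head; container_of() recovers the enclosing object, which is how dst_rcu_free() gets back to the dst_entry. A standalone sketch of that recovery step with illustrative types (the real struct rcu_head and call_rcu() live in the kernel):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rcu_head { struct rcu_head *next; };    /* simplified */

    struct dst_like {
        int refcnt;
        struct rcu_head rcu_head;  /* embedded, as in struct dst_entry */
    };

    static void free_cb(struct rcu_head *head)
    {
        struct dst_like *d = container_of(head, struct dst_like, rcu_head);
        printf("freeing object with refcnt=%d\n", d->refcnt);
    }

    int main(void)
    {
        struct dst_like d = { .refcnt = 0 };
        free_cb(&d.rcu_head);   /* a real call_rcu() would defer this */
        return 0;
    }
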
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 69f4f689f..3c7b9310b 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -404,13 +404,13 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
struct ipv6_tlv_tnl_enc_lim *tel;
__u32 mtu;
case ICMPV6_DEST_UNREACH:
- net_warn_ratelimited("%s: Path to destination invalid or inactive!\n",
- t->parms.name);
+ net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
+ t->parms.name);
break;
case ICMPV6_TIME_EXCEED:
if (code == ICMPV6_EXC_HOPLIMIT) {
- net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
- t->parms.name);
+ net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
+ t->parms.name);
}
break;
case ICMPV6_PARAMPROB:
@@ -421,12 +421,12 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (teli && teli == be32_to_cpu(info) - 2) {
tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
if (tel->encap_limit == 0) {
- net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
- t->parms.name);
+ net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
+ t->parms.name);
}
} else {
- net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
- t->parms.name);
+ net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
+ t->parms.name);
}
break;
case ICMPV6_PKT_TOOBIG:
@@ -634,20 +634,20 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
}
if (!fl6->flowi6_mark)
- dst = ip6_tnl_dst_check(tunnel);
+ dst = ip6_tnl_dst_get(tunnel);
if (!dst) {
- ndst = ip6_route_output(net, NULL, fl6);
+ dst = ip6_route_output(net, NULL, fl6);
- if (ndst->error)
+ if (dst->error)
goto tx_err_link_failure;
- ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0);
- if (IS_ERR(ndst)) {
- err = PTR_ERR(ndst);
- ndst = NULL;
+ dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ dst = NULL;
goto tx_err_link_failure;
}
- dst = ndst;
+ ndst = dst;
}
tdev = dst->dev;
@@ -702,12 +702,9 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
skb = new_skb;
}
- if (fl6->flowi6_mark) {
- skb_dst_set(skb, dst);
- ndst = NULL;
- } else {
- skb_dst_set_noref(skb, dst);
- }
+ if (!fl6->flowi6_mark && ndst)
+ ip6_tnl_dst_set(tunnel, ndst);
+ skb_dst_set(skb, dst);
proto = NEXTHDR_GRE;
if (encap_limit >= 0) {
@@ -729,7 +726,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
*/
ipv6h = ipv6_hdr(skb);
ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
- ip6_make_flowlabel(net, skb, fl6->flowlabel, false));
+ ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
ipv6h->hop_limit = tunnel->parms.hop_limit;
ipv6h->nexthdr = proto;
ipv6h->saddr = fl6->saddr;
@@ -762,14 +759,12 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
skb_set_inner_protocol(skb, protocol);
ip6tunnel_xmit(NULL, skb, dev);
- if (ndst)
- ip6_tnl_dst_store(tunnel, ndst);
return 0;
tx_err_link_failure:
stats->tx_carrier_errors++;
dst_link_failure(skb);
tx_err_dst_release:
- dst_release(ndst);
+ dst_release(dst);
return err;
}
@@ -1183,7 +1178,8 @@ static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
ip6_flow_hdr(ipv6h, 0,
ip6_make_flowlabel(dev_net(dev), skb,
- t->fl.u.ip6.flowlabel, false));
+ t->fl.u.ip6.flowlabel, true,
+ &t->fl.u.ip6));
ipv6h->hop_limit = t->parms.hop_limit;
ipv6h->nexthdr = NEXTHDR_GRE;
ipv6h->saddr = t->parms.laddr;
@@ -1222,6 +1218,9 @@ static const struct net_device_ops ip6gre_netdev_ops = {
static void ip6gre_dev_free(struct net_device *dev)
{
+ struct ip6_tnl *t = netdev_priv(dev);
+
+ ip6_tnl_dst_destroy(t);
free_percpu(dev->tstats);
free_netdev(dev);
}
@@ -1244,9 +1243,10 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
netif_keep_dst(dev);
}
-static int ip6gre_tunnel_init(struct net_device *dev)
+static int ip6gre_tunnel_init_common(struct net_device *dev)
{
struct ip6_tnl *tunnel;
+ int ret;
tunnel = netdev_priv(dev);
@@ -1254,16 +1254,37 @@ static int ip6gre_tunnel_init(struct net_device *dev)
tunnel->net = dev_net(dev);
strcpy(tunnel->parms.name, dev->name);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
+ ret = ip6_tnl_dst_init(tunnel);
+ if (ret) {
+ free_percpu(dev->tstats);
+ dev->tstats = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ip6gre_tunnel_init(struct net_device *dev)
+{
+ struct ip6_tnl *tunnel;
+ int ret;
+
+ ret = ip6gre_tunnel_init_common(dev);
+ if (ret)
+ return ret;
+
+ tunnel = netdev_priv(dev);
+
memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));
if (ipv6_addr_any(&tunnel->parms.raddr))
dev->header_ops = &ip6gre_header_ops;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
return 0;
}
@@ -1459,19 +1480,16 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
static int ip6gre_tap_init(struct net_device *dev)
{
struct ip6_tnl *tunnel;
+ int ret;
- tunnel = netdev_priv(dev);
+ ret = ip6gre_tunnel_init_common(dev);
+ if (ret)
+ return ret;
- tunnel->dev = dev;
- tunnel->net = dev_net(dev);
- strcpy(tunnel->parms.name, dev->name);
+ tunnel = netdev_priv(dev);
ip6gre_tnl_link_config(tunnel, 1);
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
return 0;
}
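
[note] The ip6gre rework above factors shared setup into ip6gre_tunnel_init_common() with staged unwinding: if the dst-cache init fails after tstats was allocated, the earlier allocation is released before returning, so callers never see a half-initialized device. A generic sketch of that pattern, with an illustrative struct standing in for ip6_tnl:

    #include <stdio.h>
    #include <stdlib.h>

    struct tnl {                 /* illustrative stand-in for ip6_tnl */
        void *stats;
        void *dst_cache;
    };

    static int tnl_init_common(struct tnl *t)
    {
        t->stats = calloc(1, 64);
        if (!t->stats)
            return -1;

        t->dst_cache = calloc(1, 64);
        if (!t->dst_cache) {
            free(t->stats);      /* unwind the earlier step */
            t->stats = NULL;
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct tnl t;

        if (tnl_init_common(&t) == 0)
            puts("tunnel private data initialized");
        free(t.dst_cache);
        free(t.stats);
        return 0;
    }
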
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 57990c929..adba03ac7 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -45,6 +45,7 @@
#include <net/addrconf.h>
#include <net/xfrm.h>
#include <net/inet_ecn.h>
+#include <net/dst_metadata.h>
int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb)
{
@@ -55,7 +56,7 @@ int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb)
if (ipprot && ipprot->early_demux)
ipprot->early_demux(skb);
}
- if (!skb_dst(skb))
+ if (!skb_valid_dst(skb))
ip6_route_input(skb);
return dst_input(skb);
@@ -98,7 +99,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
* arrived via the sending interface (ethX), because of the
* nature of scoping architecture. --yoshfuji
*/
- IP6CB(skb)->iif = skb_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;
+ IP6CB(skb)->iif = skb_valid_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;
if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
goto err;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d5f771666..f84ec4e9b 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -207,7 +207,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
hlimit = ip6_dst_hoplimit(dst);
ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
- np->autoflowlabel));
+ np->autoflowlabel, fl6));
hdr->payload_len = htons(seg_len);
hdr->nexthdr = proto;
@@ -376,6 +376,9 @@ int ip6_forward(struct sk_buff *skb)
if (skb->pkt_type != PACKET_HOST)
goto drop;
+ if (unlikely(skb->sk))
+ goto drop;
+
if (skb_warn_if_lro(skb))
goto drop;
@@ -581,25 +584,29 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb,
if (np->frag_size)
mtu = np->frag_size;
}
+ if (mtu < hlen + sizeof(struct frag_hdr) + 8)
+ goto fail_toobig;
mtu -= hlen + sizeof(struct frag_hdr);
frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
&ipv6_hdr(skb)->saddr);
+ hroom = LL_RESERVED_SPACE(rt->dst.dev);
if (skb_has_frag_list(skb)) {
int first_len = skb_pagelen(skb);
struct sk_buff *frag2;
if (first_len - hlen > mtu ||
((first_len - hlen) & 7) ||
- skb_cloned(skb))
+ skb_cloned(skb) ||
+ skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
goto slow_path;
skb_walk_frags(skb, frag) {
/* Correct geometry. */
if (frag->len > mtu ||
((frag->len & 7) && frag->next) ||
- skb_headroom(frag) < hlen)
+ skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr)))
goto slow_path_clean;
/* Partially cloned skb? */
@@ -616,8 +623,6 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb,
err = 0;
offset = 0;
- frag = skb_shinfo(skb)->frag_list;
- skb_frag_list_init(skb);
/* BUILD HEADER */
*prevhdr = NEXTHDR_FRAGMENT;
@@ -625,8 +630,11 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb,
if (!tmp_hdr) {
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_FRAGFAILS);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto fail;
}
+ frag = skb_shinfo(skb)->frag_list;
+ skb_frag_list_init(skb);
__skb_pull(skb, hlen);
fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
@@ -723,7 +731,6 @@ slow_path:
*/
*prevhdr = NEXTHDR_FRAGMENT;
- hroom = LL_RESERVED_SPACE(rt->dst.dev);
troom = rt->dst.dev->needed_tailroom;
/*
@@ -872,7 +879,8 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
#ifdef CONFIG_IPV6_SUBTREES
ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
- (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
+ (!(fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) &&
+ (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex))) {
dst_release(dst);
dst = NULL;
}
@@ -881,10 +889,9 @@ out:
return dst;
}
-static int ip6_dst_lookup_tail(struct sock *sk,
+static int ip6_dst_lookup_tail(struct net *net, struct sock *sk,
struct dst_entry **dst, struct flowi6 *fl6)
{
- struct net *net = sock_net(sk);
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
struct neighbour *n;
struct rt6_info *rt;
@@ -994,10 +1001,11 @@ out_err_release:
*
* It returns zero on success, or a standard errno code on error.
*/
-int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
+int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
+ struct flowi6 *fl6)
{
*dst = NULL;
- return ip6_dst_lookup_tail(sk, dst, fl6);
+ return ip6_dst_lookup_tail(net, sk, dst, fl6);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);
@@ -1018,11 +1026,13 @@ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
struct dst_entry *dst = NULL;
int err;
- err = ip6_dst_lookup_tail(sk, &dst, fl6);
+ err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
if (err)
return ERR_PTR(err);
if (final_dst)
fl6->daddr = *final_dst;
+ if (!fl6->flowi6_oif)
+ fl6->flowi6_oif = dst->dev->ifindex;
return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
@@ -1050,7 +1060,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
dst = ip6_sk_dst_check(sk, dst, fl6);
- err = ip6_dst_lookup_tail(sk, &dst, fl6);
+ err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
if (err)
return ERR_PTR(err);
if (final_dst)
@@ -1647,7 +1657,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
ip6_flow_hdr(hdr, v6_cork->tclass,
ip6_make_flowlabel(net, skb, fl6->flowlabel,
- np->autoflowlabel));
+ np->autoflowlabel, fl6));
hdr->hop_limit = v6_cork->hop_limit;
hdr->nexthdr = proto;
hdr->saddr = fl6->saddr;
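
[note] The new guard in ip6_fragment() rejects MTUs that cannot carry even a minimal fragment: each fragment must hold the unfragmentable part (hlen), the 8-byte IPv6 fragment header, and at least one 8-byte unit of payload, since fragment offsets are expressed in 8-byte units. A small check of that arithmetic:

    #include <stdbool.h>
    #include <stdio.h>

    #define FRAG_HDR_LEN 8

    static bool mtu_can_fragment(unsigned int mtu, unsigned int hlen)
    {
        return mtu >= hlen + FRAG_HDR_LEN + 8;
    }

    static unsigned int frag_payload(unsigned int mtu, unsigned int hlen)
    {
        /* usable payload per fragment, rounded down to an 8-byte unit */
        return (mtu - hlen - FRAG_HDR_LEN) & ~7u;
    }

    int main(void)
    {
        printf("%d\n", mtu_can_fragment(1280, 40));   /* 1 */
        printf("%u\n", frag_payload(1280, 40));       /* 1232 */
        return 0;
    }
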
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 2e67b6601..eabffbb89 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -126,36 +126,92 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
* Locking : hash tables are protected by RCU and RTNL
*/
-struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
+static void ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst,
+ struct dst_entry *dst)
{
- struct dst_entry *dst = t->dst_cache;
+ write_seqlock_bh(&idst->lock);
+ dst_release(rcu_dereference_protected(
+ idst->dst,
+ lockdep_is_held(&idst->lock.lock)));
+ if (dst) {
+ dst_hold(dst);
+ idst->cookie = rt6_get_cookie((struct rt6_info *)dst);
+ } else {
+ idst->cookie = 0;
+ }
+ rcu_assign_pointer(idst->dst, dst);
+ write_sequnlock_bh(&idst->lock);
+}
+
+struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t)
+{
+ struct ip6_tnl_dst *idst;
+ struct dst_entry *dst;
+ unsigned int seq;
+ u32 cookie;
- if (dst && dst->obsolete &&
- !dst->ops->check(dst, t->dst_cookie)) {
- t->dst_cache = NULL;
+ idst = raw_cpu_ptr(t->dst_cache);
+
+ rcu_read_lock();
+ do {
+ seq = read_seqbegin(&idst->lock);
+ dst = rcu_dereference(idst->dst);
+ cookie = idst->cookie;
+ } while (read_seqretry(&idst->lock, seq));
+
+ if (dst && !atomic_inc_not_zero(&dst->__refcnt))
+ dst = NULL;
+ rcu_read_unlock();
+
+ if (dst && dst->obsolete && !dst->ops->check(dst, cookie)) {
+ ip6_tnl_per_cpu_dst_set(idst, NULL);
dst_release(dst);
- return NULL;
+ dst = NULL;
}
-
return dst;
}
-EXPORT_SYMBOL_GPL(ip6_tnl_dst_check);
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_get);
void ip6_tnl_dst_reset(struct ip6_tnl *t)
{
- dst_release(t->dst_cache);
- t->dst_cache = NULL;
+ int i;
+
+ for_each_possible_cpu(i)
+ ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), NULL);
}
EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset);
-void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
+void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst)
+{
+ ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), dst);
+
+}
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_set);
+
+void ip6_tnl_dst_destroy(struct ip6_tnl *t)
{
- struct rt6_info *rt = (struct rt6_info *) dst;
- t->dst_cookie = rt6_get_cookie(rt);
- dst_release(t->dst_cache);
- t->dst_cache = dst;
+ if (!t->dst_cache)
+ return;
+
+ ip6_tnl_dst_reset(t);
+ free_percpu(t->dst_cache);
}
-EXPORT_SYMBOL_GPL(ip6_tnl_dst_store);
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_destroy);
+
+int ip6_tnl_dst_init(struct ip6_tnl *t)
+{
+ int i;
+
+ t->dst_cache = alloc_percpu(struct ip6_tnl_dst);
+ if (!t->dst_cache)
+ return -ENOMEM;
+
+ for_each_possible_cpu(i)
+ seqlock_init(&per_cpu_ptr(t->dst_cache, i)->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_init);
/**
* ip6_tnl_lookup - fetch tunnel matching the end-point addresses
@@ -271,6 +327,9 @@ ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
static void ip6_dev_free(struct net_device *dev)
{
+ struct ip6_tnl *t = netdev_priv(dev);
+
+ ip6_tnl_dst_destroy(t);
free_percpu(dev->tstats);
free_netdev(dev);
}
@@ -510,14 +569,14 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
struct ipv6_tlv_tnl_enc_lim *tel;
__u32 mtu;
case ICMPV6_DEST_UNREACH:
- net_warn_ratelimited("%s: Path to destination invalid or inactive!\n",
- t->parms.name);
+ net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
+ t->parms.name);
rel_msg = 1;
break;
case ICMPV6_TIME_EXCEED:
if ((*code) == ICMPV6_EXC_HOPLIMIT) {
- net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
- t->parms.name);
+ net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
+ t->parms.name);
rel_msg = 1;
}
break;
@@ -529,13 +588,13 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
if (teli && teli == *info - 2) {
tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
if (tel->encap_limit == 0) {
- net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
- t->parms.name);
+ net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
+ t->parms.name);
rel_msg = 1;
}
} else {
- net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
- t->parms.name);
+ net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
+ t->parms.name);
}
break;
case ICMPV6_PKT_TOOBIG:
@@ -1010,23 +1069,23 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
neigh_release(neigh);
} else if (!fl6->flowi6_mark)
- dst = ip6_tnl_dst_check(t);
+ dst = ip6_tnl_dst_get(t);
if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
goto tx_err_link_failure;
if (!dst) {
- ndst = ip6_route_output(net, NULL, fl6);
+ dst = ip6_route_output(net, NULL, fl6);
- if (ndst->error)
+ if (dst->error)
goto tx_err_link_failure;
- ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0);
- if (IS_ERR(ndst)) {
- err = PTR_ERR(ndst);
- ndst = NULL;
+ dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ dst = NULL;
goto tx_err_link_failure;
}
- dst = ndst;
+ ndst = dst;
}
tdev = dst->dev;
@@ -1072,12 +1131,11 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
consume_skb(skb);
skb = new_skb;
}
- if (fl6->flowi6_mark) {
- skb_dst_set(skb, dst);
- ndst = NULL;
- } else {
- skb_dst_set_noref(skb, dst);
- }
+
+ if (!fl6->flowi6_mark && ndst)
+ ip6_tnl_dst_set(t, ndst);
+ skb_dst_set(skb, dst);
+
skb->transport_header = skb->network_header;
proto = fl6->flowi6_proto;
@@ -1095,20 +1153,18 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
skb_reset_network_header(skb);
ipv6h = ipv6_hdr(skb);
ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
- ip6_make_flowlabel(net, skb, fl6->flowlabel, false));
+ ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
ipv6h->hop_limit = t->parms.hop_limit;
ipv6h->nexthdr = proto;
ipv6h->saddr = fl6->saddr;
ipv6h->daddr = fl6->daddr;
ip6tunnel_xmit(NULL, skb, dev);
- if (ndst)
- ip6_tnl_dst_store(t, ndst);
return 0;
tx_err_link_failure:
stats->tx_carrier_errors++;
dst_link_failure(skb);
tx_err_dst_release:
- dst_release(ndst);
+ dst_release(dst);
return err;
}
@@ -1573,12 +1629,21 @@ static inline int
ip6_tnl_dev_init_gen(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
+ int ret;
t->dev = dev;
t->net = dev_net(dev);
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
+
+ ret = ip6_tnl_dst_init(t);
+ if (ret) {
+ free_percpu(dev->tstats);
+ dev->tstats = NULL;
+ return ret;
+ }
+
return 0;
}
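
[note] The per-cpu dst cache above is read under a seqlock: ip6_tnl_dst_get() retries the read whenever the sequence changed across it, then takes a reference with atomic_inc_not_zero(). A freestanding single-writer sketch of the seqlock idea in C11 atomics — simplified, eliding the stricter memory fencing a production seqlock needs, and not the kernel API:

    #include <stdatomic.h>
    #include <stdint.h>

    struct seq_cache {
        atomic_uint seq;   /* odd while a write is in progress */
        void *ptr;
        uint32_t cookie;
    };

    static void cache_write(struct seq_cache *c, void *p, uint32_t cookie)
    {
        atomic_fetch_add_explicit(&c->seq, 1, memory_order_release); /* odd */
        c->ptr = p;
        c->cookie = cookie;
        atomic_fetch_add_explicit(&c->seq, 1, memory_order_release); /* even */
    }

    static void *cache_read(struct seq_cache *c, uint32_t *cookie)
    {
        unsigned int s;
        void *p;

        do {
            s = atomic_load_explicit(&c->seq, memory_order_acquire);
            if (s & 1)
                continue;          /* writer active, retry */
            p = c->ptr;
            *cookie = c->cookie;
        } while (atomic_load_explicit(&c->seq, memory_order_acquire) != s);
        return p;
    }

    int main(void)
    {
        struct seq_cache c = { .seq = 0 };
        uint32_t cookie;

        cache_write(&c, &c, 42);
        return cache_read(&c, &cookie) == &c && cookie == 42 ? 0 : 1;
    }
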
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index e1a1136bd..14dacf1df 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -23,6 +23,15 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
if (err < 0)
goto error;
+ if (cfg->ipv6_v6only) {
+ int val = 1;
+
+ err = kernel_setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY,
+ (char *) &val, sizeof(val));
+ if (err < 0)
+ goto error;
+ }
+
udp6_addr.sin6_family = AF_INET6;
memcpy(&udp6_addr.sin6_addr, &cfg->local_ip6,
sizeof(udp6_addr.sin6_addr));
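
[note] The new ipv6_v6only branch is the in-kernel form of a standard socket option. The userspace equivalent, restricting an AF_INET6 UDP socket to IPv6 traffic only instead of also accepting v4-mapped addresses:

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_INET6, SOCK_DGRAM, 0);
        int one = 1;

        if (fd < 0)
            return 1;
        if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &one, sizeof(one)) < 0) {
            perror("IPV6_V6ONLY");
            close(fd);
            return 1;
        }
        puts("socket is v6-only");
        close(fd);
        return 0;
    }
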
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 5f36266b1..0e004cc42 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -217,7 +217,6 @@ static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
.match = ip6mr_rule_match,
.configure = ip6mr_rule_configure,
.compare = ip6mr_rule_compare,
- .default_pref = fib_default_rule_pref,
.fill = ip6mr_rule_fill,
.nlgroup = RTNLGRP_IPV6_RULE,
.policy = ip6mr_rule_policy,
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index c53331cfe..64a71354b 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -553,7 +553,8 @@ static void ndisc_send_unsol_na(struct net_device *dev)
void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
const struct in6_addr *solicit,
- const struct in6_addr *daddr, const struct in6_addr *saddr)
+ const struct in6_addr *daddr, const struct in6_addr *saddr,
+ struct sk_buff *oskb)
{
struct sk_buff *skb;
struct in6_addr addr_buf;
@@ -589,6 +590,9 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR,
dev->dev_addr);
+ if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE) && oskb)
+ skb_dst_copy(skb, oskb);
+
ndisc_send_skb(skb, daddr, saddr);
}
@@ -675,12 +679,12 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
"%s: trying to ucast probe in NUD_INVALID: %pI6\n",
__func__, target);
}
- ndisc_send_ns(dev, neigh, target, target, saddr);
+ ndisc_send_ns(dev, neigh, target, target, saddr, skb);
} else if ((probes -= NEIGH_VAR(neigh->parms, APP_PROBES)) < 0) {
neigh_app_ns(neigh);
} else {
addrconf_addr_solict_mult(target, &mcaddr);
- ndisc_send_ns(dev, NULL, target, &mcaddr, saddr);
+ ndisc_send_ns(dev, NULL, target, &mcaddr, saddr, skb);
}
}
@@ -1074,6 +1078,8 @@ static void ndisc_router_discovery(struct sk_buff *skb)
struct ndisc_options ndopts;
int optlen;
unsigned int pref = 0;
+ __u32 old_if_flags;
+ bool send_ifinfo_notify = false;
__u8 *opt = (__u8 *)(ra_msg + 1);
@@ -1144,6 +1150,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
* Remember the managed/otherconf flags from most recently
* received RA message (RFC 2462) -- yoshfuji
*/
+ old_if_flags = in6_dev->if_flags;
in6_dev->if_flags = (in6_dev->if_flags & ~(IF_RA_MANAGED |
IF_RA_OTHERCONF)) |
(ra_msg->icmph.icmp6_addrconf_managed ?
@@ -1151,6 +1158,9 @@ static void ndisc_router_discovery(struct sk_buff *skb)
(ra_msg->icmph.icmp6_addrconf_other ?
IF_RA_OTHERCONF : 0);
+ if (old_if_flags != in6_dev->if_flags)
+ send_ifinfo_notify = true;
+
if (!in6_dev->cnf.accept_ra_defrtr) {
ND_PRINTK(2, info,
"RA: %s, defrtr is false for dev: %s\n",
@@ -1225,18 +1235,16 @@ static void ndisc_router_discovery(struct sk_buff *skb)
if (rt)
rt6_set_expires(rt, jiffies + (HZ * lifetime));
- if (ra_msg->icmph.icmp6_hop_limit) {
- /* Only set hop_limit on the interface if it is higher than
- * the current hop_limit.
- */
- if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
+ if (in6_dev->cnf.accept_ra_min_hop_limit < 256 &&
+ ra_msg->icmph.icmp6_hop_limit) {
+ if (in6_dev->cnf.accept_ra_min_hop_limit <= ra_msg->icmph.icmp6_hop_limit) {
in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
+ if (rt)
+ dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
+ ra_msg->icmph.icmp6_hop_limit);
} else {
- ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
+ ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than minimum\n");
}
- if (rt)
- dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
- ra_msg->icmph.icmp6_hop_limit);
}
skip_defrtr:
@@ -1254,7 +1262,7 @@ skip_defrtr:
rtime = HZ/10;
NEIGH_VAR_SET(in6_dev->nd_parms, RETRANS_TIME, rtime);
in6_dev->tstamp = jiffies;
- inet6_ifinfo_notify(RTM_NEWLINK, in6_dev);
+ send_ifinfo_notify = true;
}
rtime = ntohl(ra_msg->reachable_time);
@@ -1271,11 +1279,17 @@ skip_defrtr:
GC_STALETIME, 3 * rtime);
in6_dev->nd_parms->reachable_time = neigh_rand_reach_time(rtime);
in6_dev->tstamp = jiffies;
- inet6_ifinfo_notify(RTM_NEWLINK, in6_dev);
+ send_ifinfo_notify = true;
}
}
}
+ /*
+ * Send a notify if RA changed managed/otherconf flags or timer settings
+ */
+ if (send_ifinfo_notify)
+ inet6_ifinfo_notify(RTM_NEWLINK, in6_dev);
+
skip_linkparms:
/*
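
[note] The reworked RA handling above replaces the old "only raise hop_limit" heuristic with an accept_ra_min_hop_limit threshold: an advertised hop limit is applied only when the per-interface minimum admits it, and a value of 256 or more disables updates entirely, since hop limits are 8-bit. The predicate, restated as plain C:

    #include <stdbool.h>
    #include <stdio.h>

    static bool ra_hop_limit_accepted(int min_hop_limit,
                                      unsigned int ra_hop_limit)
    {
        return min_hop_limit < 256 &&   /* 256+ disables updates */
               ra_hop_limit != 0 &&
               (unsigned int)min_hop_limit <= ra_hop_limit;
    }

    int main(void)
    {
        printf("%d\n", ra_hop_limit_accepted(1, 64));    /* 1: accepted */
        printf("%d\n", ra_hop_limit_accepted(32, 8));    /* 0: below threshold */
        printf("%d\n", ra_hop_limit_accepted(256, 64));  /* 0: disabled */
        return 0;
    }
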
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index b552cf0d6..f6a024e14 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -47,9 +47,22 @@ config NFT_REJECT_IPV6
default NFT_REJECT
tristate
+config NFT_DUP_IPV6
+ tristate "IPv6 nf_tables packet duplication support"
+ select NF_DUP_IPV6
+ help
+ This module enables IPv6 packet duplication support for nf_tables.
+
endif # NF_TABLES_IPV6
endif # NF_TABLES
+config NF_DUP_IPV6
+ tristate "Netfilter IPv6 packet duplication to alternate destination"
+ depends on !NF_CONNTRACK || NF_CONNTRACK
+ help
+ This option enables the nf_dup_ipv6 core, which duplicates an IPv6
+ packet to be rerouted to another destination.
+
config NF_REJECT_IPV6
tristate "IPv6 packet rejection"
default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index c36e0a549..b4f7d0b4e 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -30,6 +30,8 @@ obj-$(CONFIG_NF_LOG_IPV6) += nf_log_ipv6.o
# reject
obj-$(CONFIG_NF_REJECT_IPV6) += nf_reject_ipv6.o
+obj-$(CONFIG_NF_DUP_IPV6) += nf_dup_ipv6.o
+
# nf_tables
obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o
obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o
@@ -37,6 +39,7 @@ obj-$(CONFIG_NFT_CHAIN_NAT_IPV6) += nft_chain_nat_ipv6.o
obj-$(CONFIG_NFT_REJECT_IPV6) += nft_reject_ipv6.o
obj-$(CONFIG_NFT_MASQ_IPV6) += nft_masq_ipv6.o
obj-$(CONFIG_NFT_REDIR_IPV6) += nft_redir_ipv6.o
+obj-$(CONFIG_NFT_DUP_IPV6) += nft_dup_ipv6.o
# matches
obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 3c35ced39..0771991ed 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -305,7 +305,7 @@ static void trace_packet(const struct sk_buff *skb,
}
#endif
-static inline __pure struct ip6t_entry *
+static inline struct ip6t_entry *
ip6t_next_entry(const struct ip6t_entry *entry)
{
return (void *)entry + entry->next_offset;
@@ -324,12 +324,13 @@ ip6t_do_table(struct sk_buff *skb,
const char *indev, *outdev;
const void *table_base;
struct ip6t_entry *e, **jumpstack;
- unsigned int *stackptr, origptr, cpu;
+ unsigned int stackidx, cpu;
const struct xt_table_info *private;
struct xt_action_param acpar;
unsigned int addend;
/* Initialization */
+ stackidx = 0;
indev = state->in ? state->in->name : nulldevname;
outdev = state->out ? state->out->name : nulldevname;
/* We handle fragments by dealing with the first fragment as
@@ -357,8 +358,16 @@ ip6t_do_table(struct sk_buff *skb,
cpu = smp_processor_id();
table_base = private->entries;
jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
- stackptr = per_cpu_ptr(private->stackptr, cpu);
- origptr = *stackptr;
+
+ /* Switch to alternate jumpstack if we're being invoked via TEE.
+ * TEE issues XT_CONTINUE verdict on original skb so we must not
+ * clobber the jumpstack.
+ *
+ * For recursion via REJECT or SYNPROXY the stack will be clobbered
+ * but it is no problem since absolute verdict is issued by these.
+ */
+ if (static_key_false(&xt_tee_enabled))
+ jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
e = get_entry(table_base, private->hook_entry[hook]);
@@ -406,20 +415,16 @@ ip6t_do_table(struct sk_buff *skb,
verdict = (unsigned int)(-v) - 1;
break;
}
- if (*stackptr <= origptr)
+ if (stackidx == 0)
e = get_entry(table_base,
private->underflow[hook]);
else
- e = ip6t_next_entry(jumpstack[--*stackptr]);
+ e = ip6t_next_entry(jumpstack[--stackidx]);
continue;
}
if (table_base + v != ip6t_next_entry(e) &&
!(e->ipv6.flags & IP6T_F_GOTO)) {
- if (*stackptr >= private->stacksize) {
- verdict = NF_DROP;
- break;
- }
- jumpstack[(*stackptr)++] = e;
+ jumpstack[stackidx++] = e;
}
e = get_entry(table_base, v);
@@ -437,8 +442,6 @@ ip6t_do_table(struct sk_buff *skb,
break;
} while (!acpar.hotdrop);
- *stackptr = origptr;
-
xt_write_recseq_end(addend);
local_bh_enable();
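
[note] The jumpstack comment above explains the trick: the per-cpu stack is sized for two users, and the nf_skb_duplicated flag (set while TEE re-enters the table) selects the second half, so the outer traversal's saved positions survive the re-entry. A sketch of the same idea with a thread-local standing in for the per-cpu variable:

    #include <stdbool.h>
    #include <stdio.h>

    #define STACKSIZE 16

    static _Thread_local void *jumpstack[2 * STACKSIZE];
    static _Thread_local bool duplicated;

    static void **pick_jumpstack(void)
    {
        /* second half while handling a duplicate, first half otherwise */
        return jumpstack + (duplicated ? STACKSIZE : 0);
    }

    int main(void)
    {
        printf("offset %ld\n", (long)(pick_jumpstack() - jumpstack)); /* 0 */
        duplicated = true;
        printf("offset %ld\n", (long)(pick_jumpstack() - jumpstack)); /* 16 */
        return 0;
    }
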
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 12331efd4..0ed841a3f 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -35,14 +35,12 @@ MODULE_AUTHOR("Yasuyuki KOZAKAI <yasuyuki.kozakai@toshiba.co.jp>");
MODULE_DESCRIPTION("Xtables: packet \"rejection\" target for IPv6");
MODULE_LICENSE("GPL");
-
static unsigned int
reject_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ip6t_reject_info *reject = par->targinfo;
struct net *net = dev_net((par->in != NULL) ? par->in : par->out);
- pr_debug("%s: medium point\n", __func__);
switch (reject->with) {
case IP6T_ICMP6_NO_ROUTE:
nf_send_unreach6(net, skb, ICMPV6_NOROUTE, par->hooknum);
@@ -65,8 +63,11 @@ reject_tg6(struct sk_buff *skb, const struct xt_action_param *par)
case IP6T_TCP_RESET:
nf_send_reset6(net, skb, par->hooknum);
break;
- default:
- net_info_ratelimited("case %u not handled yet\n", reject->with);
+ case IP6T_ICMP6_POLICY_FAIL:
+ nf_send_unreach6(net, skb, ICMPV6_POLICY_FAIL, par->hooknum);
+ break;
+ case IP6T_ICMP6_REJECT_ROUTE:
+ nf_send_unreach6(net, skb, ICMPV6_REJECT_ROUTE, par->hooknum);
break;
}
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index ebbb754c2..1e4bf99ed 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -237,7 +237,7 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
nth->ack_seq = th->ack_seq;
tcp_flag_word(nth) = TCP_FLAG_ACK;
nth->doff = tcp_hdr_size / 4;
- nth->window = ntohs(htons(th->window) >> opts->wscale);
+ nth->window = htons(ntohs(th->window) >> opts->wscale);
nth->check = 0;
nth->urg_ptr = 0;
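
[note] The SYNPROXY one-liner is a type-correctness fix: th->window is big-endian, so it must be converted to host order with ntohs, shifted by the window scale, and converted back with htons. Because both helpers perform the same byte swap, the old expression produced identical bits, but its endian annotations (__be16 vs u16) were inverted and defeated static checking. The correct pattern, standalone:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t wire = htons(4096);   /* window as it appears on the wire */
        unsigned int wscale = 4;

        /* host order -> shift -> back to network order */
        uint16_t scaled = htons(ntohs(wire) >> wscale);

        printf("advertised window: %u\n", ntohs(scaled));  /* 256 */
        return 0;
    }
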
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 4ba0c34c6..7302900c3 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -251,7 +251,7 @@ ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
if (*len < 0 || (unsigned int) *len < sizeof(sin6))
return -EINVAL;
- h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple);
+ h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
if (!h) {
pr_debug("IP6T_SO_ORIGINAL_DST: Can't find %pI6c/%u-%pI6c/%u.\n",
&tuple.src.u3.ip6, ntohs(tuple.src.u.tcp.port),
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 90388d606..0e6fae103 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -150,7 +150,7 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
struct nf_conntrack_tuple intuple, origtuple;
const struct nf_conntrack_tuple_hash *h;
const struct nf_conntrack_l4proto *inproto;
- u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
+ struct nf_conntrack_zone tmp;
NF_CT_ASSERT(skb->nfct == NULL);
@@ -177,7 +177,8 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
*ctinfo = IP_CT_RELATED;
- h = nf_conntrack_find_get(net, zone, &intuple);
+ h = nf_conntrack_find_get(net, nf_ct_zone_tmpl(tmpl, skb, &tmp),
+ &intuple);
if (!h) {
pr_debug("icmpv6_error: no match\n");
return -NF_ACCEPT;
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 6d0249817..c7196ad1d 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -633,6 +633,7 @@ ret_orig:
kfree_skb(clone);
return skb;
}
+EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);
void nf_ct_frag6_consume_orig(struct sk_buff *skb)
{
@@ -645,6 +646,7 @@ void nf_ct_frag6_consume_orig(struct sk_buff *skb)
s = s2;
}
}
+EXPORT_SYMBOL_GPL(nf_ct_frag6_consume_orig);
static int nf_ct_net_init(struct net *net)
{
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index a45db0b47..6d9c0b3d5 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -33,23 +33,22 @@
static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
struct sk_buff *skb)
{
- u16 zone = NF_CT_DEFAULT_ZONE;
-
+ u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
- if (skb->nfct)
- zone = nf_ct_zone((struct nf_conn *)skb->nfct);
-#endif
+ if (skb->nfct) {
+ enum ip_conntrack_info ctinfo;
+ const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- if (skb->nf_bridge &&
- skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
- return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
+ zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo));
+ }
#endif
+ if (nf_bridge_in_prerouting(skb))
+ return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id;
+
if (hooknum == NF_INET_PRE_ROUTING)
- return IP6_DEFRAG_CONNTRACK_IN + zone;
+ return IP6_DEFRAG_CONNTRACK_IN + zone_id;
else
- return IP6_DEFRAG_CONNTRACK_OUT + zone;
-
+ return IP6_DEFRAG_CONNTRACK_OUT + zone_id;
}
static unsigned int ipv6_defrag(const struct nf_hook_ops *ops,
diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c
new file mode 100644
index 000000000..c8ab62655
--- /dev/null
+++ b/net/ipv6/netfilter/nf_dup_ipv6.c
@@ -0,0 +1,97 @@
+/*
+ * (C) 2007 by Sebastian Claßen <sebastian.classen@freenet.ag>
+ * (C) 2007-2010 by Jan Engelhardt <jengelh@medozas.de>
+ *
+ * Extracted from xt_TEE.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 or later, as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/netfilter/ipv6/nf_dup_ipv6.h>
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
+static struct net *pick_net(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+ const struct dst_entry *dst;
+
+ if (skb->dev != NULL)
+ return dev_net(skb->dev);
+ dst = skb_dst(skb);
+ if (dst != NULL && dst->dev != NULL)
+ return dev_net(dst->dev);
+#endif
+ return &init_net;
+}
+
+static bool nf_dup_ipv6_route(struct sk_buff *skb, const struct in6_addr *gw,
+ int oif)
+{
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
+ struct net *net = pick_net(skb);
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+
+ memset(&fl6, 0, sizeof(fl6));
+ if (oif != -1)
+ fl6.flowi6_oif = oif;
+
+ fl6.daddr = *gw;
+ fl6.flowlabel = (__force __be32)(((iph->flow_lbl[0] & 0xF) << 16) |
+ (iph->flow_lbl[1] << 8) | iph->flow_lbl[2]);
+ dst = ip6_route_output(net, NULL, &fl6);
+ if (dst->error) {
+ dst_release(dst);
+ return false;
+ }
+ skb_dst_drop(skb);
+ skb_dst_set(skb, dst);
+ skb->dev = dst->dev;
+ skb->protocol = htons(ETH_P_IPV6);
+
+ return true;
+}
+
+void nf_dup_ipv6(struct sk_buff *skb, unsigned int hooknum,
+ const struct in6_addr *gw, int oif)
+{
+ if (this_cpu_read(nf_skb_duplicated))
+ return;
+ skb = pskb_copy(skb, GFP_ATOMIC);
+ if (skb == NULL)
+ return;
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ nf_conntrack_put(skb->nfct);
+ skb->nfct = &nf_ct_untracked_get()->ct_general;
+ skb->nfctinfo = IP_CT_NEW;
+ nf_conntrack_get(skb->nfct);
+#endif
+ if (hooknum == NF_INET_PRE_ROUTING ||
+ hooknum == NF_INET_LOCAL_IN) {
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ --iph->hop_limit;
+ }
+ if (nf_dup_ipv6_route(skb, gw, oif)) {
+ __this_cpu_write(nf_skb_duplicated, true);
+ ip6_local_out(skb);
+ __this_cpu_write(nf_skb_duplicated, false);
+ } else {
+ kfree_skb(skb);
+ }
+}
+EXPORT_SYMBOL_GPL(nf_dup_ipv6);
+
+MODULE_AUTHOR("Sebastian ClaĂŸen <sebastian.classen@freenet.ag>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_DESCRIPTION("nf_dup_ipv6: IPv6 packet duplication");
+MODULE_LICENSE("GPL");
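
[note] nf_dup_ipv6() guards against duplicates of duplicates with the per-cpu nf_skb_duplicated flag: the copy is transmitted with the flag set, so if it traverses the same hook again the duplication step is skipped and the packet cannot multiply without bound. The same guard modeled with a thread-local flag:

    #include <stdbool.h>
    #include <stdio.h>

    static _Thread_local bool duplicated;

    static void transmit(int depth) { printf("xmit at depth %d\n", depth); }

    static void dup_packet(int depth)
    {
        if (duplicated)
            return;              /* already a copy: do not duplicate again */
        duplicated = true;
        transmit(depth + 1);     /* may re-enter dup_packet() indirectly */
        duplicated = false;
    }

    int main(void)
    {
        dup_packet(0);
        return 0;
    }
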
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
index e76900e0a..70fbaed49 100644
--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
@@ -124,7 +124,7 @@ static void nf_nat_ipv6_csum_update(struct sk_buff *skb,
newip = &t->dst.u3.in6;
}
inet_proto_csum_replace16(check, skb, oldip->s6_addr32,
- newip->s6_addr32, 1);
+ newip->s6_addr32, true);
}
static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb,
@@ -155,7 +155,7 @@ static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb,
}
} else
inet_proto_csum_replace2(check, skb,
- htons(oldlen), htons(datalen), 1);
+ htons(oldlen), htons(datalen), true);
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
diff --git a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
index 2205e8eee..57593b00c 100644
--- a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
@@ -73,7 +73,7 @@ icmpv6_manip_pkt(struct sk_buff *skb,
hdr->icmp6_type == ICMPV6_ECHO_REPLY) {
inet_proto_csum_replace2(&hdr->icmp6_cksum, skb,
hdr->icmp6_identifier,
- tuple->src.u.icmp.id, 0);
+ tuple->src.u.icmp.id, false);
hdr->icmp6_identifier = tuple->src.u.icmp.id;
}
return true;
diff --git a/net/ipv6/netfilter/nft_dup_ipv6.c b/net/ipv6/netfilter/nft_dup_ipv6.c
new file mode 100644
index 000000000..0eaa4f65f
--- /dev/null
+++ b/net/ipv6/netfilter/nft_dup_ipv6.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2015 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/ipv6/nf_dup_ipv6.h>
+
+struct nft_dup_ipv6 {
+ enum nft_registers sreg_addr:8;
+ enum nft_registers sreg_dev:8;
+};
+
+static void nft_dup_ipv6_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
+ struct in6_addr *gw = (struct in6_addr *)&regs->data[priv->sreg_addr];
+ int oif = regs->data[priv->sreg_dev];
+
+ nf_dup_ipv6(pkt->skb, pkt->ops->hooknum, gw, oif);
+}
+
+static int nft_dup_ipv6_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
+ int err;
+
+ if (tb[NFTA_DUP_SREG_ADDR] == NULL)
+ return -EINVAL;
+
+ priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]);
+ err = nft_validate_register_load(priv->sreg_addr, sizeof(struct in6_addr));
+ if (err < 0)
+ return err;
+
+ if (tb[NFTA_DUP_SREG_DEV] != NULL) {
+ priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
+ return nft_validate_register_load(priv->sreg_dev, sizeof(int));
+ }
+ return 0;
+}
+
+static int nft_dup_ipv6_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
+
+ if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) ||
+ nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static struct nft_expr_type nft_dup_ipv6_type;
+static const struct nft_expr_ops nft_dup_ipv6_ops = {
+ .type = &nft_dup_ipv6_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_dup_ipv6)),
+ .eval = nft_dup_ipv6_eval,
+ .init = nft_dup_ipv6_init,
+ .dump = nft_dup_ipv6_dump,
+};
+
+static const struct nla_policy nft_dup_ipv6_policy[NFTA_DUP_MAX + 1] = {
+ [NFTA_DUP_SREG_ADDR] = { .type = NLA_U32 },
+ [NFTA_DUP_SREG_DEV] = { .type = NLA_U32 },
+};
+
+static struct nft_expr_type nft_dup_ipv6_type __read_mostly = {
+ .family = NFPROTO_IPV6,
+ .name = "dup",
+ .ops = &nft_dup_ipv6_ops,
+ .policy = nft_dup_ipv6_policy,
+ .maxattr = NFTA_DUP_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_dup_ipv6_module_init(void)
+{
+ return nft_register_expr(&nft_dup_ipv6_type);
+}
+
+static void __exit nft_dup_ipv6_module_exit(void)
+{
+ nft_unregister_expr(&nft_dup_ipv6_type);
+}
+
+module_init(nft_dup_ipv6_module_init);
+module_exit(nft_dup_ipv6_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "dup");
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index ca4700cb2..fdbada156 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -295,7 +295,8 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
* unspecified and mapped address have a v4 equivalent.
*/
v4addr = LOOPBACK4_IPV6;
- if (!(addr_type & IPV6_ADDR_MULTICAST)) {
+ if (!(addr_type & IPV6_ADDR_MULTICAST) &&
+ !sock_net(sk)->ipv6.sysctl.ip_nonlocal_bind) {
err = -EADDRNOTAVAIL;
if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
dev, 0)) {
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index dd6ebba58..946880ad4 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -54,10 +54,13 @@
#include <net/tcp.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>
+#include <net/dst_metadata.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/nexthop.h>
+#include <net/lwtunnel.h>
+#include <net/ip_tunnels.h>
#include <asm/uaccess.h>
@@ -245,12 +248,6 @@ static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
{
}
-static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
- unsigned long old)
-{
- return NULL;
-}
-
static struct dst_ops ip6_dst_blackhole_ops = {
.family = AF_INET6,
.destroy = ip6_dst_destroy,
@@ -259,7 +256,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
.default_advmss = ip6_default_advmss,
.update_pmtu = ip6_rt_blackhole_update_pmtu,
.redirect = ip6_rt_blackhole_redirect,
- .cow_metrics = ip6_rt_blackhole_cow_metrics,
+ .cow_metrics = dst_cow_metrics_generic,
.neigh_lookup = ip6_neigh_lookup,
};
@@ -316,6 +313,15 @@ static const struct rt6_info ip6_blk_hole_entry_template = {
#endif
+static void rt6_info_init(struct rt6_info *rt)
+{
+ struct dst_entry *dst = &rt->dst;
+
+ memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
+ INIT_LIST_HEAD(&rt->rt6i_siblings);
+ INIT_LIST_HEAD(&rt->rt6i_uncached);
+}
+
/* allocate dst with ip6_dst_ops */
static struct rt6_info *__ip6_dst_alloc(struct net *net,
struct net_device *dev,
@@ -324,13 +330,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net,
struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
0, DST_OBSOLETE_FORCE_CHK, flags);
- if (rt) {
- struct dst_entry *dst = &rt->dst;
+ if (rt)
+ rt6_info_init(rt);
- memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
- INIT_LIST_HEAD(&rt->rt6i_siblings);
- INIT_LIST_HEAD(&rt->rt6i_uncached);
- }
return rt;
}
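
[note] The rt6_info_init() helper factored out above zeroes everything after the embedded dst_entry with memset(dst + 1, ...): pointer arithmetic on the header pointer plus one lands just past the header, and the size difference covers the rest of the object. The same idiom on an illustrative pair of structs:

    #include <string.h>
    #include <stdio.h>

    struct hdr { long refcnt; };
    struct obj { struct hdr h; int a; int b; };  /* header must come first */

    static void obj_init(struct obj *o)
    {
        struct hdr *h = &o->h;

        /* h + 1 points just past the header; zero the rest of *o */
        memset(h + 1, 0, sizeof(*o) - sizeof(*h));
    }

    int main(void)
    {
        struct obj o = { .h = { .refcnt = 1 }, .a = 7, .b = 9 };

        obj_init(&o);
        printf("refcnt=%ld a=%d b=%d\n", o.h.refcnt, o.a, o.b); /* 1 0 0 */
        return 0;
    }
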
@@ -536,13 +538,14 @@ static void rt6_probe_deferred(struct work_struct *w)
container_of(w, struct __rt6_probe_work, work);
addrconf_addr_solict_mult(&work->target, &mcaddr);
- ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
+ ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL, NULL);
dev_put(work->dev);
kfree(work);
}
static void rt6_probe(struct rt6_info *rt)
{
+ struct __rt6_probe_work *work;
struct neighbour *neigh;
/*
* Okay, this does not seem to be appropriate
@@ -557,34 +560,33 @@ static void rt6_probe(struct rt6_info *rt)
rcu_read_lock_bh();
neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
if (neigh) {
- write_lock(&neigh->lock);
if (neigh->nud_state & NUD_VALID)
goto out;
- }
-
- if (!neigh ||
- time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
- struct __rt6_probe_work *work;
+ work = NULL;
+ write_lock(&neigh->lock);
+ if (!(neigh->nud_state & NUD_VALID) &&
+ time_after(jiffies,
+ neigh->updated +
+ rt->rt6i_idev->cnf.rtr_probe_interval)) {
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work)
+ __neigh_set_probe_once(neigh);
+ }
+ write_unlock(&neigh->lock);
+ } else {
work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ }
- if (neigh && work)
- __neigh_set_probe_once(neigh);
-
- if (neigh)
- write_unlock(&neigh->lock);
+ if (work) {
+ INIT_WORK(&work->work, rt6_probe_deferred);
+ work->target = rt->rt6i_gateway;
+ dev_hold(rt->dst.dev);
+ work->dev = rt->dst.dev;
+ schedule_work(&work->work);
+ }
- if (work) {
- INIT_WORK(&work->work, rt6_probe_deferred);
- work->target = rt->rt6i_gateway;
- dev_hold(rt->dst.dev);
- work->dev = rt->dst.dev;
- schedule_work(&work->work);
- }
- } else {
out:
- write_unlock(&neigh->lock);
- }
rcu_read_unlock_bh();
}
#else
@@ -663,6 +665,12 @@ static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
{
int m;
bool match_do_rr = false;
+ struct inet6_dev *idev = rt->rt6i_idev;
+ struct net_device *dev = rt->dst.dev;
+
+ if (dev && !netif_carrier_ok(dev) &&
+ idev->cnf.ignore_routes_with_linkdown)
+ goto out;
if (rt6_check_expired(rt))
goto out;
@@ -1060,6 +1068,9 @@ static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
saved_fn = fn;
+ if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
+ oif = 0;
+
redo_rt6_select:
rt = rt6_select(fn, oif, strict);
if (rt->rt6i_nsiblings)
@@ -1155,6 +1166,7 @@ void ip6_route_input(struct sk_buff *skb)
const struct ipv6hdr *iph = ipv6_hdr(skb);
struct net *net = dev_net(skb->dev);
int flags = RT6_LOOKUP_F_HAS_SADDR;
+ struct ip_tunnel_info *tun_info;
struct flowi6 fl6 = {
.flowi6_iif = skb->dev->ifindex,
.daddr = iph->daddr,
@@ -1164,6 +1176,10 @@ void ip6_route_input(struct sk_buff *skb)
.flowi6_proto = iph->nexthdr,
};
+ tun_info = skb_tunnel_info(skb);
+ if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
+ fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
+ skb_dst_drop(skb);
skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
}
@@ -1177,13 +1193,16 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
struct flowi6 *fl6)
{
int flags = 0;
+ bool any_src;
fl6->flowi6_iif = LOOPBACK_IFINDEX;
- if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
+ any_src = ipv6_addr_any(&fl6->saddr);
+ if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
+ (fl6->flowi6_oif && any_src))
flags |= RT6_LOOKUP_F_IFACE;
- if (!ipv6_addr_any(&fl6->saddr))
+ if (!any_src)
flags |= RT6_LOOKUP_F_HAS_SADDR;
else if (sk)
flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
@@ -1199,24 +1218,20 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0);
if (rt) {
- new = &rt->dst;
-
- memset(new + 1, 0, sizeof(*rt) - sizeof(*new));
+ rt6_info_init(rt);
+ new = &rt->dst;
new->__use = 1;
new->input = dst_discard;
new->output = dst_discard_sk;
- if (dst_metrics_read_only(&ort->dst))
- new->_metrics = ort->dst._metrics;
- else
- dst_copy_metrics(new, &ort->dst);
+ dst_copy_metrics(new, &ort->dst);
rt->rt6i_idev = ort->rt6i_idev;
if (rt->rt6i_idev)
in6_dev_hold(rt->rt6i_idev);
rt->rt6i_gateway = ort->rt6i_gateway;
- rt->rt6i_flags = ort->rt6i_flags;
+ rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
rt->rt6i_metric = 0;
memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
@@ -1309,8 +1324,7 @@ static void ip6_link_failure(struct sk_buff *skb)
if (rt) {
if (rt->rt6i_flags & RTF_CACHE) {
dst_hold(&rt->dst);
- if (ip6_del_rt(rt))
- dst_free(&rt->dst);
+ ip6_del_rt(rt);
} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
rt->rt6i_node->fn_sernum = -1;
}
@@ -1685,6 +1699,7 @@ out:
static int ip6_convert_metrics(struct mx6_config *mxc,
const struct fib6_config *cfg)
{
+ bool ecn_ca = false;
struct nlattr *nla;
int remaining;
u32 *mp;
@@ -1698,30 +1713,36 @@ static int ip6_convert_metrics(struct mx6_config *mxc,
nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
int type = nla_type(nla);
+ u32 val;
+
+ if (!type)
+ continue;
+ if (unlikely(type > RTAX_MAX))
+ goto err;
- if (type) {
- u32 val;
+ if (type == RTAX_CC_ALGO) {
+ char tmp[TCP_CA_NAME_MAX];
- if (unlikely(type > RTAX_MAX))
+ nla_strlcpy(tmp, nla, sizeof(tmp));
+ val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
+ if (val == TCP_CA_UNSPEC)
goto err;
- if (type == RTAX_CC_ALGO) {
- char tmp[TCP_CA_NAME_MAX];
+ } else {
+ val = nla_get_u32(nla);
+ }
+ if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
+ goto err;
- nla_strlcpy(tmp, nla, sizeof(tmp));
- val = tcp_ca_get_key_by_name(tmp);
- if (val == TCP_CA_UNSPEC)
- goto err;
- } else {
- val = nla_get_u32(nla);
- }
+ mp[type - 1] = val;
+ __set_bit(type - 1, mxc->mx_valid);
+ }
- mp[type - 1] = val;
- __set_bit(type - 1, mxc->mx_valid);
- }
+ if (ecn_ca) {
+ __set_bit(RTAX_FEATURES - 1, mxc->mx_valid);
+ mp[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
}
mxc->mx = mp;
-
return 0;
err:
kfree(mp);
@@ -1801,6 +1822,25 @@ int ip6_route_info_create(struct fib6_config *cfg, struct rt6_info **rt_ret)
rt->dst.output = ip6_output;
+ if (cfg->fc_encap) {
+ struct lwtunnel_state *lwtstate;
+
+ err = lwtunnel_build_state(dev, cfg->fc_encap_type,
+ cfg->fc_encap, AF_INET6, cfg,
+ &lwtstate);
+ if (err)
+ goto out;
+ rt->dst.lwtstate = lwtstate_get(lwtstate);
+ if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
+ rt->dst.lwtstate->orig_output = rt->dst.output;
+ rt->dst.output = lwtunnel_output;
+ }
+ if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
+ rt->dst.lwtstate->orig_input = rt->dst.input;
+ rt->dst.input = lwtunnel_input;
+ }
+ }
+
ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
rt->rt6i_dst.plen = cfg->fc_dst_len;
if (rt->rt6i_dst.plen == 128)
@@ -1847,9 +1887,11 @@ int ip6_route_info_create(struct fib6_config *cfg, struct rt6_info **rt_ret)
rt->dst.input = ip6_pkt_prohibit;
break;
case RTN_THROW:
+ case RTN_UNREACHABLE:
default:
rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
- : -ENETUNREACH;
+ : (cfg->fc_type == RTN_UNREACHABLE)
+ ? -EHOSTUNREACH : -ENETUNREACH;
rt->dst.output = ip6_pkt_discard_out;
rt->dst.input = ip6_pkt_discard;
break;
@@ -1989,7 +2031,8 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
struct fib6_table *table;
struct net *net = dev_net(rt->dst.dev);
- if (rt == net->ipv6.ip6_null_entry) {
+ if (rt == net->ipv6.ip6_null_entry ||
+ rt->dst.flags & DST_NOCACHE) {
err = -ENOENT;
goto out;
}
@@ -2204,6 +2247,7 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
#endif
rt->rt6i_prefsrc = ort->rt6i_prefsrc;
rt->rt6i_table = ort->rt6i_table;
+ rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
}
#ifdef CONFIG_IPV6_ROUTE_INFO
@@ -2475,6 +2519,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
rt->rt6i_dst.addr = *addr;
rt->rt6i_dst.plen = 128;
rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
+ rt->dst.flags |= DST_NOCACHE;
atomic_set(&rt->dst.__refcnt, 1);
@@ -2653,6 +2698,8 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
[RTA_METRICS] = { .type = NLA_NESTED },
[RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
[RTA_PREF] = { .type = NLA_U8 },
+ [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
+ [RTA_ENCAP] = { .type = NLA_NESTED },
};
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -2747,6 +2794,12 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
cfg->fc_flags |= RTF_PREF(pref);
}
+ if (tb[RTA_ENCAP])
+ cfg->fc_encap = tb[RTA_ENCAP];
+
+ if (tb[RTA_ENCAP_TYPE])
+ cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
+
err = 0;
errout:
return err;
@@ -2838,6 +2891,10 @@ static int ip6_route_multipath_add(struct fib6_config *cfg)
r_cfg.fc_gateway = nla_get_in6_addr(nla);
r_cfg.fc_flags |= RTF_GATEWAY;
}
+ r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
+ nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
+ if (nla)
+ r_cfg.fc_encap_type = nla_get_u16(nla);
}
err = ip6_route_info_create(&r_cfg, &rt);
@@ -2891,8 +2948,7 @@ cleanup:
list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
if (nh->rt6_info)
dst_free(&nh->rt6_info->dst);
- if (nh->mxc.mx)
- kfree(nh->mxc.mx);
+ kfree(nh->mxc.mx);
list_del(&nh->next);
kfree(nh);
}
@@ -2967,7 +3023,7 @@ static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
return ip6_route_add(&cfg);
}
-static inline size_t rt6_nlmsg_size(void)
+static inline size_t rt6_nlmsg_size(struct rt6_info *rt)
{
return NLMSG_ALIGN(sizeof(struct rtmsg))
+ nla_total_size(16) /* RTA_SRC */
@@ -2981,7 +3037,8 @@ static inline size_t rt6_nlmsg_size(void)
+ RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
+ nla_total_size(sizeof(struct rta_cacheinfo))
+ nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
- + nla_total_size(1); /* RTA_PREF */
+ + nla_total_size(1) /* RTA_PREF */
+ + lwtunnel_get_encap_size(rt->dst.lwtstate);
}
static int rt6_fill_node(struct net *net,
@@ -3042,6 +3099,11 @@ static int rt6_fill_node(struct net *net,
else
rtm->rtm_type = RTN_UNICAST;
rtm->rtm_flags = 0;
+ if (!netif_carrier_ok(rt->dst.dev)) {
+ rtm->rtm_flags |= RTNH_F_LINKDOWN;
+ if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
+ rtm->rtm_flags |= RTNH_F_DEAD;
+ }
rtm->rtm_scope = RT_SCOPE_UNIVERSE;
rtm->rtm_protocol = rt->rt6i_protocol;
if (rt->rt6i_flags & RTF_DYNAMIC)
@@ -3129,6 +3191,8 @@ static int rt6_fill_node(struct net *net,
if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
goto nla_put_failure;
+ lwtunnel_fill_encap(skb, rt->dst.lwtstate);
+
nlmsg_end(skb, nlh);
return 0;
@@ -3245,7 +3309,8 @@ errout:
return err;
}
-void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
+void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
+ unsigned int nlm_flags)
{
struct sk_buff *skb;
struct net *net = info->nl_net;
@@ -3255,12 +3320,12 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
err = -ENOBUFS;
seq = info->nlh ? info->nlh->nlmsg_seq : 0;
- skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
+ skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
if (!skb)
goto errout;
err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
- event, info->portid, seq, 0, 0, 0);
+ event, info->portid, seq, 0, 0, nlm_flags);
if (err < 0) {
/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
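
[note] The lwtunnel hookup in ip6_route_info_create() saves the route's original output handler in orig_output and installs lwtunnel_output in its place; the replacement does its encapsulation work and then chains to the saved handler. A generic function-pointer interposition sketch of that wiring, with illustrative types:

    #include <stdio.h>

    struct route {
        int (*output)(struct route *);
        int (*orig_output)(struct route *);
    };

    static int plain_output(struct route *r)
    {
        (void)r;
        puts("plain transmit");
        return 0;
    }

    static int encap_output(struct route *r)
    {
        puts("add encapsulation header");
        return r->orig_output(r);   /* chain to the saved handler */
    }

    int main(void)
    {
        struct route r = { .output = plain_output };

        r.orig_output = r.output;   /* save, then redirect */
        r.output = encap_output;
        return r.output(&r);
    }
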
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index ac35a2859..dcccae861 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -742,7 +742,7 @@ static int ipip_rcv(struct sk_buff *skb)
goto drop;
if (iptunnel_pull_header(skb, 0, tpi.proto))
goto drop;
- return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
+ return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, log_ecn_error);
}
return 1;
@@ -1394,34 +1394,20 @@ static int ipip6_tunnel_init(struct net_device *dev)
return 0;
}
-static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
+static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
struct net *net = dev_net(dev);
struct sit_net *sitn = net_generic(net, sit_net_id);
- tunnel->dev = dev;
- tunnel->net = dev_net(dev);
-
iph->version = 4;
iph->protocol = IPPROTO_IPV6;
iph->ihl = 5;
iph->ttl = 64;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
- tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
- if (!tunnel->dst_cache) {
- free_percpu(dev->tstats);
- return -ENOMEM;
- }
-
dev_hold(dev);
rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
- return 0;
}
static int ipip6_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -1831,23 +1817,19 @@ static int __net_init sit_init_net(struct net *net)
*/
sitn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
- err = ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
- if (err)
- goto err_dev_free;
-
- ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn);
err = register_netdev(sitn->fb_tunnel_dev);
if (err)
goto err_reg_dev;
+ ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn);
+ ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
+
t = netdev_priv(sitn->fb_tunnel_dev);
strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
return 0;
err_reg_dev:
- dev_put(sitn->fb_tunnel_dev);
-err_dev_free:
ipip6_dev_free(sitn->fb_tunnel_dev);
err_alloc_dev:
return err;
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 4e705add4..45243bbe5 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -17,6 +17,9 @@
#include <net/inet_frag.h>
static int one = 1;
+static int auto_flowlabels_min;
+static int auto_flowlabels_max = IP6_AUTO_FLOW_LABEL_MAX;
+
static struct ctl_table ipv6_table_template[] = {
{
@@ -45,7 +48,9 @@ static struct ctl_table ipv6_table_template[] = {
.data = &init_net.ipv6.sysctl.auto_flowlabels,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &auto_flowlabels_min,
+ .extra2 = &auto_flowlabels_max
},
{
.procname = "fwmark_reflect",
@@ -75,6 +80,13 @@ static struct ctl_table ipv6_table_template[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
+ {
+ .procname = "ip_nonlocal_bind",
+ .data = &init_net.ipv6.sysctl.ip_nonlocal_bind,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
{ }
};
@@ -117,6 +129,7 @@ static int __net_init ipv6_sysctl_net_init(struct net *net)
ipv6_table[5].data = &net->ipv6.sysctl.idgen_retries;
ipv6_table[6].data = &net->ipv6.sysctl.idgen_delay;
ipv6_table[7].data = &net->ipv6.sysctl.flowlabel_state_ranges;
+ ipv6_table[8].data = &net->ipv6.sysctl.ip_nonlocal_bind;
ipv6_route_table = ipv6_route_sysctl_init(net);
if (!ipv6_route_table)
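Switching auto_flowlabels to proc_dointvec_minmax() means out-of-range writes now fail with EINVAL rather than being stored verbatim; the accepted range is [auto_flowlabels_min, IP6_AUTO_FLOW_LABEL_MAX]. A small userspace probe of that behavior — a sketch assuming a kernel carrying this change and the usual /proc/sys path:

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

static void try_write(const char *val)
{
	int fd = open("/proc/sys/net/ipv6/auto_flowlabels", O_WRONLY);

	if (fd < 0)
		return;			/* needs root and this sysctl */
	if (write(fd, val, strlen(val)) < 0)
		printf("\"%s\" rejected: %s\n", val, strerror(errno));
	else
		printf("\"%s\" accepted\n", val);
	close(fd);
}

int main(void)
{
	try_write("1");		/* inside [min, IP6_AUTO_FLOW_LABEL_MAX] */
	try_write("99");	/* out of range: EINVAL with this patch */
	return 0;
}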
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index a6f28765d..0b959b0c0 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -277,7 +277,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
if (err)
goto late_failure;
- ip6_set_txhash(sk);
+ sk_set_txhash(sk);
#ifdef CONFIG_TCP_STEALTH
/* If CONFIG_TCP_STEALTH is defined, we need to know the timestamp as
@@ -1106,7 +1106,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
newsk->sk_bound_dev_if = ireq->ir_iif;
- ip6_set_txhash(newsk);
+ sk_set_txhash(newsk);
/* Now IPv6 options...
@@ -1505,8 +1505,7 @@ do_time_wait:
ntohs(th->dest), tcp_v6_iif(skb));
if (sk2) {
struct inet_timewait_sock *tw = inet_twsk(sk);
- inet_twsk_deschedule(tw);
- inet_twsk_put(tw);
+ inet_twsk_deschedule_put(tw);
sk = sk2;
tcp_v6_restore_cb(skb);
goto process;
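inet_twsk_deschedule_put() folds the old deschedule-then-put pair into one helper, and sk_set_txhash() supersedes the family-specific ip6_set_txhash(). What the combined timewait helper amounts to, reconstructed as a hedged sketch from its call sites here — inet_twsk_kill() and the tw_timer field are assumptions from the surrounding kernel, not shown in this diff:

/* hedged reconstruction, not quoted from this patch */
void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
{
	if (del_timer_sync(&tw->tw_timer))
		inet_twsk_kill(tw);	/* we stopped the timer: kill now */
	inet_twsk_put(tw);		/* drop the caller's reference */
}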
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index e51fc3eee..0aba654f5 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1496,7 +1496,8 @@ int __net_init udp6_proc_init(struct net *net)
return udp_proc_register(net, &udp6_seq_afinfo);
}
-void udp6_proc_exit(struct net *net) {
+void udp6_proc_exit(struct net *net)
+{
udp_proc_unregister(net, &udp6_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 901ef6f8a..f7fbdbabe 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -20,10 +20,9 @@
static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
{
- const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
struct ipv6hdr *inner_iph = ipipv6_hdr(skb);
- if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph)))
+ if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
IP6_ECN_set_ce(inner_iph);
}
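Decapsulation now tests the outer TOS that the xfrm input path saved in XFRM_MODE_SKB_CB(skb)->tos instead of re-reading the outer header, which may no longer sit where ipv6_hdr() points. The CE check itself is just the two ECN bits equalling 0b11; a standalone re-implementation that mirrors INET_ECN_is_ce():

#include <stdio.h>

#define INET_ECN_MASK	3
#define INET_ECN_CE	3

static int is_ce(unsigned char dsfield)	/* mirrors INET_ECN_is_ce() */
{
	return (dsfield & INET_ECN_MASK) == INET_ECN_CE;
}

int main(void)
{
	printf("tos 0x03 -> CE? %d\n", is_ce(0x03));	/* 1 */
	printf("tos 0x02 -> CE? %d\n", is_ce(0x02));	/* 0, ECT(0) */
	return 0;
}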
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 09c76a7b4..e15feb7b4 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -79,6 +79,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
if (!skb->ignore_df && skb->len > mtu) {
skb->dev = dst->dev;
+ skb->protocol = htons(ETH_P_IPV6);
if (xfrm6_local_dontfrag(skb))
xfrm6_local_rxpmtu(skb, mtu);
@@ -136,6 +137,7 @@ static int __xfrm6_output(struct sock *sk, struct sk_buff *skb)
struct dst_entry *dst = skb_dst(skb);
struct xfrm_state *x = dst->xfrm;
int mtu;
+ bool toobig;
#ifdef CONFIG_NETFILTER
if (!x) {
@@ -144,25 +146,29 @@ static int __xfrm6_output(struct sock *sk, struct sk_buff *skb)
}
#endif
+ if (x->props.mode != XFRM_MODE_TUNNEL)
+ goto skip_frag;
+
if (skb->protocol == htons(ETH_P_IPV6))
mtu = ip6_skb_dst_mtu(skb);
else
mtu = dst_mtu(skb_dst(skb));
- if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
+ toobig = skb->len > mtu && !skb_is_gso(skb);
+
+ if (toobig && xfrm6_local_dontfrag(skb)) {
xfrm6_local_rxpmtu(skb, mtu);
return -EMSGSIZE;
- } else if (!skb->ignore_df && skb->len > mtu && skb->sk) {
+ } else if (!skb->ignore_df && toobig && skb->sk) {
xfrm_local_error(skb, mtu);
return -EMSGSIZE;
}
- if (x->props.mode == XFRM_MODE_TUNNEL &&
- ((skb->len > mtu && !skb_is_gso(skb)) ||
- dst_allfrag(skb_dst(skb)))) {
+ if (toobig || dst_allfrag(skb_dst(skb)))
return ip6_fragment(sk, skb,
x->outer_mode->afinfo->output_finish);
- }
+
+skip_frag:
return x->outer_mode->afinfo->output_finish(sk, skb);
}
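The refactor makes three rules explicit: non-tunnel modes never fragment (the skip_frag path), GSO skbs are exempt from the size checks, and the dontfrag and local-error cases are reported before any fragmentation. The same decision logic as a standalone pure function — decide() and its enum are illustrative names, not kernel API:

#include <stdio.h>

enum xfrm6_action { OUT_FINISH, RX_PMTU, LOCAL_ERR, FRAGMENT };

static enum xfrm6_action decide(int tunnel_mode, int len, int mtu, int gso,
				int dontfrag, int ignore_df, int has_sk,
				int allfrag)
{
	int toobig = len > mtu && !gso;

	if (!tunnel_mode)
		return OUT_FINISH;		/* skip_frag */
	if (toobig && dontfrag)
		return RX_PMTU;			/* -EMSGSIZE to local sender */
	if (!ignore_df && toobig && has_sk)
		return LOCAL_ERR;		/* xfrm_local_error() */
	if (toobig || allfrag)
		return FRAGMENT;		/* ip6_fragment() */
	return OUT_FINISH;
}

int main(void)
{
	/* an over-MTU GSO skb in tunnel mode is not fragmented here */
	printf("%d\n", decide(1, 3000, 1500, 1, 0, 0, 1, 0) == OUT_FINISH);
	return 0;
}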
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index ed0583c1b..da55e0c85 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -20,13 +20,14 @@
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
+#include <net/vrf.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/mip6.h>
#endif
static struct xfrm_policy_afinfo xfrm6_policy_afinfo;
-static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos,
+static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr)
{
@@ -35,6 +36,8 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos,
int err;
memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_oif = oif;
+ fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
if (saddr)
memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr));
@@ -50,13 +53,13 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos,
return dst;
}
-static int xfrm6_get_saddr(struct net *net,
+static int xfrm6_get_saddr(struct net *net, int oif,
xfrm_address_t *saddr, xfrm_address_t *daddr)
{
struct dst_entry *dst;
struct net_device *dev;
- dst = xfrm6_dst_lookup(net, 0, NULL, daddr);
+ dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr);
if (IS_ERR(dst))
return -EHOSTUNREACH;
@@ -130,8 +133,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
nexthdr = nh[nhoff];
- if (skb_dst(skb))
- oif = skb_dst(skb)->dev->ifindex;
+ if (skb_dst(skb)) {
+ oif = vrf_master_ifindex(skb_dst(skb)->dev) ?
+ : skb_dst(skb)->dev->ifindex;
+ }
memset(fl6, 0, sizeof(struct flowi6));
fl6->flowi6_mark = skb->mark;
@@ -174,7 +179,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
return;
case IPPROTO_ICMPV6:
- if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) {
+ if (!onlyproto && (nh + offset + 2 < skb->data ||
+ pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
u8 *icmp;
nh = skb_network_header(skb);
@@ -188,7 +194,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
#if IS_ENABLED(CONFIG_IPV6_MIP6)
case IPPROTO_MH:
offset += ipv6_optlen(exthdr);
- if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
+ if (!onlyproto && (nh + offset + 3 < skb->data ||
+ pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
struct ip6_mh *mh;
nh = skb_network_header(skb);
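The flow oif now prefers the VRF master's ifindex when the egress device is enslaved, via GCC's a ?: b extension (a is evaluated once and yielded if nonzero, else b). A standalone demo of the operator — vrf_master_ifindex_stub() is a stand-in for illustration, not the kernel helper:

#include <stdio.h>

static int vrf_master_ifindex_stub(int dev)	/* stand-in, not kernel API */
{
	return dev == 7 ? 42 : 0;	/* pretend dev 7 is enslaved to VRF 42 */
}

int main(void)
{
	int oif;

	oif = vrf_master_ifindex_stub(7) ? : 7;
	printf("oif=%d\n", oif);	/* 42: the VRF master wins */

	oif = vrf_master_ifindex_stub(3) ? : 3;
	printf("oif=%d\n", oif);	/* 3: fall back to the device */
	return 0;
}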
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index a26c401ef..43964594a 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -1839,7 +1839,7 @@ static void *irlmp_seq_hb_idx(struct irlmp_iter_state *iter, loff_t *off)
for (element = hashbin_get_first(iter->hashbin);
element != NULL;
element = hashbin_get_next(iter->hashbin)) {
- if (!off || *off-- == 0) {
+ if (!off || (*off)-- == 0) {
/* NB: hashbin left locked */
return element;
}
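The one-character fix is an operator-precedence bug: *off-- decrements the pointer and never touches the counter it points at, so the offset was never consumed. A standalone demonstration of the two forms:

#include <stdio.h>

int main(void)
{
	long vals[2] = { 5, 9 };
	long *off = &vals[1];
	int zero;

	zero = (*off-- == 0);	/* buggy: reads vals[1], then moves the pointer */
	printf("buggy: zero=%d, vals[1] still %ld, *off now %ld\n",
	       zero, vals[1], *off);

	off = &vals[1];
	zero = ((*off)-- == 0);	/* fixed: reads vals[1], then decrements it */
	printf("fixed: zero=%d, vals[1] now %ld\n", zero, vals[1]);
	return 0;
}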
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 83a706887..f9c9ecb0c 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -261,7 +261,7 @@ static int pfkey_broadcast(struct sk_buff *skb,
err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
- /* Error is cleare after succecful sending to at least one
+ /* Error is cleared after successful sending to at least one
* registered KM */
if ((broadcast_flags & BROADCAST_REGISTERED) && err)
err = err2;
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 086de496a..3891cbd2a 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -7,7 +7,6 @@ config MAC80211
select CRYPTO_CCM
select CRYPTO_GCM
select CRC32
- select AVERAGE
---help---
This option enables the hardware independent IEEE 802.11
networking stack.
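select AVERAGE can go because mac80211 now uses the header-only DECLARE_EWMA() macro instead of lib/average.c; the ewma_signal_read() caller in mesh_plink.c further down reflects the generated, type-named API. A hedged sketch of that usage — the (1024, 8) precision/weight pair is an assumption, see linux/average.h:

/* Hedged sketch of DECLARE_EWMA() usage; not a runnable unit. */
DECLARE_EWMA(signal, 1024, 8)		/* emits struct ewma_signal + helpers */

struct ewma_signal avg;
unsigned long average;

ewma_signal_init(&avg);
ewma_signal_add(&avg, 60);		/* feed samples... */
average = ewma_signal_read(&avg);	/* ...read the weighted mean */

/* mesh_plink.c below stores the negated signal, hence the
 * -ewma_signal_read(&sta->avg_signal) in rssi_threshold_check(). */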
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 3275f0188..783e891b7 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_MAC80211) += mac80211.o
# mac80211 objects
mac80211-y := \
main.o status.o \
+ driver-ops.o \
sta_info.o \
wep.o \
wpa.o \
diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c
index 4192806be..bdf0790d8 100644
--- a/net/mac80211/aes_cmac.c
+++ b/net/mac80211/aes_cmac.c
@@ -145,20 +145,3 @@ void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm)
{
crypto_free_cipher(tfm);
}
-
-void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
- u8 *k1, u8 *k2)
-{
- u8 l[AES_BLOCK_SIZE] = {};
- struct ieee80211_key *key =
- container_of(keyconf, struct ieee80211_key, conf);
-
- crypto_cipher_encrypt_one(key->u.aes_cmac.tfm, l, l);
-
- memcpy(k1, l, AES_BLOCK_SIZE);
- gf_mulx(k1);
-
- memcpy(k2, k1, AES_BLOCK_SIZE);
- gf_mulx(k2);
-}
-EXPORT_SYMBOL(ieee80211_aes_cmac_calculate_k1_k2);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index bf7023f6c..7a77a1470 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1019,6 +1019,65 @@ static int sta_apply_auth_flags(struct ieee80211_local *local,
return 0;
}
+static void sta_apply_mesh_params(struct ieee80211_local *local,
+ struct sta_info *sta,
+ struct station_parameters *params)
+{
+#ifdef CONFIG_MAC80211_MESH
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ u32 changed = 0;
+
+ if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) {
+ switch (params->plink_state) {
+ case NL80211_PLINK_ESTAB:
+ if (sta->mesh->plink_state != NL80211_PLINK_ESTAB)
+ changed = mesh_plink_inc_estab_count(sdata);
+ sta->mesh->plink_state = params->plink_state;
+
+ ieee80211_mps_sta_status_update(sta);
+ changed |= ieee80211_mps_set_sta_local_pm(sta,
+ sdata->u.mesh.mshcfg.power_mode);
+ break;
+ case NL80211_PLINK_LISTEN:
+ case NL80211_PLINK_BLOCKED:
+ case NL80211_PLINK_OPN_SNT:
+ case NL80211_PLINK_OPN_RCVD:
+ case NL80211_PLINK_CNF_RCVD:
+ case NL80211_PLINK_HOLDING:
+ if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+ changed = mesh_plink_dec_estab_count(sdata);
+ sta->mesh->plink_state = params->plink_state;
+
+ ieee80211_mps_sta_status_update(sta);
+ changed |= ieee80211_mps_set_sta_local_pm(sta,
+ NL80211_MESH_POWER_UNKNOWN);
+ break;
+ default:
+ /* nothing */
+ break;
+ }
+ }
+
+ switch (params->plink_action) {
+ case NL80211_PLINK_ACTION_NO_ACTION:
+ /* nothing */
+ break;
+ case NL80211_PLINK_ACTION_OPEN:
+ changed |= mesh_plink_open(sta);
+ break;
+ case NL80211_PLINK_ACTION_BLOCK:
+ changed |= mesh_plink_block(sta);
+ break;
+ }
+
+ if (params->local_pm)
+ changed |= ieee80211_mps_set_sta_local_pm(sta,
+ params->local_pm);
+
+ ieee80211_mbss_info_change_notify(sdata, changed);
+#endif
+}
+
static int sta_apply_parameters(struct ieee80211_local *local,
struct sta_info *sta,
struct station_parameters *params)
@@ -1076,7 +1135,6 @@ static int sta_apply_parameters(struct ieee80211_local *local,
}
if (mask & BIT(NL80211_STA_FLAG_MFP)) {
- sta->sta.mfp = !!(set & BIT(NL80211_STA_FLAG_MFP));
if (set & BIT(NL80211_STA_FLAG_MFP))
set_sta_flag(sta, WLAN_STA_MFP);
else
@@ -1097,6 +1155,12 @@ static int sta_apply_parameters(struct ieee80211_local *local,
params->ext_capab[3] & WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH)
set_sta_flag(sta, WLAN_STA_TDLS_CHAN_SWITCH);
+ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
+ ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) &&
+ params->ext_capab_len >= 8 &&
+ params->ext_capab[7] & WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED)
+ set_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW);
+
if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD) {
sta->sta.uapsd_queues = params->uapsd_queues;
sta->sta.max_sp = params->max_sp;
@@ -1144,62 +1208,8 @@ static int sta_apply_parameters(struct ieee80211_local *local,
band, false);
}
- if (ieee80211_vif_is_mesh(&sdata->vif)) {
-#ifdef CONFIG_MAC80211_MESH
- u32 changed = 0;
-
- if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) {
- switch (params->plink_state) {
- case NL80211_PLINK_ESTAB:
- if (sta->plink_state != NL80211_PLINK_ESTAB)
- changed = mesh_plink_inc_estab_count(
- sdata);
- sta->plink_state = params->plink_state;
-
- ieee80211_mps_sta_status_update(sta);
- changed |= ieee80211_mps_set_sta_local_pm(sta,
- sdata->u.mesh.mshcfg.power_mode);
- break;
- case NL80211_PLINK_LISTEN:
- case NL80211_PLINK_BLOCKED:
- case NL80211_PLINK_OPN_SNT:
- case NL80211_PLINK_OPN_RCVD:
- case NL80211_PLINK_CNF_RCVD:
- case NL80211_PLINK_HOLDING:
- if (sta->plink_state == NL80211_PLINK_ESTAB)
- changed = mesh_plink_dec_estab_count(
- sdata);
- sta->plink_state = params->plink_state;
-
- ieee80211_mps_sta_status_update(sta);
- changed |= ieee80211_mps_set_sta_local_pm(sta,
- NL80211_MESH_POWER_UNKNOWN);
- break;
- default:
- /* nothing */
- break;
- }
- }
-
- switch (params->plink_action) {
- case NL80211_PLINK_ACTION_NO_ACTION:
- /* nothing */
- break;
- case NL80211_PLINK_ACTION_OPEN:
- changed |= mesh_plink_open(sta);
- break;
- case NL80211_PLINK_ACTION_BLOCK:
- changed |= mesh_plink_block(sta);
- break;
- }
-
- if (params->local_pm)
- changed |=
- ieee80211_mps_set_sta_local_pm(sta,
- params->local_pm);
- ieee80211_mbss_info_change_notify(sdata, changed);
-#endif
- }
+ if (ieee80211_vif_is_mesh(&sdata->vif))
+ sta_apply_mesh_params(local, sta, params);
/* set the STA state after all sta info from usermode has been set */
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
@@ -2358,6 +2368,8 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
const u8 *ap;
enum ieee80211_smps_mode old_req;
int err;
+ struct sta_info *sta;
+ bool tdls_peer_found = false;
lockdep_assert_held(&sdata->wdev.mtx);
@@ -2382,11 +2394,22 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
ap = sdata->u.mgd.associated->bssid;
+ rcu_read_lock();
+ list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
+ if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
+ !test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+ continue;
+
+ tdls_peer_found = true;
+ break;
+ }
+ rcu_read_unlock();
+
if (smps_mode == IEEE80211_SMPS_AUTOMATIC) {
- if (sdata->u.mgd.powersave)
- smps_mode = IEEE80211_SMPS_DYNAMIC;
- else
+ if (tdls_peer_found || !sdata->u.mgd.powersave)
smps_mode = IEEE80211_SMPS_OFF;
+ else
+ smps_mode = IEEE80211_SMPS_DYNAMIC;
}
/* send SM PS frame to AP */
@@ -2394,6 +2417,8 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
ap, ap);
if (err)
sdata->u.mgd.req_smps = old_req;
+ else if (smps_mode != IEEE80211_SMPS_OFF && tdls_peer_found)
+ ieee80211_teardown_tdls_peers(sdata);
return err;
}
@@ -2443,8 +2468,13 @@ static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy,
rssi_hyst == bss_conf->cqm_rssi_hyst)
return 0;
+ if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER &&
+ !(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
+ return -EOPNOTSUPP;
+
bss_conf->cqm_rssi_thold = rssi_thold;
bss_conf->cqm_rssi_hyst = rssi_hyst;
+ sdata->u.mgd.last_cqm_event_signal = 0;
/* tell the driver upon association, unless already associated */
if (sdata->u.mgd.associated &&
@@ -2479,16 +2509,28 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
sdata->rc_rateidx_mask[i] = mask->control[i].legacy;
memcpy(sdata->rc_rateidx_mcs_mask[i], mask->control[i].ht_mcs,
sizeof(mask->control[i].ht_mcs));
+ memcpy(sdata->rc_rateidx_vht_mcs_mask[i],
+ mask->control[i].vht_mcs,
+ sizeof(mask->control[i].vht_mcs));
sdata->rc_has_mcs_mask[i] = false;
+ sdata->rc_has_vht_mcs_mask[i] = false;
if (!sband)
continue;
- for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++)
+ for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) {
if (~sdata->rc_rateidx_mcs_mask[i][j]) {
sdata->rc_has_mcs_mask[i] = true;
break;
}
+ }
+
+ for (j = 0; j < NL80211_VHT_NSS_MAX; j++) {
+ if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) {
+ sdata->rc_has_vht_mcs_mask[i] = true;
+ break;
+ }
+ }
}
return 0;
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index f01c18a31..1d1b9b7bd 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -190,7 +190,7 @@ ieee80211_find_reservation_chanctx(struct ieee80211_local *local,
return NULL;
}
-static enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta)
+enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta)
{
switch (sta->bandwidth) {
case IEEE80211_STA_RX_BW_20:
@@ -264,9 +264,17 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
case NL80211_IFTYPE_AP_VLAN:
width = ieee80211_get_max_required_bw(sdata);
break;
+ case NL80211_IFTYPE_STATION:
+ /*
+ * The ap's sta->bandwidth is not set yet at this
+ * point, so take the width from the chandef, but
+ * account also for TDLS peers
+ */
+ width = max(vif->bss_conf.chandef.width,
+ ieee80211_get_max_required_bw(sdata));
+ break;
case NL80211_IFTYPE_P2P_DEVICE:
continue;
- case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_MESH_POINT:
@@ -554,12 +562,13 @@ static void ieee80211_free_chanctx(struct ieee80211_local *local,
kfree_rcu(ctx, rcu_head);
}
-static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
- struct ieee80211_chanctx *ctx)
+void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
+ struct ieee80211_chanctx *ctx)
{
struct ieee80211_chanctx_conf *conf = &ctx->conf;
struct ieee80211_sub_if_data *sdata;
const struct cfg80211_chan_def *compat = NULL;
+ struct sta_info *sta;
lockdep_assert_held(&local->chanctx_mtx);
@@ -581,6 +590,20 @@ static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
if (WARN_ON_ONCE(!compat))
break;
}
+
+ /* TDLS peers can sometimes affect the chandef width */
+ list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ if (!sta->uploaded ||
+ !test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) ||
+ !test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
+ !sta->tdls_chandef.chan)
+ continue;
+
+ compat = cfg80211_chandef_compatible(&sta->tdls_chandef,
+ compat);
+ if (WARN_ON_ONCE(!compat))
+ break;
+ }
rcu_read_unlock();
if (!compat)
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 3ea8b7de9..1560c8482 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -122,6 +122,7 @@ static const char *hw_flag_names[NUM_IEEE80211_HW_FLAGS + 1] = {
FLAG(CHANCTX_STA_CSA),
FLAG(SUPPORTS_CLONED_SKBS),
FLAG(SINGLE_SCAN_ON_ALL_BANDS),
+ FLAG(TDLS_WIDER_BW),
/* keep last for the build bug below */
(void *)0x1
@@ -148,7 +149,7 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
for (i = 0; i < NUM_IEEE80211_HW_FLAGS; i++) {
if (test_bit(i, local->hw.flags))
- pos += scnprintf(pos, end - pos, "%s",
+ pos += scnprintf(pos, end - pos, "%s\n",
hw_flag_names[i]);
}
@@ -277,7 +278,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
DEBUGFS_STATS_ADD(rx_handlers_queued);
DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc);
DEBUGFS_STATS_ADD(rx_handlers_drop_defrag);
- DEBUGFS_STATS_ADD(rx_handlers_drop_short);
DEBUGFS_STATS_ADD(tx_expand_skb_head);
DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned);
DEBUGFS_STATS_ADD(rx_expand_skb_head_defrag);
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index e82bf1e9d..702ca122c 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -57,7 +57,6 @@ KEY_CONF_FILE(keylen, D);
KEY_CONF_FILE(keyidx, D);
KEY_CONF_FILE(hw_key_idx, D);
KEY_FILE(flags, X);
-KEY_FILE(tx_rx_count, D);
KEY_READ(ifindex, sdata->name, "%s\n");
KEY_OPS(ifindex);
@@ -310,7 +309,6 @@ void ieee80211_debugfs_key_add(struct ieee80211_key *key)
DEBUGFS_ADD(flags);
DEBUGFS_ADD(keyidx);
DEBUGFS_ADD(hw_key_idx);
- DEBUGFS_ADD(tx_rx_count);
DEBUGFS_ADD(algorithm);
DEBUGFS_ADD(tx_spec);
DEBUGFS_ADD(rx_spec);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index c09c0131b..1021e87c0 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -186,6 +186,38 @@ IEEE80211_IF_FILE(rc_rateidx_mcs_mask_2ghz,
IEEE80211_IF_FILE(rc_rateidx_mcs_mask_5ghz,
rc_rateidx_mcs_mask[IEEE80211_BAND_5GHZ], HEXARRAY);
+static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_2ghz(
+ const struct ieee80211_sub_if_data *sdata,
+ char *buf, int buflen)
+{
+ int i, len = 0;
+ const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[IEEE80211_BAND_2GHZ];
+
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+ len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]);
+ len += scnprintf(buf + len, buflen - len, "\n");
+
+ return len;
+}
+
+IEEE80211_IF_FILE_R(rc_rateidx_vht_mcs_mask_2ghz);
+
+static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_5ghz(
+ const struct ieee80211_sub_if_data *sdata,
+ char *buf, int buflen)
+{
+ int i, len = 0;
+ const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[IEEE80211_BAND_5GHZ];
+
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+ len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]);
+ len += scnprintf(buf + len, buflen - len, "\n");
+
+ return len;
+}
+
+IEEE80211_IF_FILE_R(rc_rateidx_vht_mcs_mask_5ghz);
+
IEEE80211_IF_FILE(flags, flags, HEX);
IEEE80211_IF_FILE(state, state, LHEX);
IEEE80211_IF_FILE(txpower, vif.bss_conf.txpower, DEC);
@@ -565,6 +597,8 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata)
DEBUGFS_ADD(rc_rateidx_mask_5ghz);
DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
+ DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_2ghz);
+ DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz);
DEBUGFS_ADD(hw_queues);
}
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
new file mode 100644
index 000000000..267c3b1ca
--- /dev/null
+++ b/net/mac80211/driver-ops.c
@@ -0,0 +1,41 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <net/mac80211.h>
+#include "ieee80211_i.h"
+#include "trace.h"
+#include "driver-ops.h"
+
+__must_check
+int drv_sta_state(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ int ret = 0;
+
+ might_sleep();
+
+ sdata = get_bss_sdata(sdata);
+ if (!check_sdata_in_driver(sdata))
+ return -EIO;
+
+ trace_drv_sta_state(local, sdata, &sta->sta, old_state, new_state);
+ if (local->ops->sta_state) {
+ ret = local->ops->sta_state(&local->hw, &sdata->vif, &sta->sta,
+ old_state, new_state);
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC) {
+ ret = drv_sta_add(local, sdata, &sta->sta);
+ if (ret == 0)
+ sta->uploaded = true;
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH) {
+ drv_sta_remove(local, sdata, &sta->sta);
+ }
+ trace_drv_return_int(local, ret);
+ return ret;
+}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 32a2e707e..02d91332d 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -573,37 +573,12 @@ static inline void drv_sta_pre_rcu_remove(struct ieee80211_local *local,
trace_drv_return_void(local);
}
-static inline __must_check
+__must_check
int drv_sta_state(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
enum ieee80211_sta_state old_state,
- enum ieee80211_sta_state new_state)
-{
- int ret = 0;
-
- might_sleep();
-
- sdata = get_bss_sdata(sdata);
- if (!check_sdata_in_driver(sdata))
- return -EIO;
-
- trace_drv_sta_state(local, sdata, &sta->sta, old_state, new_state);
- if (local->ops->sta_state) {
- ret = local->ops->sta_state(&local->hw, &sdata->vif, &sta->sta,
- old_state, new_state);
- } else if (old_state == IEEE80211_STA_AUTH &&
- new_state == IEEE80211_STA_ASSOC) {
- ret = drv_sta_add(local, sdata, &sta->sta);
- if (ret == 0)
- sta->uploaded = true;
- } else if (old_state == IEEE80211_STA_ASSOC &&
- new_state == IEEE80211_STA_AUTH) {
- drv_sta_remove(local, sdata, &sta->sta);
- }
- trace_drv_return_int(local, ret);
- return ret;
-}
+ enum ieee80211_sta_state new_state);
static inline void drv_sta_rc_update(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index b12f61507..6e52659f9 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -84,13 +84,13 @@ struct ieee80211_local;
#define IEEE80211_DEAUTH_FRAME_LEN (24 /* hdr */ + 2 /* reason */)
struct ieee80211_fragment_entry {
- unsigned long first_frag_time;
- unsigned int seq;
- unsigned int rx_queue;
- unsigned int last_frag;
- unsigned int extra_len;
struct sk_buff_head skb_list;
- int ccmp; /* Whether fragments were encrypted with CCMP */
+ unsigned long first_frag_time;
+ u16 seq;
+ u16 extra_len;
+ u16 last_frag;
+ u8 rx_queue;
+ bool ccmp; /* Whether fragments were encrypted with CCMP */
u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
};
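The fragment-entry struct is repacked: the widest members lead, the counters narrow to u16/u8, and ccmp becomes a bool, trimming padding. A standalone before/after sizeof probe — skb_list is stood in by a pointer, so the absolute sizes differ from the kernel's; only the relative saving is the point:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct frag_old {			/* pre-patch layout */
	unsigned long first_frag_time;
	unsigned int seq, rx_queue, last_frag, extra_len;
	void *skb_list;			/* pointer stand-in */
	int ccmp;
	uint8_t last_pn[6];
};

struct frag_new {			/* post-patch layout */
	void *skb_list;			/* pointer stand-in */
	unsigned long first_frag_time;
	uint16_t seq, extra_len, last_frag;
	uint8_t rx_queue;
	bool ccmp;
	uint8_t last_pn[6];
};

int main(void)
{
	printf("old=%zu bytes, new=%zu bytes\n",
	       sizeof(struct frag_old), sizeof(struct frag_new));
	return 0;
}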
@@ -181,7 +181,6 @@ typedef unsigned __bitwise__ ieee80211_rx_result;
/**
* enum ieee80211_packet_rx_flags - packet RX flags
- * @IEEE80211_RX_FRAGMENTED: fragmented frame
* @IEEE80211_RX_AMSDU: a-MSDU packet
* @IEEE80211_RX_MALFORMED_ACTION_FRM: action frame is malformed
* @IEEE80211_RX_DEFERRED_RELEASE: frame was subjected to receive reordering
@@ -190,7 +189,6 @@ typedef unsigned __bitwise__ ieee80211_rx_result;
* @rx_flags field of &struct ieee80211_rx_status.
*/
enum ieee80211_packet_rx_flags {
- IEEE80211_RX_FRAGMENTED = BIT(2),
IEEE80211_RX_AMSDU = BIT(3),
IEEE80211_RX_MALFORMED_ACTION_FRM = BIT(4),
IEEE80211_RX_DEFERRED_RELEASE = BIT(5),
@@ -202,8 +200,6 @@ enum ieee80211_packet_rx_flags {
* @IEEE80211_RX_CMNTR: received on cooked monitor already
* @IEEE80211_RX_BEACON_REPORTED: This frame was already reported
* to cfg80211_report_obss_beacon().
- * @IEEE80211_RX_REORDER_TIMER: this frame is released by the
- * reorder buffer timeout timer, not the normal RX path
*
* These flags are used across handling multiple interfaces
* for a single frame.
@@ -211,10 +207,10 @@ enum ieee80211_packet_rx_flags {
enum ieee80211_rx_flags {
IEEE80211_RX_CMNTR = BIT(0),
IEEE80211_RX_BEACON_REPORTED = BIT(1),
- IEEE80211_RX_REORDER_TIMER = BIT(2),
};
struct ieee80211_rx_data {
+ struct napi_struct *napi;
struct sk_buff *skb;
struct ieee80211_local *local;
struct ieee80211_sub_if_data *sdata;
@@ -725,6 +721,7 @@ struct ieee80211_if_mesh {
* back to wireless media and to the local net stack.
* @IEEE80211_SDATA_DISCONNECT_RESUME: Disconnect after resume.
* @IEEE80211_SDATA_IN_DRIVER: indicates interface was added to driver
+ * @IEEE80211_SDATA_MU_MIMO_OWNER: indicates interface owns MU-MIMO capability
*/
enum ieee80211_sub_if_data_flags {
IEEE80211_SDATA_ALLMULTI = BIT(0),
@@ -732,6 +729,7 @@ enum ieee80211_sub_if_data_flags {
IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(3),
IEEE80211_SDATA_DISCONNECT_RESUME = BIT(4),
IEEE80211_SDATA_IN_DRIVER = BIT(5),
+ IEEE80211_SDATA_MU_MIMO_OWNER = BIT(6),
};
/**
@@ -903,6 +901,9 @@ struct ieee80211_sub_if_data {
bool rc_has_mcs_mask[IEEE80211_NUM_BANDS];
u8 rc_rateidx_mcs_mask[IEEE80211_NUM_BANDS][IEEE80211_HT_MCS_MASK_LEN];
+ bool rc_has_vht_mcs_mask[IEEE80211_NUM_BANDS];
+ u16 rc_rateidx_vht_mcs_mask[IEEE80211_NUM_BANDS][NL80211_VHT_NSS_MAX];
+
union {
struct ieee80211_if_ap ap;
struct ieee80211_if_wds wds;
@@ -1010,7 +1011,6 @@ enum sdata_queue_type {
IEEE80211_SDATA_QUEUE_AGG_STOP = 2,
IEEE80211_SDATA_QUEUE_RX_AGG_START = 3,
IEEE80211_SDATA_QUEUE_RX_AGG_STOP = 4,
- IEEE80211_SDATA_QUEUE_TDLS_CHSW = 5,
};
enum {
@@ -1286,7 +1286,6 @@ struct ieee80211_local {
unsigned int rx_handlers_queued;
unsigned int rx_handlers_drop_nullfunc;
unsigned int rx_handlers_drop_defrag;
- unsigned int rx_handlers_drop_short;
unsigned int tx_expand_skb_head;
unsigned int tx_expand_skb_head_cloned;
unsigned int rx_expand_skb_head_defrag;
@@ -1348,14 +1347,16 @@ struct ieee80211_local {
struct ieee80211_sub_if_data __rcu *p2p_sdata;
- struct napi_struct *napi;
-
/* virtual monitor interface */
struct ieee80211_sub_if_data __rcu *monitor_sdata;
struct cfg80211_chan_def monitor_chandef;
/* extended capabilities provided by mac80211 */
u8 ext_capa[8];
+
+ /* TDLS channel switch */
+ struct work_struct tdls_chsw_work;
+ struct sk_buff_head skb_queue_tdls_chsw;
};
static inline struct ieee80211_sub_if_data *
@@ -1715,6 +1716,8 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
enum ieee80211_band band, bool nss_only);
void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta_vht_cap *vht_cap);
+void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
+ u16 vht_mask[NL80211_VHT_NSS_MAX]);
/* Spectrum management */
void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
@@ -1763,8 +1766,6 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
/* utility functions/constants */
extern const void *const mac80211_wiphy_privid; /* for wiphy privid */
-u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
- enum nl80211_iftype type);
int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
int rate, int erp, int short_preamble,
int shift);
@@ -2042,6 +2043,9 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
enum ieee80211_chanctx_mode chanmode,
u8 radar_detect);
int ieee80211_max_num_channels(struct ieee80211_local *local);
+enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta);
+void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
+ struct ieee80211_chanctx *ctx);
/* TDLS */
int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
@@ -2058,8 +2062,8 @@ int ieee80211_tdls_channel_switch(struct wiphy *wiphy, struct net_device *dev,
void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy,
struct net_device *dev,
const u8 *addr);
-void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb);
+void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata);
+void ieee80211_tdls_chsw_work(struct work_struct *wk);
extern const struct ethtool_ops ieee80211_ethtool_ops;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 553ac6dd4..6964fc6a8 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1242,8 +1242,6 @@ static void ieee80211_iface_work(struct work_struct *work)
WLAN_BACK_RECIPIENT, 0,
false);
mutex_unlock(&local->sta_mtx);
- } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_TDLS_CHSW) {
- ieee80211_process_tdls_channel_switch(sdata, skb);
} else if (ieee80211_is_action(mgmt->frame_control) &&
mgmt->u.action.category == WLAN_CATEGORY_BACK) {
int len = skb->len;
@@ -1790,13 +1788,23 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
sband = local->hw.wiphy->bands[i];
sdata->rc_rateidx_mask[i] =
sband ? (1 << sband->n_bitrates) - 1 : 0;
- if (sband)
+ if (sband) {
+ __le16 cap;
+ u16 *vht_rate_mask;
+
memcpy(sdata->rc_rateidx_mcs_mask[i],
sband->ht_cap.mcs.rx_mask,
sizeof(sdata->rc_rateidx_mcs_mask[i]));
- else
+
+ cap = sband->vht_cap.vht_mcs.rx_mcs_map;
+ vht_rate_mask = sdata->rc_rateidx_vht_mcs_mask[i];
+ ieee80211_get_vht_mask_from_cap(cap, vht_rate_mask);
+ } else {
memset(sdata->rc_rateidx_mcs_mask[i], 0,
sizeof(sdata->rc_rateidx_mcs_mask[i]));
+ memset(sdata->rc_rateidx_vht_mcs_mask[i], 0,
+ sizeof(sdata->rc_rateidx_vht_mcs_mask[i]));
+ }
}
ieee80211_set_default_queues(sdata);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index b22df3a79..44388d6a1 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -336,7 +336,6 @@ static void ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
ieee80211_check_fast_xmit(sta);
} else {
rcu_assign_pointer(sta->gtk[idx], new);
- sta->gtk_idx = idx;
}
} else {
defunikey = old &&
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 3f4f9eaac..9951ef063 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -115,9 +115,6 @@ struct ieee80211_key {
} gen;
} u;
- /* number of times this key has been used */
- int tx_rx_count;
-
#ifdef CONFIG_MAC80211_DEBUGFS
struct {
struct dentry *stalink;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 3c63468b4..ff79a13d2 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -629,6 +629,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
INIT_WORK(&local->sched_scan_stopped_work,
ieee80211_sched_scan_stopped_work);
+ INIT_WORK(&local->tdls_chsw_work, ieee80211_tdls_chsw_work);
+
spin_lock_init(&local->ack_status_lock);
idr_init(&local->ack_status_frames);
@@ -645,6 +647,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
skb_queue_head_init(&local->skb_queue);
skb_queue_head_init(&local->skb_queue_unreliable);
+ skb_queue_head_init(&local->skb_queue_tdls_chsw);
ieee80211_alloc_led_names(local);
@@ -1132,18 +1135,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL(ieee80211_register_hw);
-void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi,
- struct net_device *napi_dev,
- int (*poll)(struct napi_struct *, int),
- int weight)
-{
- struct ieee80211_local *local = hw_to_local(hw);
-
- netif_napi_add(napi_dev, napi, poll, weight);
- local->napi = napi;
-}
-EXPORT_SYMBOL_GPL(ieee80211_napi_add);
-
void ieee80211_unregister_hw(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
@@ -1173,6 +1164,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
cancel_work_sync(&local->restart_work);
cancel_work_sync(&local->reconfig_filter);
+ cancel_work_sync(&local->tdls_chsw_work);
flush_work(&local->sched_scan_stopped_work);
ieee80211_clear_tx_pending(local);
@@ -1183,6 +1175,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
wiphy_warn(local->hw.wiphy, "skb_queue not empty\n");
skb_queue_purge(&local->skb_queue);
skb_queue_purge(&local->skb_queue_unreliable);
+ skb_queue_purge(&local->skb_queue_tdls_chsw);
destroy_workqueue(local->workqueue);
wiphy_unregister(local->hw.wiphy);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 817098add..e06a5ca7c 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -158,7 +158,7 @@ void mesh_sta_cleanup(struct sta_info *sta)
changed = mesh_accept_plinks_update(sdata);
if (!sdata->u.mesh.user_mpm) {
changed |= mesh_plink_deactivate(sta);
- del_timer_sync(&sta->plink_timer);
+ del_timer_sync(&sta->mesh->plink_timer);
}
if (changed)
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 085edc1d0..d80e0a4c1 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -19,15 +19,6 @@
#define MAX_PREQ_QUEUE_LEN 64
-/* Destination only */
-#define MP_F_DO 0x1
-/* Reply and forward */
-#define MP_F_RF 0x2
-/* Unknown Sequence Number */
-#define MP_F_USN 0x01
-/* Reason code Present */
-#define MP_F_RCODE 0x02
-
static void mesh_queue_preq(struct mesh_path *, u8);
static inline u32 u32_field_get(const u8 *preq_elem, int offset, bool ae)
@@ -79,6 +70,12 @@ static inline u16 u16_field_get(const u8 *preq_elem, int offset, bool ae)
#define MSEC_TO_TU(x) (x*1000/1024)
#define SN_GT(x, y) ((s32)(y - x) < 0)
#define SN_LT(x, y) ((s32)(x - y) < 0)
+#define MAX_SANE_SN_DELTA 32
+
+static inline u32 SN_DELTA(u32 x, u32 y)
+{
+ return x >= y ? x - y : y - x;
+}
#define net_traversal_jiffies(s) \
msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
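SN_GT()/SN_LT() compare HWMP sequence numbers through a signed difference so ordering survives u32 wraparound, and the new SN_DELTA()/MAX_SANE_SN_DELTA pair spots a peer whose SN jumped implausibly far, which the route-info code below treats as a likely reboot. A standalone demo:

#include <stdio.h>
#include <stdint.h>

#define SN_GT(x, y) ((int32_t)((y) - (x)) < 0)
#define MAX_SANE_SN_DELTA 32

static uint32_t sn_delta(uint32_t x, uint32_t y)
{
	return x >= y ? x - y : y - x;
}

int main(void)
{
	/* 2 is "newer" than 0xfffffffe: the u32 space wrapped */
	printf("SN_GT(2, 0xfffffffe) = %d\n", SN_GT(2u, 0xfffffffeu));
	/* an absurd jump in SN suggests the peer rebooted */
	printf("bounced = %d\n",
	       sn_delta(100, 2000000000u) > MAX_SANE_SN_DELTA);
	return 0;
}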
@@ -279,15 +276,10 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
*pos++ = ttl;
/* number of destinations */
*pos++ = 1;
- /*
- * flags bit, bit 1 is unset if we know the sequence number and
- * bit 2 is set if we have a reason code
+ /* Flags field has AE bit only as defined in
+ * sec 8.4.2.117 IEEE802.11-2012
*/
*pos = 0;
- if (!target_sn)
- *pos |= MP_F_USN;
- if (target_rcode)
- *pos |= MP_F_RCODE;
pos++;
memcpy(pos, target, ETH_ALEN);
pos += ETH_ALEN;
@@ -316,8 +308,9 @@ void ieee80211s_update_metric(struct ieee80211_local *local,
failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
/* moving average, scaled to 100 */
- sta->fail_avg = ((80 * sta->fail_avg + 5) / 100 + 20 * failed);
- if (sta->fail_avg > 95)
+ sta->mesh->fail_avg =
+ ((80 * sta->mesh->fail_avg + 5) / 100 + 20 * failed);
+ if (sta->mesh->fail_avg > 95)
mesh_plink_broken(sta);
}
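The metric update is an exponentially weighted failure average on a 0..100 scale: each transmission keeps 80% of the old value (the +5 rounds) and adds 20 on failure, so sustained failure drives fail_avg toward 100 and across the >95 mesh_plink_broken() threshold. A standalone run of the recurrence under constant failure:

#include <stdio.h>

int main(void)
{
	unsigned int fail_avg = 0, step;

	for (step = 0; step < 20; step++) {
		fail_avg = (80 * fail_avg + 5) / 100 + 20;	/* every TX fails */
		printf("step %2u: fail_avg=%3u%s\n", step, fail_avg,
		       fail_avg > 95 ? "  -> mesh_plink_broken()" : "");
	}
	return 0;
}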
@@ -333,7 +326,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
u32 tx_time, estimated_retx;
u64 result;
- if (sta->fail_avg >= 100)
+ if (sta->mesh->fail_avg >= 100)
return MAX_METRIC;
sta_set_rate_info_tx(sta, &sta->last_tx_rate, &rinfo);
@@ -341,7 +334,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
if (WARN_ON(!rate))
return MAX_METRIC;
- err = (sta->fail_avg << ARITH_SHIFT) / 100;
+ err = (sta->mesh->fail_avg << ARITH_SHIFT) / 100;
/* bitrate is in units of 100 Kbps, while we need rate in units of
* 1Mbps. This will be corrected on tx_time computation.
@@ -441,6 +434,26 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
process = false;
fresh_info = false;
}
+ } else if (!(mpath->flags & MESH_PATH_ACTIVE)) {
+ bool have_sn, newer_sn, bounced;
+
+ have_sn = mpath->flags & MESH_PATH_SN_VALID;
+ newer_sn = have_sn && SN_GT(orig_sn, mpath->sn);
+ bounced = have_sn &&
+ (SN_DELTA(orig_sn, mpath->sn) >
+ MAX_SANE_SN_DELTA);
+
+ if (!have_sn || newer_sn) {
+ /* if SN is newer than what we had
+ * then we can take it */;
+ } else if (bounced) {
+ /* if SN is way different than what
+ * we had then assume the other side
+ * rebooted or restarted */;
+ } else {
+ process = false;
+ fresh_info = false;
+ }
}
} else {
mpath = mesh_path_add(sdata, orig_addr);
@@ -570,15 +583,13 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
SN_LT(mpath->sn, target_sn)) {
mpath->sn = target_sn;
mpath->flags |= MESH_PATH_SN_VALID;
- } else if ((!(target_flags & MP_F_DO)) &&
+ } else if ((!(target_flags & IEEE80211_PREQ_TO_FLAG)) &&
(mpath->flags & MESH_PATH_ACTIVE)) {
reply = true;
target_metric = mpath->metric;
target_sn = mpath->sn;
- if (target_flags & MP_F_RF)
- target_flags |= MP_F_DO;
- else
- forward = false;
+ /* Case E2 of sec 13.10.9.3 IEEE 802.11-2012 */
+ target_flags |= IEEE80211_PREQ_TO_FLAG;
}
}
rcu_read_unlock();
@@ -736,9 +747,12 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
if (mpath->flags & MESH_PATH_ACTIVE &&
ether_addr_equal(ta, sta->sta.addr) &&
(!(mpath->flags & MESH_PATH_SN_VALID) ||
- SN_GT(target_sn, mpath->sn))) {
+ SN_GT(target_sn, mpath->sn) || target_sn == 0)) {
mpath->flags &= ~MESH_PATH_ACTIVE;
- mpath->sn = target_sn;
+ if (target_sn != 0)
+ mpath->sn = target_sn;
+ else
+ mpath->sn += 1;
spin_unlock_bh(&mpath->state_lock);
if (!ifmsh->mshcfg.dot11MeshForwarding)
goto endperr;
@@ -862,7 +876,7 @@ void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
rcu_read_lock();
sta = sta_info_get(sdata, mgmt->sa);
- if (!sta || sta->plink_state != NL80211_PLINK_ESTAB) {
+ if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
rcu_read_unlock();
return;
}
@@ -974,7 +988,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct mesh_preq_queue *preq_node;
struct mesh_path *mpath;
- u8 ttl, target_flags;
+ u8 ttl, target_flags = 0;
const u8 *da;
u32 lifetime;
@@ -1033,9 +1047,9 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
}
if (preq_node->flags & PREQ_Q_F_REFRESH)
- target_flags = MP_F_DO;
+ target_flags |= IEEE80211_PREQ_TO_FLAG;
else
- target_flags = MP_F_RF;
+ target_flags &= ~IEEE80211_PREQ_TO_FLAG;
spin_unlock_bh(&mpath->state_lock);
da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr;
@@ -1176,7 +1190,9 @@ void mesh_path_timer(unsigned long data)
spin_unlock_bh(&mpath->state_lock);
mesh_queue_preq(mpath, 0);
} else {
- mpath->flags = 0;
+ mpath->flags &= ~(MESH_PATH_RESOLVING |
+ MESH_PATH_RESOLVED |
+ MESH_PATH_REQ_QUEUED);
mpath->exp_time = jiffies;
spin_unlock_bh(&mpath->state_lock);
if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 3b5909941..58384642e 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -13,10 +13,11 @@
#include "rate.h"
#include "mesh.h"
+#define PLINK_CNF_AID(mgmt) ((mgmt)->u.action.u.self_prot.variable + 2)
#define PLINK_GET_LLID(p) (p + 2)
#define PLINK_GET_PLID(p) (p + 4)
-#define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \
+#define mod_plink_timer(s, t) (mod_timer(&s->mesh->plink_timer, \
jiffies + msecs_to_jiffies(t)))
enum plink_event {
@@ -53,18 +54,13 @@ static const char * const mplevents[] = {
[CLS_IGNR] = "CLS_IGNR"
};
-static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
- enum ieee80211_self_protected_actioncode action,
- u8 *da, u16 llid, u16 plid, u16 reason);
-
-
/* We only need a valid sta if user configured a minimum rssi_threshold. */
static bool rssi_threshold_check(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
{
s32 rssi_threshold = sdata->u.mesh.mshcfg.rssi_threshold;
return rssi_threshold == 0 ||
- (sta && (s8) -ewma_read(&sta->avg_signal) > rssi_threshold);
+ (sta && (s8) -ewma_signal_read(&sta->avg_signal) > rssi_threshold);
}
/**
@@ -72,14 +68,14 @@ static bool rssi_threshold_check(struct ieee80211_sub_if_data *sdata,
*
* @sta: mesh peer link to restart
*
- * Locking: this function must be called holding sta->plink_lock
+ * Locking: this function must be called holding sta->mesh->plink_lock
*/
static inline void mesh_plink_fsm_restart(struct sta_info *sta)
{
- lockdep_assert_held(&sta->plink_lock);
- sta->plink_state = NL80211_PLINK_LISTEN;
- sta->llid = sta->plid = sta->reason = 0;
- sta->plink_retries = 0;
+ lockdep_assert_held(&sta->mesh->plink_lock);
+ sta->mesh->plink_state = NL80211_PLINK_LISTEN;
+ sta->mesh->llid = sta->mesh->plid = sta->mesh->reason = 0;
+ sta->mesh->plink_retries = 0;
}
/*
@@ -119,7 +115,7 @@ static u32 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata)
rcu_read_lock();
list_for_each_entry_rcu(sta, &local->sta_list, list) {
if (sdata != sta->sdata ||
- sta->plink_state != NL80211_PLINK_ESTAB)
+ sta->mesh->plink_state != NL80211_PLINK_ESTAB)
continue;
short_slot = false;
@@ -169,7 +165,7 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
rcu_read_lock();
list_for_each_entry_rcu(sta, &local->sta_list, list) {
if (sdata != sta->sdata ||
- sta->plink_state != NL80211_PLINK_ESTAB)
+ sta->mesh->plink_state != NL80211_PLINK_ESTAB)
continue;
if (sta->sta.bandwidth > IEEE80211_STA_RX_BW_20)
@@ -204,59 +200,8 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
return BSS_CHANGED_HT;
}
-/**
- * __mesh_plink_deactivate - deactivate mesh peer link
- *
- * @sta: mesh peer link to deactivate
- *
- * All mesh paths with this peer as next hop will be flushed
- * Returns beacon changed flag if the beacon content changed.
- *
- * Locking: the caller must hold sta->plink_lock
- */
-static u32 __mesh_plink_deactivate(struct sta_info *sta)
-{
- struct ieee80211_sub_if_data *sdata = sta->sdata;
- u32 changed = 0;
-
- lockdep_assert_held(&sta->plink_lock);
-
- if (sta->plink_state == NL80211_PLINK_ESTAB)
- changed = mesh_plink_dec_estab_count(sdata);
- sta->plink_state = NL80211_PLINK_BLOCKED;
- mesh_path_flush_by_nexthop(sta);
-
- ieee80211_mps_sta_status_update(sta);
- changed |= ieee80211_mps_set_sta_local_pm(sta,
- NL80211_MESH_POWER_UNKNOWN);
-
- return changed;
-}
-
-/**
- * mesh_plink_deactivate - deactivate mesh peer link
- *
- * @sta: mesh peer link to deactivate
- *
- * All mesh paths with this peer as next hop will be flushed
- */
-u32 mesh_plink_deactivate(struct sta_info *sta)
-{
- struct ieee80211_sub_if_data *sdata = sta->sdata;
- u32 changed;
-
- spin_lock_bh(&sta->plink_lock);
- changed = __mesh_plink_deactivate(sta);
- sta->reason = WLAN_REASON_MESH_PEER_CANCELED;
- mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
- sta->sta.addr, sta->llid, sta->plid,
- sta->reason);
- spin_unlock_bh(&sta->plink_lock);
-
- return changed;
-}
-
static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta,
enum ieee80211_self_protected_actioncode action,
u8 *da, u16 llid, u16 plid, u16 reason)
{
@@ -306,7 +251,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
if (action == WLAN_SP_MESH_PEERING_CONFIRM) {
/* AID */
pos = skb_put(skb, 2);
- put_unaligned_le16(plid, pos);
+ put_unaligned_le16(sta->sta.aid, pos);
}
if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
@@ -375,6 +320,58 @@ free:
return err;
}
+/**
+ * __mesh_plink_deactivate - deactivate mesh peer link
+ *
+ * @sta: mesh peer link to deactivate
+ *
+ * All mesh paths with this peer as next hop will be flushed
+ * Returns beacon changed flag if the beacon content changed.
+ *
+ * Locking: the caller must hold sta->mesh->plink_lock
+ */
+static u32 __mesh_plink_deactivate(struct sta_info *sta)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ u32 changed = 0;
+
+ lockdep_assert_held(&sta->mesh->plink_lock);
+
+ if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+ changed = mesh_plink_dec_estab_count(sdata);
+ sta->mesh->plink_state = NL80211_PLINK_BLOCKED;
+ mesh_path_flush_by_nexthop(sta);
+
+ ieee80211_mps_sta_status_update(sta);
+ changed |= ieee80211_mps_set_sta_local_pm(sta,
+ NL80211_MESH_POWER_UNKNOWN);
+
+ return changed;
+}
+
+/**
+ * mesh_plink_deactivate - deactivate mesh peer link
+ *
+ * @sta: mesh peer link to deactivate
+ *
+ * All mesh paths with this peer as next hop will be flushed
+ */
+u32 mesh_plink_deactivate(struct sta_info *sta)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ u32 changed;
+
+ spin_lock_bh(&sta->mesh->plink_lock);
+ changed = __mesh_plink_deactivate(sta);
+ sta->mesh->reason = WLAN_REASON_MESH_PEER_CANCELED;
+ mesh_plink_frame_tx(sdata, sta, WLAN_SP_MESH_PEERING_CLOSE,
+ sta->sta.addr, sta->mesh->llid, sta->mesh->plid,
+ sta->mesh->reason);
+ spin_unlock_bh(&sta->mesh->plink_lock);
+
+ return changed;
+}
+
static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
struct ieee802_11_elems *elems, bool insert)
@@ -388,13 +385,14 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
sband = local->hw.wiphy->bands[band];
rates = ieee80211_sta_get_rates(sdata, elems, band, &basic_rates);
- spin_lock_bh(&sta->plink_lock);
+ spin_lock_bh(&sta->mesh->plink_lock);
sta->last_rx = jiffies;
/* rates and capabilities don't change during peering */
- if (sta->plink_state == NL80211_PLINK_ESTAB && sta->processed_beacon)
+ if (sta->mesh->plink_state == NL80211_PLINK_ESTAB &&
+ sta->mesh->processed_beacon)
goto out;
- sta->processed_beacon = true;
+ sta->mesh->processed_beacon = true;
if (sta->sta.supp_rates[band] != rates)
changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
@@ -421,23 +419,57 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
else
rate_control_rate_update(local, sband, sta, changed);
out:
- spin_unlock_bh(&sta->plink_lock);
+ spin_unlock_bh(&sta->mesh->plink_lock);
+}
+
+static int mesh_allocate_aid(struct ieee80211_sub_if_data *sdata)
+{
+ struct sta_info *sta;
+ unsigned long *aid_map;
+ int aid;
+
+ aid_map = kcalloc(BITS_TO_LONGS(IEEE80211_MAX_AID + 1),
+ sizeof(*aid_map), GFP_KERNEL);
+ if (!aid_map)
+ return -ENOMEM;
+
+ /* reserve aid 0 for mcast indication */
+ __set_bit(0, aid_map);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sta, &sdata->local->sta_list, list)
+ __set_bit(sta->sta.aid, aid_map);
+ rcu_read_unlock();
+
+ aid = find_first_zero_bit(aid_map, IEEE80211_MAX_AID + 1);
+ kfree(aid_map);
+
+ if (aid > IEEE80211_MAX_AID)
+ return -ENOBUFS;
+
+ return aid;
}
static struct sta_info *
__mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *hw_addr)
{
struct sta_info *sta;
+ int aid;
if (sdata->local->num_sta >= MESH_MAX_PLINKS)
return NULL;
+ aid = mesh_allocate_aid(sdata);
+ if (aid < 0)
+ return NULL;
+
sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL);
if (!sta)
return NULL;
- sta->plink_state = NL80211_PLINK_LISTEN;
+ sta->mesh->plink_state = NL80211_PLINK_LISTEN;
sta->sta.wme = true;
+ sta->sta.aid = aid;
sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
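Mesh peers now receive genuine AIDs: mesh_allocate_aid() bitmaps every AID already in use, reserves 0 for the multicast indication, and hands out the lowest free one, failing with ENOBUFS when the table is full. The same scheme as a standalone userspace analogue — allocate_aid() and its byte map are illustrative, not kernel code:

#include <stdio.h>
#include <stdlib.h>

#define MAX_AID 2007	/* IEEE80211_MAX_AID */

static int allocate_aid(const int *used, int n_used)
{
	unsigned char *map = calloc(MAX_AID + 1, 1);
	int i, aid = -1;

	if (!map)
		return -1;
	map[0] = 1;				/* aid 0 = mcast indication */
	for (i = 0; i < n_used; i++)
		map[used[i]] = 1;
	for (i = 0; i <= MAX_AID; i++)
		if (!map[i]) {
			aid = i;
			break;
		}
	free(map);
	return aid;				/* -1 ~ -ENOBUFS */
}

int main(void)
{
	int used[] = { 1, 2, 4 };

	printf("next aid = %d\n", allocate_aid(used, 3));	/* 3 */
	return 0;
}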
@@ -524,7 +556,7 @@ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
goto out;
if (mesh_peer_accepts_plinks(elems) &&
- sta->plink_state == NL80211_PLINK_LISTEN &&
+ sta->mesh->plink_state == NL80211_PLINK_LISTEN &&
sdata->u.mesh.accepting_plinks &&
sdata->u.mesh.mshcfg.auto_open_plinks &&
rssi_threshold_check(sdata, sta))
@@ -554,52 +586,52 @@ static void mesh_plink_timer(unsigned long data)
if (sta->sdata->local->quiescing)
return;
- spin_lock_bh(&sta->plink_lock);
+ spin_lock_bh(&sta->mesh->plink_lock);
/* If a timer fires just before a state transition on another CPU,
* we may have already extended the timeout and changed state by the
* time we've acquired the lock and arrived here. In that case,
* skip this timer and wait for the new one.
*/
- if (time_before(jiffies, sta->plink_timer.expires)) {
+ if (time_before(jiffies, sta->mesh->plink_timer.expires)) {
mpl_dbg(sta->sdata,
"Ignoring timer for %pM in state %s (timer adjusted)",
- sta->sta.addr, mplstates[sta->plink_state]);
- spin_unlock_bh(&sta->plink_lock);
+ sta->sta.addr, mplstates[sta->mesh->plink_state]);
+ spin_unlock_bh(&sta->mesh->plink_lock);
return;
}
/* del_timer() and handler may race when entering these states */
- if (sta->plink_state == NL80211_PLINK_LISTEN ||
- sta->plink_state == NL80211_PLINK_ESTAB) {
+ if (sta->mesh->plink_state == NL80211_PLINK_LISTEN ||
+ sta->mesh->plink_state == NL80211_PLINK_ESTAB) {
mpl_dbg(sta->sdata,
"Ignoring timer for %pM in state %s (timer deleted)",
- sta->sta.addr, mplstates[sta->plink_state]);
- spin_unlock_bh(&sta->plink_lock);
+ sta->sta.addr, mplstates[sta->mesh->plink_state]);
+ spin_unlock_bh(&sta->mesh->plink_lock);
return;
}
mpl_dbg(sta->sdata,
"Mesh plink timer for %pM fired on state %s\n",
- sta->sta.addr, mplstates[sta->plink_state]);
+ sta->sta.addr, mplstates[sta->mesh->plink_state]);
sdata = sta->sdata;
mshcfg = &sdata->u.mesh.mshcfg;
- switch (sta->plink_state) {
+ switch (sta->mesh->plink_state) {
case NL80211_PLINK_OPN_RCVD:
case NL80211_PLINK_OPN_SNT:
/* retry timer */
- if (sta->plink_retries < mshcfg->dot11MeshMaxRetries) {
+ if (sta->mesh->plink_retries < mshcfg->dot11MeshMaxRetries) {
u32 rand;
mpl_dbg(sta->sdata,
"Mesh plink for %pM (retry, timeout): %d %d\n",
- sta->sta.addr, sta->plink_retries,
- sta->plink_timeout);
+ sta->sta.addr, sta->mesh->plink_retries,
+ sta->mesh->plink_timeout);
get_random_bytes(&rand, sizeof(u32));
- sta->plink_timeout = sta->plink_timeout +
- rand % sta->plink_timeout;
- ++sta->plink_retries;
- mod_plink_timer(sta, sta->plink_timeout);
+ sta->mesh->plink_timeout = sta->mesh->plink_timeout +
+ rand % sta->mesh->plink_timeout;
+ ++sta->mesh->plink_retries;
+ mod_plink_timer(sta, sta->mesh->plink_timeout);
action = WLAN_SP_MESH_PEERING_OPEN;
break;
}
@@ -609,31 +641,31 @@ static void mesh_plink_timer(unsigned long data)
/* confirm timer */
if (!reason)
reason = WLAN_REASON_MESH_CONFIRM_TIMEOUT;
- sta->plink_state = NL80211_PLINK_HOLDING;
+ sta->mesh->plink_state = NL80211_PLINK_HOLDING;
mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout);
action = WLAN_SP_MESH_PEERING_CLOSE;
break;
case NL80211_PLINK_HOLDING:
/* holding timer */
- del_timer(&sta->plink_timer);
+ del_timer(&sta->mesh->plink_timer);
mesh_plink_fsm_restart(sta);
break;
default:
break;
}
- spin_unlock_bh(&sta->plink_lock);
+ spin_unlock_bh(&sta->mesh->plink_lock);
if (action)
- mesh_plink_frame_tx(sdata, action, sta->sta.addr,
- sta->llid, sta->plid, reason);
+ mesh_plink_frame_tx(sdata, sta, action, sta->sta.addr,
+ sta->mesh->llid, sta->mesh->plid, reason);
}
static inline void mesh_plink_timer_set(struct sta_info *sta, u32 timeout)
{
- sta->plink_timer.expires = jiffies + msecs_to_jiffies(timeout);
- sta->plink_timer.data = (unsigned long) sta;
- sta->plink_timer.function = mesh_plink_timer;
- sta->plink_timeout = timeout;
- add_timer(&sta->plink_timer);
+ sta->mesh->plink_timer.expires = jiffies + msecs_to_jiffies(timeout);
+ sta->mesh->plink_timer.data = (unsigned long) sta;
+ sta->mesh->plink_timer.function = mesh_plink_timer;
+ sta->mesh->plink_timeout = timeout;
+ add_timer(&sta->mesh->plink_timer);
}
static bool llid_in_use(struct ieee80211_sub_if_data *sdata,
@@ -645,7 +677,7 @@ static bool llid_in_use(struct ieee80211_sub_if_data *sdata,
rcu_read_lock();
list_for_each_entry_rcu(sta, &local->sta_list, list) {
- if (!memcmp(&sta->llid, &llid, sizeof(llid))) {
+ if (!memcmp(&sta->mesh->llid, &llid, sizeof(llid))) {
in_use = true;
break;
}
@@ -661,8 +693,6 @@ static u16 mesh_get_new_llid(struct ieee80211_sub_if_data *sdata)
do {
get_random_bytes(&llid, sizeof(llid));
- /* for mesh PS we still only have the AID range for TIM bits */
- llid = (llid % IEEE80211_MAX_AID) + 1;
} while (llid_in_use(sdata, llid));
return llid;
@@ -676,16 +706,16 @@ u32 mesh_plink_open(struct sta_info *sta)
if (!test_sta_flag(sta, WLAN_STA_AUTH))
return 0;
- spin_lock_bh(&sta->plink_lock);
- sta->llid = mesh_get_new_llid(sdata);
- if (sta->plink_state != NL80211_PLINK_LISTEN &&
- sta->plink_state != NL80211_PLINK_BLOCKED) {
- spin_unlock_bh(&sta->plink_lock);
+ spin_lock_bh(&sta->mesh->plink_lock);
+ sta->mesh->llid = mesh_get_new_llid(sdata);
+ if (sta->mesh->plink_state != NL80211_PLINK_LISTEN &&
+ sta->mesh->plink_state != NL80211_PLINK_BLOCKED) {
+ spin_unlock_bh(&sta->mesh->plink_lock);
return 0;
}
- sta->plink_state = NL80211_PLINK_OPN_SNT;
+ sta->mesh->plink_state = NL80211_PLINK_OPN_SNT;
mesh_plink_timer_set(sta, sdata->u.mesh.mshcfg.dot11MeshRetryTimeout);
- spin_unlock_bh(&sta->plink_lock);
+ spin_unlock_bh(&sta->mesh->plink_lock);
mpl_dbg(sdata,
"Mesh plink: starting establishment with %pM\n",
sta->sta.addr);
@@ -693,8 +723,8 @@ u32 mesh_plink_open(struct sta_info *sta)
/* set the non-peer mode to active during peering */
changed = ieee80211_mps_local_status_update(sdata);
- mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
- sta->sta.addr, sta->llid, 0, 0);
+ mesh_plink_frame_tx(sdata, sta, WLAN_SP_MESH_PEERING_OPEN,
+ sta->sta.addr, sta->mesh->llid, 0, 0);
return changed;
}
@@ -702,10 +732,10 @@ u32 mesh_plink_block(struct sta_info *sta)
{
u32 changed;
- spin_lock_bh(&sta->plink_lock);
+ spin_lock_bh(&sta->mesh->plink_lock);
changed = __mesh_plink_deactivate(sta);
- sta->plink_state = NL80211_PLINK_BLOCKED;
- spin_unlock_bh(&sta->plink_lock);
+ sta->mesh->plink_state = NL80211_PLINK_BLOCKED;
+ spin_unlock_bh(&sta->mesh->plink_lock);
return changed;
}
@@ -715,12 +745,11 @@ static void mesh_plink_close(struct ieee80211_sub_if_data *sdata,
enum plink_event event)
{
struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg;
-
u16 reason = (event == CLS_ACPT) ?
WLAN_REASON_MESH_CLOSE : WLAN_REASON_MESH_CONFIG;
- sta->reason = reason;
- sta->plink_state = NL80211_PLINK_HOLDING;
+ sta->mesh->reason = reason;
+ sta->mesh->plink_state = NL80211_PLINK_HOLDING;
mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout);
}
@@ -730,8 +759,8 @@ static u32 mesh_plink_establish(struct ieee80211_sub_if_data *sdata,
struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg;
u32 changed = 0;
- del_timer(&sta->plink_timer);
- sta->plink_state = NL80211_PLINK_ESTAB;
+ del_timer(&sta->mesh->plink_timer);
+ sta->mesh->plink_state = NL80211_PLINK_ESTAB;
changed |= mesh_plink_inc_estab_count(sdata);
changed |= mesh_set_ht_prot_mode(sdata);
changed |= mesh_set_short_slot_time(sdata);
@@ -758,18 +787,18 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
u32 changed = 0;
mpl_dbg(sdata, "peer %pM in state %s got event %s\n", sta->sta.addr,
- mplstates[sta->plink_state], mplevents[event]);
+ mplstates[sta->mesh->plink_state], mplevents[event]);
- spin_lock_bh(&sta->plink_lock);
- switch (sta->plink_state) {
+ spin_lock_bh(&sta->mesh->plink_lock);
+ switch (sta->mesh->plink_state) {
case NL80211_PLINK_LISTEN:
switch (event) {
case CLS_ACPT:
mesh_plink_fsm_restart(sta);
break;
case OPN_ACPT:
- sta->plink_state = NL80211_PLINK_OPN_RCVD;
- sta->llid = mesh_get_new_llid(sdata);
+ sta->mesh->plink_state = NL80211_PLINK_OPN_RCVD;
+ sta->mesh->llid = mesh_get_new_llid(sdata);
mesh_plink_timer_set(sta,
mshcfg->dot11MeshRetryTimeout);
@@ -791,11 +820,11 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
break;
case OPN_ACPT:
/* retry timer is left untouched */
- sta->plink_state = NL80211_PLINK_OPN_RCVD;
+ sta->mesh->plink_state = NL80211_PLINK_OPN_RCVD;
action = WLAN_SP_MESH_PEERING_CONFIRM;
break;
case CNF_ACPT:
- sta->plink_state = NL80211_PLINK_CNF_RCVD;
+ sta->mesh->plink_state = NL80211_PLINK_CNF_RCVD;
mod_plink_timer(sta, mshcfg->dot11MeshConfirmTimeout);
break;
default:
@@ -855,7 +884,7 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
case NL80211_PLINK_HOLDING:
switch (event) {
case CLS_ACPT:
- del_timer(&sta->plink_timer);
+ del_timer(&sta->mesh->plink_timer);
mesh_plink_fsm_restart(sta);
break;
case OPN_ACPT:
@@ -874,17 +903,18 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
*/
break;
}
- spin_unlock_bh(&sta->plink_lock);
+ spin_unlock_bh(&sta->mesh->plink_lock);
if (action) {
- mesh_plink_frame_tx(sdata, action, sta->sta.addr,
- sta->llid, sta->plid, sta->reason);
+ mesh_plink_frame_tx(sdata, sta, action, sta->sta.addr,
+ sta->mesh->llid, sta->mesh->plid,
+ sta->mesh->reason);
/* also send confirm in open case */
if (action == WLAN_SP_MESH_PEERING_OPEN) {
- mesh_plink_frame_tx(sdata,
+ mesh_plink_frame_tx(sdata, sta,
WLAN_SP_MESH_PEERING_CONFIRM,
- sta->sta.addr, sta->llid,
- sta->plid, 0);
+ sta->sta.addr, sta->mesh->llid,
+ sta->mesh->plid, 0);
}
}
@@ -939,7 +969,7 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
mpl_dbg(sdata, "Mesh plink: Action frame from non-authed peer\n");
goto out;
}
- if (sta->plink_state == NL80211_PLINK_BLOCKED)
+ if (sta->mesh->plink_state == NL80211_PLINK_BLOCKED)
goto out;
}
@@ -954,7 +984,7 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
if (!matches_local)
event = OPN_RJCT;
if (!mesh_plink_free_count(sdata) ||
- (sta->plid && sta->plid != plid))
+ (sta->mesh->plid && sta->mesh->plid != plid))
event = OPN_IGNR;
else
event = OPN_ACPT;
@@ -963,14 +993,14 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
if (!matches_local)
event = CNF_RJCT;
if (!mesh_plink_free_count(sdata) ||
- sta->llid != llid ||
- (sta->plid && sta->plid != plid))
+ sta->mesh->llid != llid ||
+ (sta->mesh->plid && sta->mesh->plid != plid))
event = CNF_IGNR;
else
event = CNF_ACPT;
break;
case WLAN_SP_MESH_PEERING_CLOSE:
- if (sta->plink_state == NL80211_PLINK_ESTAB)
+ if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
/* Do not check for llid or plid. This does not
* follow the standard but since multiple plinks
* per sta are not supported, it is necessary in
@@ -981,9 +1011,9 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
* restarted.
*/
event = CLS_ACPT;
- else if (sta->plid != plid)
+ else if (sta->mesh->plid != plid)
event = CLS_IGNR;
- else if (ie_len == 8 && sta->llid != llid)
+ else if (ie_len == 8 && sta->mesh->llid != llid)
event = CLS_IGNR;
else
event = CLS_ACPT;
@@ -1070,9 +1100,9 @@ mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata,
mpl_dbg(sdata, "Mesh plink: failed to init peer!\n");
goto unlock_rcu;
}
- sta->plid = plid;
+ sta->mesh->plid = plid;
} else if (!sta && event == OPN_RJCT) {
- mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
+ mesh_plink_frame_tx(sdata, NULL, WLAN_SP_MESH_PEERING_CLOSE,
mgmt->sa, 0, plid,
WLAN_REASON_MESH_CONFIG);
goto unlock_rcu;
@@ -1081,9 +1111,13 @@ mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata,
goto unlock_rcu;
}
- /* 802.11-2012 13.3.7.2 - update plid on CNF if not set */
- if (!sta->plid && event == CNF_ACPT)
- sta->plid = plid;
+ if (event == CNF_ACPT) {
+ /* 802.11-2012 13.3.7.2 - update plid on CNF if not set */
+ if (!sta->mesh->plid)
+ sta->mesh->plid = plid;
+
+ sta->mesh->aid = get_unaligned_le16(PLINK_CNF_AID(mgmt));
+ }
changed |= mesh_plink_fsm(sdata, sta, event);
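The aid stored above is read with get_unaligned_le16() because the field sits at an arbitrary byte offset inside the peering confirm frame. A minimal userspace sketch of that byte-wise read, with an illustrative offset standing in for the PLINK_CNF_AID() macro:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t get_unaligned_le16_sketch(const uint8_t *p)
    {
        /* byte-wise load avoids unaligned-access traps on strict CPUs */
        return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
    }

    int main(void)
    {
        /* illustrative frame: AID = 42, little-endian, at offset 2 */
        const uint8_t frame[] = { 0x00, 0x00, 0x2a, 0x00 };

        printf("aid=%u\n", get_unaligned_le16_sketch(frame + 2));
        return 0;
    }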
diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
index ad8b377b4..90a268abe 100644
--- a/net/mac80211/mesh_ps.c
+++ b/net/mac80211/mesh_ps.c
@@ -92,16 +92,16 @@ u32 ieee80211_mps_local_status_update(struct ieee80211_sub_if_data *sdata)
if (sdata != sta->sdata)
continue;
- switch (sta->plink_state) {
+ switch (sta->mesh->plink_state) {
case NL80211_PLINK_OPN_SNT:
case NL80211_PLINK_OPN_RCVD:
case NL80211_PLINK_CNF_RCVD:
peering = true;
break;
case NL80211_PLINK_ESTAB:
- if (sta->local_pm == NL80211_MESH_POWER_LIGHT_SLEEP)
+ if (sta->mesh->local_pm == NL80211_MESH_POWER_LIGHT_SLEEP)
light_sleep_cnt++;
- else if (sta->local_pm == NL80211_MESH_POWER_DEEP_SLEEP)
+ else if (sta->mesh->local_pm == NL80211_MESH_POWER_DEEP_SLEEP)
deep_sleep_cnt++;
break;
default:
@@ -153,19 +153,19 @@ u32 ieee80211_mps_set_sta_local_pm(struct sta_info *sta,
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
- if (sta->local_pm == pm)
+ if (sta->mesh->local_pm == pm)
return 0;
mps_dbg(sdata, "local STA operates in mode %d with %pM\n",
pm, sta->sta.addr);
- sta->local_pm = pm;
+ sta->mesh->local_pm = pm;
/*
* announce peer-specific power mode transition
* (see IEEE802.11-2012 13.14.3.2 and 13.14.3.3)
*/
- if (sta->plink_state == NL80211_PLINK_ESTAB)
+ if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
mps_qos_null_tx(sta);
return ieee80211_mps_local_status_update(sdata);
@@ -197,8 +197,8 @@ void ieee80211_mps_set_frame_flags(struct ieee80211_sub_if_data *sdata,
if (is_unicast_ether_addr(hdr->addr1) &&
ieee80211_is_data_qos(hdr->frame_control) &&
- sta->plink_state == NL80211_PLINK_ESTAB)
- pm = sta->local_pm;
+ sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+ pm = sta->mesh->local_pm;
else
pm = sdata->u.mesh.nonpeer_pm;
@@ -241,16 +241,16 @@ void ieee80211_mps_sta_status_update(struct sta_info *sta)
* use peer-specific power mode if peering is established and the
* peer's power mode is known
*/
- if (sta->plink_state == NL80211_PLINK_ESTAB &&
- sta->peer_pm != NL80211_MESH_POWER_UNKNOWN)
- pm = sta->peer_pm;
+ if (sta->mesh->plink_state == NL80211_PLINK_ESTAB &&
+ sta->mesh->peer_pm != NL80211_MESH_POWER_UNKNOWN)
+ pm = sta->mesh->peer_pm;
else
- pm = sta->nonpeer_pm;
+ pm = sta->mesh->nonpeer_pm;
do_buffer = (pm != NL80211_MESH_POWER_ACTIVE);
/* clear the MPSP flags for non-peers or active STA */
- if (sta->plink_state != NL80211_PLINK_ESTAB) {
+ if (sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
clear_sta_flag(sta, WLAN_STA_MPSP_OWNER);
clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
} else if (!do_buffer) {
@@ -296,13 +296,13 @@ static void mps_set_sta_peer_pm(struct sta_info *sta,
pm = NL80211_MESH_POWER_ACTIVE;
}
- if (sta->peer_pm == pm)
+ if (sta->mesh->peer_pm == pm)
return;
mps_dbg(sta->sdata, "STA %pM enters mode %d\n",
sta->sta.addr, pm);
- sta->peer_pm = pm;
+ sta->mesh->peer_pm = pm;
ieee80211_mps_sta_status_update(sta);
}
@@ -317,13 +317,13 @@ static void mps_set_sta_nonpeer_pm(struct sta_info *sta,
else
pm = NL80211_MESH_POWER_ACTIVE;
- if (sta->nonpeer_pm == pm)
+ if (sta->mesh->nonpeer_pm == pm)
return;
mps_dbg(sta->sdata, "STA %pM sets non-peer mode to %d\n",
sta->sta.addr, pm);
- sta->nonpeer_pm = pm;
+ sta->mesh->nonpeer_pm = pm;
ieee80211_mps_sta_status_update(sta);
}
@@ -552,7 +552,7 @@ void ieee80211_mpsp_trigger_process(u8 *qc, struct sta_info *sta,
} else {
if (eosp)
clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
- else if (sta->local_pm != NL80211_MESH_POWER_ACTIVE)
+ else if (sta->mesh->local_pm != NL80211_MESH_POWER_ACTIVE)
set_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
if (rspi && !test_and_set_sta_flag(sta, WLAN_STA_MPSP_OWNER))
@@ -577,9 +577,9 @@ void ieee80211_mps_frame_release(struct sta_info *sta,
int ac, buffer_local = 0;
bool has_buffered = false;
- if (sta->plink_state == NL80211_PLINK_ESTAB)
+ if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
has_buffered = ieee80211_check_tim(elems->tim, elems->tim_len,
- sta->llid);
+ sta->mesh->aid);
if (has_buffered)
mps_dbg(sta->sdata, "%pM indicates buffered frames\n",
@@ -598,7 +598,7 @@ void ieee80211_mps_frame_release(struct sta_info *sta,
if (!has_buffered && !buffer_local)
return;
- if (sta->plink_state == NL80211_PLINK_ESTAB)
+ if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
mpsp_trigger_send(sta, has_buffered, !buffer_local);
else
mps_frame_deliver(sta, 1);
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
index 09625d620..64bc22ad9 100644
--- a/net/mac80211/mesh_sync.c
+++ b/net/mac80211/mesh_sync.c
@@ -127,14 +127,14 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
/* Timing offset calculation (see 13.13.2.2.2) */
t_t = le64_to_cpu(mgmt->u.beacon.timestamp);
- sta->t_offset = t_t - t_r;
+ sta->mesh->t_offset = t_t - t_r;
if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
- s64 t_clockdrift = sta->t_offset_setpoint - sta->t_offset;
+ s64 t_clockdrift = sta->mesh->t_offset_setpoint - sta->mesh->t_offset;
msync_dbg(sdata,
- "STA %pM : sta->t_offset=%lld, sta->t_offset_setpoint=%lld, t_clockdrift=%lld\n",
- sta->sta.addr, (long long) sta->t_offset,
- (long long) sta->t_offset_setpoint,
+ "STA %pM : t_offset=%lld, t_offset_setpoint=%lld, t_clockdrift=%lld\n",
+ sta->sta.addr, (long long) sta->mesh->t_offset,
+ (long long) sta->mesh->t_offset_setpoint,
(long long) t_clockdrift);
if (t_clockdrift > TOFFSET_MAXIMUM_ADJUSTMENT ||
@@ -152,12 +152,12 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
ifmsh->sync_offset_clockdrift_max = t_clockdrift;
spin_unlock_bh(&ifmsh->sync_offset_lock);
} else {
- sta->t_offset_setpoint = sta->t_offset - TOFFSET_SET_MARGIN;
+ sta->mesh->t_offset_setpoint = sta->mesh->t_offset - TOFFSET_SET_MARGIN;
set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
msync_dbg(sdata,
- "STA %pM : offset was invalid, sta->t_offset=%lld\n",
+ "STA %pM : offset was invalid, t_offset=%lld\n",
sta->sta.addr,
- (long long) sta->t_offset);
+ (long long) sta->mesh->t_offset);
}
no_sync:
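The arithmetic above keeps a per-peer timing offset (peer TSF minus local receive time) and compares it against a recorded setpoint to estimate clockdrift. A userspace sketch of that bookkeeping, with illustrative margin and limit values standing in for the TOFFSET_* constants:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define SET_MARGIN   20  /* illustrative TOFFSET_SET_MARGIN */
    #define MAX_ADJUST 3000  /* illustrative TOFFSET_MAXIMUM_ADJUSTMENT */

    int main(void)
    {
        int64_t t_t = 1000500;          /* beacon timestamp (peer TSF) */
        int64_t t_r = 1000000;          /* local receive timestamp */
        int64_t t_offset = t_t - t_r;   /* 500 */
        int64_t setpoint = t_offset - SET_MARGIN; /* first sighting */
        int64_t t_clockdrift = setpoint - t_offset;

        if (llabs(t_clockdrift) > MAX_ADJUST)
            printf("drift %lld out of range, resync\n",
                   (long long)t_clockdrift);
        else
            printf("drift %lld within limits\n",
                   (long long)t_clockdrift);
        return 0;
    }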
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 9b2cc278a..d011bc539 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -6,6 +6,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -538,11 +539,16 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
ieee80211_ie_build_ht_cap(pos, &ht_cap, cap);
}
+/* This function determines the VHT capability flags for the association
+ * and builds the IE.
+ * Note: the function may set the owner of the MU-MIMO capability.
+ */
static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb,
struct ieee80211_supported_band *sband,
struct ieee80211_vht_cap *ap_vht_cap)
{
+ struct ieee80211_local *local = sdata->local;
u8 *pos;
u32 cap;
struct ieee80211_sta_vht_cap vht_cap;
@@ -576,7 +582,34 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
*/
if (!(ap_vht_cap->vht_cap_info &
cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)))
- cap &= ~IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
+ cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+ else if (!(ap_vht_cap->vht_cap_info &
+ cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
+ cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+
+	/*
+	 * If some other vif is using the MU-MIMO capability we cannot associate
+	 * using MU-MIMO - this would lead to contradictions in the group-id
+	 * mechanism.
+	 * Ownership is claimed at association request time, in order to avoid
+	 * simultaneous MU-MIMO associations.
+	 */
+ if (cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) {
+ bool disable_mu_mimo = false;
+ struct ieee80211_sub_if_data *other;
+
+ list_for_each_entry_rcu(other, &local->interfaces, list) {
+ if (other->flags & IEEE80211_SDATA_MU_MIMO_OWNER) {
+ disable_mu_mimo = true;
+ break;
+ }
+ }
+ if (disable_mu_mimo)
+ cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+ else
+ sdata->flags |= IEEE80211_SDATA_MU_MIMO_OWNER;
+ }
mask = IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
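The ownership scan above ensures only one vif at a time claims MU-MIMO, since two simultaneous MU-MIMO associations would confuse the group-id mechanism. A minimal sketch of the claim logic, using a plain array in place of the RCU-protected interface list:

    #include <stdbool.h>
    #include <stdio.h>

    #define SDATA_MU_MIMO_OWNER 0x1

    struct iface { unsigned int flags; };

    static bool claim_mu_mimo(struct iface *ifaces, int n, struct iface *self)
    {
        for (int i = 0; i < n; i++)
            if (ifaces[i].flags & SDATA_MU_MIMO_OWNER)
                return false;       /* someone else owns it */
        self->flags |= SDATA_MU_MIMO_OWNER; /* claim it for this vif */
        return true;
    }

    int main(void)
    {
        struct iface ifaces[2] = { { 0 }, { 0 } };

        printf("first claim:  %d\n", claim_mu_mimo(ifaces, 2, &ifaces[0]));
        printf("second claim: %d\n", claim_mu_mimo(ifaces, 2, &ifaces[1]));
        return 0;
    }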
@@ -1096,24 +1129,6 @@ static void ieee80211_chswitch_timer(unsigned long data)
ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.chswitch_work);
}
-static void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata)
-{
- struct sta_info *sta;
- u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
-
- rcu_read_lock();
- list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
- if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
- !test_sta_flag(sta, WLAN_STA_AUTHORIZED))
- continue;
-
- ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr,
- NL80211_TDLS_TEARDOWN, reason,
- GFP_ATOMIC);
- }
- rcu_read_unlock();
-}
-
static void
ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
u64 timestamp, u32 device_timestamp,
@@ -2076,6 +2091,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
memset(&ifmgd->vht_capa, 0, sizeof(ifmgd->vht_capa));
memset(&ifmgd->vht_capa_mask, 0, sizeof(ifmgd->vht_capa_mask));
+ sdata->flags &= ~IEEE80211_SDATA_MU_MIMO_OWNER;
sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
@@ -2538,6 +2554,7 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
eth_zero_addr(sdata->u.mgd.bssid);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
sdata->u.mgd.flags = 0;
+ sdata->flags &= ~IEEE80211_SDATA_MU_MIMO_OWNER;
mutex_lock(&sdata->local->mtx);
ieee80211_vif_release_channel(sdata);
mutex_unlock(&sdata->local->mtx);
@@ -3034,12 +3051,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
rate_control_rate_init(sta);
- if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) {
+ if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED)
set_sta_flag(sta, WLAN_STA_MFP);
- sta->sta.mfp = true;
- } else {
- sta->sta.mfp = false;
- }
sta->sta.wme = elems.wmm_param && local->hw.queues >= IEEE80211_NUM_ACS;
@@ -3378,7 +3391,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
if (ifmgd->rssi_min_thold != ifmgd->rssi_max_thold &&
ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) {
- int sig = ifmgd->ave_beacon_signal;
+ int sig = ifmgd->ave_beacon_signal / 16;
int last_sig = ifmgd->last_ave_beacon_signal;
struct ieee80211_event event = {
.type = RSSI_EVENT,
@@ -4254,6 +4267,8 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband;
struct cfg80211_chan_def chandef;
int ret;
+ u32 i;
+ bool have_80mhz;
sband = local->hw.wiphy->bands[cbss->channel->band];
@@ -4304,6 +4319,20 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
}
}
+ /* Allow VHT if at least one channel on the sband supports 80 MHz */
+ have_80mhz = false;
+ for (i = 0; i < sband->n_channels; i++) {
+ if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED |
+ IEEE80211_CHAN_NO_80MHZ))
+ continue;
+
+ have_80mhz = true;
+ break;
+ }
+
+ if (!have_80mhz)
+ ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+
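The loop above gates VHT on the band actually having a usable 80 MHz channel. A small sketch of the same scan, with illustrative flag values standing in for the IEEE80211_CHAN_* bits:

    #include <stdbool.h>
    #include <stdio.h>

    #define CHAN_DISABLED 0x1
    #define CHAN_NO_80MHZ 0x2

    static bool band_has_80mhz(const unsigned int *chan_flags, int n)
    {
        for (int i = 0; i < n; i++) {
            if (chan_flags[i] & (CHAN_DISABLED | CHAN_NO_80MHZ))
                continue;
            return true; /* one clean channel is enough */
        }
        return false;
    }

    int main(void)
    {
        unsigned int flags[3] = { CHAN_DISABLED, CHAN_NO_80MHZ, 0 };

        printf("allow VHT: %d\n", band_has_80mhz(flags, 3));
        return 0;
    }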
ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
cbss->channel,
ht_cap, ht_oper, vht_oper,
@@ -4999,6 +5028,25 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
return 0;
}
+ if (ifmgd->assoc_data &&
+ ether_addr_equal(ifmgd->assoc_data->bss->bssid, req->bssid)) {
+ sdata_info(sdata,
+ "aborting association with %pM by local choice (Reason: %u=%s)\n",
+ req->bssid, req->reason_code,
+ ieee80211_get_reason_code_string(req->reason_code));
+
+ drv_mgd_prepare_tx(sdata->local, sdata);
+ ieee80211_send_deauth_disassoc(sdata, req->bssid,
+ IEEE80211_STYPE_DEAUTH,
+ req->reason_code, tx,
+ frame_buf);
+ ieee80211_destroy_assoc_data(sdata, false);
+ ieee80211_report_disconnect(sdata, frame_buf,
+ sizeof(frame_buf), true,
+ req->reason_code);
+ return 0;
+ }
+
if (ifmgd->associated &&
ether_addr_equal(ifmgd->associated->bssid, req->bssid)) {
sdata_info(sdata,
diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c
index 358d5f9d8..573b81a1f 100644
--- a/net/mac80211/ocb.c
+++ b/net/mac80211/ocb.c
@@ -179,7 +179,7 @@ int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
- u32 changed = BSS_CHANGED_OCB;
+ u32 changed = BSS_CHANGED_OCB | BSS_CHANGED_BSSID;
int err;
if (ifocb->joined == true)
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index fda33f961..9ce8883d5 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -29,6 +29,65 @@ module_param(ieee80211_default_rc_algo, charp, 0644);
MODULE_PARM_DESC(ieee80211_default_rc_algo,
"Default rate control algorithm for mac80211 to use");
+void rate_control_rate_init(struct sta_info *sta)
+{
+ struct ieee80211_local *local = sta->sdata->local;
+ struct rate_control_ref *ref = sta->rate_ctrl;
+ struct ieee80211_sta *ista = &sta->sta;
+ void *priv_sta = sta->rate_ctrl_priv;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+
+ ieee80211_sta_set_rx_nss(sta);
+
+ if (!ref)
+ return;
+
+ rcu_read_lock();
+
+ chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
+ if (WARN_ON(!chanctx_conf)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
+
+ spin_lock_bh(&sta->rate_ctrl_lock);
+ ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista,
+ priv_sta);
+ spin_unlock_bh(&sta->rate_ctrl_lock);
+ rcu_read_unlock();
+ set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
+}
+
+void rate_control_rate_update(struct ieee80211_local *local,
+ struct ieee80211_supported_band *sband,
+ struct sta_info *sta, u32 changed)
+{
+ struct rate_control_ref *ref = local->rate_ctrl;
+ struct ieee80211_sta *ista = &sta->sta;
+ void *priv_sta = sta->rate_ctrl_priv;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+
+ if (ref && ref->ops->rate_update) {
+ rcu_read_lock();
+
+ chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
+ if (WARN_ON(!chanctx_conf)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ spin_lock_bh(&sta->rate_ctrl_lock);
+ ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def,
+ ista, priv_sta, changed);
+ spin_unlock_bh(&sta->rate_ctrl_lock);
+ rcu_read_unlock();
+ }
+ drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
+}
+
int ieee80211_rate_control_register(const struct rate_control_ops *ops)
{
struct rate_control_alg *alg;
@@ -294,39 +353,37 @@ bool rate_control_send_low(struct ieee80211_sta *pubsta,
}
EXPORT_SYMBOL(rate_control_send_low);
-static bool rate_idx_match_legacy_mask(struct ieee80211_tx_rate *rate,
- int n_bitrates, u32 mask)
+static bool rate_idx_match_legacy_mask(s8 *rate_idx, int n_bitrates, u32 mask)
{
int j;
/* See whether the selected rate or anything below it is allowed. */
- for (j = rate->idx; j >= 0; j--) {
+ for (j = *rate_idx; j >= 0; j--) {
if (mask & (1 << j)) {
/* Okay, found a suitable rate. Use it. */
- rate->idx = j;
+ *rate_idx = j;
return true;
}
}
/* Try to find a higher rate that would be allowed */
- for (j = rate->idx + 1; j < n_bitrates; j++) {
+ for (j = *rate_idx + 1; j < n_bitrates; j++) {
if (mask & (1 << j)) {
/* Okay, found a suitable rate. Use it. */
- rate->idx = j;
+ *rate_idx = j;
return true;
}
}
return false;
}
-static bool rate_idx_match_mcs_mask(struct ieee80211_tx_rate *rate,
- u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+static bool rate_idx_match_mcs_mask(s8 *rate_idx, u8 *mcs_mask)
{
int i, j;
int ridx, rbit;
- ridx = rate->idx / 8;
- rbit = rate->idx % 8;
+ ridx = *rate_idx / 8;
+ rbit = *rate_idx % 8;
/* sanity check */
if (ridx < 0 || ridx >= IEEE80211_HT_MCS_MASK_LEN)
@@ -336,20 +393,20 @@ static bool rate_idx_match_mcs_mask(struct ieee80211_tx_rate *rate,
for (i = ridx; i >= 0; i--) {
for (j = rbit; j >= 0; j--)
if (mcs_mask[i] & BIT(j)) {
- rate->idx = i * 8 + j;
+ *rate_idx = i * 8 + j;
return true;
}
rbit = 7;
}
/* Try to find a higher rate that would be allowed */
- ridx = (rate->idx + 1) / 8;
- rbit = (rate->idx + 1) % 8;
+ ridx = (*rate_idx + 1) / 8;
+ rbit = (*rate_idx + 1) % 8;
for (i = ridx; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
for (j = rbit; j < 8; j++)
if (mcs_mask[i] & BIT(j)) {
- rate->idx = i * 8 + j;
+ *rate_idx = i * 8 + j;
return true;
}
rbit = 0;
@@ -357,37 +414,93 @@ static bool rate_idx_match_mcs_mask(struct ieee80211_tx_rate *rate,
return false;
}
+static bool rate_idx_match_vht_mcs_mask(s8 *rate_idx, u16 *vht_mask)
+{
+ int i, j;
+ int ridx, rbit;
+
+ ridx = *rate_idx >> 4;
+ rbit = *rate_idx & 0xf;
+
+ if (ridx < 0 || ridx >= NL80211_VHT_NSS_MAX)
+ return false;
+
+ /* See whether the selected rate or anything below it is allowed. */
+ for (i = ridx; i >= 0; i--) {
+ for (j = rbit; j >= 0; j--) {
+ if (vht_mask[i] & BIT(j)) {
+ *rate_idx = (i << 4) | j;
+ return true;
+ }
+ }
+ rbit = 15;
+ }
+
+ /* Try to find a higher rate that would be allowed */
+ ridx = (*rate_idx + 1) >> 4;
+ rbit = (*rate_idx + 1) & 0xf;
+ for (i = ridx; i < NL80211_VHT_NSS_MAX; i++) {
+ for (j = rbit; j < 16; j++) {
+ if (vht_mask[i] & BIT(j)) {
+ *rate_idx = (i << 4) | j;
+ return true;
+ }
+ }
+ rbit = 0;
+ }
+ return false;
+}
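The new VHT matcher treats a rate index as a packed NSS/MCS pair: the high nibble selects the per-stream mask entry, the low nibble the MCS bit. A minimal sketch of that layout:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1U << (n))

    int main(void)
    {
        uint16_t vht_mask[8] = { 0 };
        int8_t idx = (1 << 4) | 9;   /* NSS index 1 (2 streams), MCS 9 */

        vht_mask[1] = BIT(9);        /* allow only that rate */
        printf("nss_idx=%d mcs=%d allowed=%d\n", idx >> 4, idx & 0xf,
               !!(vht_mask[idx >> 4] & BIT(idx & 0xf)));
        return 0;
    }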
-static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
+static void rate_idx_match_mask(s8 *rate_idx, u16 *rate_flags,
struct ieee80211_supported_band *sband,
enum nl80211_chan_width chan_width,
u32 mask,
- u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+ u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN],
+ u16 vht_mask[NL80211_VHT_NSS_MAX])
{
- struct ieee80211_tx_rate alt_rate;
+ if (*rate_flags & IEEE80211_TX_RC_VHT_MCS) {
+ /* handle VHT rates */
+ if (rate_idx_match_vht_mcs_mask(rate_idx, vht_mask))
+ return;
+
+ *rate_idx = 0;
+ /* keep protection flags */
+ *rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS |
+ IEEE80211_TX_RC_USE_CTS_PROTECT |
+ IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
+
+ *rate_flags |= IEEE80211_TX_RC_MCS;
+ if (chan_width == NL80211_CHAN_WIDTH_40)
+ *rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- /* handle HT rates */
- if (rate->flags & IEEE80211_TX_RC_MCS) {
- if (rate_idx_match_mcs_mask(rate, mcs_mask))
+ if (rate_idx_match_mcs_mask(rate_idx, mcs_mask))
return;
/* also try the legacy rates. */
- alt_rate.idx = 0;
+ *rate_flags &= ~(IEEE80211_TX_RC_MCS |
+ IEEE80211_TX_RC_40_MHZ_WIDTH);
+ if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates,
+ mask))
+ return;
+ } else if (*rate_flags & IEEE80211_TX_RC_MCS) {
+ /* handle HT rates */
+ if (rate_idx_match_mcs_mask(rate_idx, mcs_mask))
+ return;
+
+ /* also try the legacy rates. */
+ *rate_idx = 0;
/* keep protection flags */
- alt_rate.flags = rate->flags &
- (IEEE80211_TX_RC_USE_RTS_CTS |
- IEEE80211_TX_RC_USE_CTS_PROTECT |
- IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
- alt_rate.count = rate->count;
- if (rate_idx_match_legacy_mask(&alt_rate,
- sband->n_bitrates, mask)) {
- *rate = alt_rate;
+ *rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS |
+ IEEE80211_TX_RC_USE_CTS_PROTECT |
+ IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
+ if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates,
+ mask))
return;
- }
- } else if (!(rate->flags & IEEE80211_TX_RC_VHT_MCS)) {
+ } else {
/* handle legacy rates */
- if (rate_idx_match_legacy_mask(rate, sband->n_bitrates, mask))
+ if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates,
+ mask))
return;
/* if HT BSS, and we handle a data frame, also try HT rates */
@@ -400,23 +513,19 @@ static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
break;
}
- alt_rate.idx = 0;
+ *rate_idx = 0;
/* keep protection flags */
- alt_rate.flags = rate->flags &
- (IEEE80211_TX_RC_USE_RTS_CTS |
- IEEE80211_TX_RC_USE_CTS_PROTECT |
- IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
- alt_rate.count = rate->count;
+ *rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS |
+ IEEE80211_TX_RC_USE_CTS_PROTECT |
+ IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
- alt_rate.flags |= IEEE80211_TX_RC_MCS;
+ *rate_flags |= IEEE80211_TX_RC_MCS;
if (chan_width == NL80211_CHAN_WIDTH_40)
- alt_rate.flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ *rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (rate_idx_match_mcs_mask(&alt_rate, mcs_mask)) {
- *rate = alt_rate;
+ if (rate_idx_match_mcs_mask(rate_idx, mcs_mask))
return;
- }
}
/*
@@ -569,18 +678,92 @@ static void rate_control_fill_sta_table(struct ieee80211_sta *sta,
}
}
+static bool rate_control_cap_mask(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, u32 *mask,
+ u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN],
+ u16 vht_mask[NL80211_VHT_NSS_MAX])
+{
+ u32 i, flags;
+
+ *mask = sdata->rc_rateidx_mask[sband->band];
+ flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((flags & sband->bitrates[i].flags) != flags)
+ *mask &= ~BIT(i);
+ }
+
+ if (*mask == (1 << sband->n_bitrates) - 1 &&
+ !sdata->rc_has_mcs_mask[sband->band] &&
+ !sdata->rc_has_vht_mcs_mask[sband->band])
+ return false;
+
+ if (sdata->rc_has_mcs_mask[sband->band])
+ memcpy(mcs_mask, sdata->rc_rateidx_mcs_mask[sband->band],
+ IEEE80211_HT_MCS_MASK_LEN);
+ else
+ memset(mcs_mask, 0xff, IEEE80211_HT_MCS_MASK_LEN);
+
+ if (sdata->rc_has_vht_mcs_mask[sband->band])
+ memcpy(vht_mask, sdata->rc_rateidx_vht_mcs_mask[sband->band],
+ sizeof(u16) * NL80211_VHT_NSS_MAX);
+ else
+ memset(vht_mask, 0xff, sizeof(u16) * NL80211_VHT_NSS_MAX);
+
+ if (sta) {
+ __le16 sta_vht_cap;
+ u16 sta_vht_mask[NL80211_VHT_NSS_MAX];
+
+ /* Filter out rates that the STA does not support */
+ *mask &= sta->supp_rates[sband->band];
+ for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
+ mcs_mask[i] &= sta->ht_cap.mcs.rx_mask[i];
+
+ sta_vht_cap = sta->vht_cap.vht_mcs.rx_mcs_map;
+ ieee80211_get_vht_mask_from_cap(sta_vht_cap, sta_vht_mask);
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+ vht_mask[i] &= sta_vht_mask[i];
+ }
+
+ return true;
+}
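rate_control_cap_mask() above also keeps the old fast path: if every legacy rate survives the channel-flag filter and no HT/VHT masks are configured, mask application is skipped entirely. A sketch of that check:

    #include <stdbool.h>
    #include <stdio.h>

    static bool need_mask(unsigned int mask, int n_bitrates,
                          bool has_mcs_mask, bool has_vht_mask)
    {
        /* all legacy rates allowed and no MCS restrictions configured */
        if (mask == (1U << n_bitrates) - 1 && !has_mcs_mask && !has_vht_mask)
            return false;
        return true;
    }

    int main(void)
    {
        printf("%d\n", need_mask(0xfff, 12, false, false)); /* 0: skip */
        printf("%d\n", need_mask(0x7ff, 12, false, false)); /* 1: apply */
        return 0;
    }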
+
+static void
+rate_control_apply_mask_ratetbl(struct sta_info *sta,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_sta_rates *rates)
+{
+ int i;
+ u32 mask;
+ u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN];
+ u16 vht_mask[NL80211_VHT_NSS_MAX];
+ enum nl80211_chan_width chan_width;
+
+ if (!rate_control_cap_mask(sta->sdata, sband, &sta->sta, &mask,
+ mcs_mask, vht_mask))
+ return;
+
+ chan_width = sta->sdata->vif.bss_conf.chandef.width;
+ for (i = 0; i < IEEE80211_TX_RATE_TABLE_SIZE; i++) {
+ if (rates->rate[i].idx < 0)
+ break;
+
+ rate_idx_match_mask(&rates->rate[i].idx, &rates->rate[i].flags,
+ sband, chan_width, mask, mcs_mask,
+ vht_mask);
+ }
+}
+
static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband,
- struct ieee80211_tx_info *info,
struct ieee80211_tx_rate *rates,
int max_rates)
{
enum nl80211_chan_width chan_width;
u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN];
- bool has_mcs_mask;
u32 mask;
- u32 rate_flags;
+ u16 rate_flags, vht_mask[NL80211_VHT_NSS_MAX];
int i;
/*
@@ -588,30 +771,10 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
* default mask (allow all rates) is used to save some processing for
* the common case.
*/
- mask = sdata->rc_rateidx_mask[info->band];
- has_mcs_mask = sdata->rc_has_mcs_mask[info->band];
- rate_flags =
- ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
- for (i = 0; i < sband->n_bitrates; i++)
- if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
- mask &= ~BIT(i);
-
- if (mask == (1 << sband->n_bitrates) - 1 && !has_mcs_mask)
+ if (!rate_control_cap_mask(sdata, sband, sta, &mask, mcs_mask,
+ vht_mask))
return;
- if (has_mcs_mask)
- memcpy(mcs_mask, sdata->rc_rateidx_mcs_mask[info->band],
- sizeof(mcs_mask));
- else
- memset(mcs_mask, 0xff, sizeof(mcs_mask));
-
- if (sta) {
- /* Filter out rates that the STA does not support */
- mask &= sta->supp_rates[info->band];
- for (i = 0; i < sizeof(mcs_mask); i++)
- mcs_mask[i] &= sta->ht_cap.mcs.rx_mask[i];
- }
-
/*
* Make sure the rate index selected for each TX rate is
* included in the configured mask and change the rate indexes
@@ -623,8 +786,10 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
if (rates[i].idx < 0)
break;
- rate_idx_match_mask(&rates[i], sband, chan_width, mask,
- mcs_mask);
+ rate_flags = rates[i].flags;
+ rate_idx_match_mask(&rates[i].idx, &rate_flags, sband,
+ chan_width, mask, mcs_mask, vht_mask);
+ rates[i].flags = rate_flags;
}
}
@@ -648,7 +813,7 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
sband = sdata->local->hw.wiphy->bands[info->band];
if (ieee80211_is_data(hdr->frame_control))
- rate_control_apply_mask(sdata, sta, sband, info, dest, max_rates);
+ rate_control_apply_mask(sdata, sta, sband, dest, max_rates);
if (dest[0].idx < 0)
__rate_control_send_low(&sdata->local->hw, sband, sta, info,
@@ -705,7 +870,10 @@ int rate_control_set_rates(struct ieee80211_hw *hw,
{
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
struct ieee80211_sta_rates *old;
+ struct ieee80211_supported_band *sband;
+ sband = hw->wiphy->bands[ieee80211_get_sdata_band(sta->sdata)];
+ rate_control_apply_mask_ratetbl(sta, sband, rates);
/*
* mac80211 guarantees that this function will not be called
* concurrently, so the following RCU access is safe, even without
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 25c9be5dd..624fe5b81 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -71,64 +71,10 @@ rate_control_tx_status_noskb(struct ieee80211_local *local,
spin_unlock_bh(&sta->rate_ctrl_lock);
}
-static inline void rate_control_rate_init(struct sta_info *sta)
-{
- struct ieee80211_local *local = sta->sdata->local;
- struct rate_control_ref *ref = sta->rate_ctrl;
- struct ieee80211_sta *ista = &sta->sta;
- void *priv_sta = sta->rate_ctrl_priv;
- struct ieee80211_supported_band *sband;
- struct ieee80211_chanctx_conf *chanctx_conf;
-
- ieee80211_sta_set_rx_nss(sta);
-
- if (!ref)
- return;
-
- rcu_read_lock();
-
- chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
- if (WARN_ON(!chanctx_conf)) {
- rcu_read_unlock();
- return;
- }
-
- sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
-
- spin_lock_bh(&sta->rate_ctrl_lock);
- ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista,
- priv_sta);
- spin_unlock_bh(&sta->rate_ctrl_lock);
- rcu_read_unlock();
- set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
-}
-
-static inline void rate_control_rate_update(struct ieee80211_local *local,
+void rate_control_rate_init(struct sta_info *sta);
+void rate_control_rate_update(struct ieee80211_local *local,
struct ieee80211_supported_band *sband,
- struct sta_info *sta, u32 changed)
-{
- struct rate_control_ref *ref = local->rate_ctrl;
- struct ieee80211_sta *ista = &sta->sta;
- void *priv_sta = sta->rate_ctrl_priv;
- struct ieee80211_chanctx_conf *chanctx_conf;
-
- if (ref && ref->ops->rate_update) {
- rcu_read_lock();
-
- chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
- if (WARN_ON(!chanctx_conf)) {
- rcu_read_unlock();
- return;
- }
-
- spin_lock_bh(&sta->rate_ctrl_lock);
- ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def,
- ista, priv_sta, changed);
- spin_unlock_bh(&sta->rate_ctrl_lock);
- rcu_read_unlock();
- }
- drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
-}
+ struct sta_info *sta, u32 changed);
static inline void *rate_control_alloc_sta(struct rate_control_ref *ref,
struct sta_info *sta, gfp_t gfp)
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 543b67233..3928dbd24 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -867,7 +867,13 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
else
idx = index % MCS_GROUP_RATES + (group->streams - 1) * 8;
- if (offset > 0) {
+ /* enable RTS/CTS if needed:
+ * - if station is in dynamic SMPS (and streams > 1)
+ * - for fallback rates, to increase chances of getting through
+ */
+	if (offset > 0 ||
+	    (mi->sta->smps_mode == IEEE80211_SMPS_DYNAMIC &&
+	     group->streams > 1)) {
ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts;
flags |= IEEE80211_TX_RC_USE_RTS_CTS;
}
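The predicate above enables RTS/CTS either for fallback entries (offset > 0) or for multi-stream rates while the peer is in dynamic SMPS, matching the two cases listed in the comment. A truth-table sketch:

    #include <stdbool.h>
    #include <stdio.h>

    static bool want_rts(int offset, bool dyn_smps, int streams)
    {
        return offset > 0 || (dyn_smps && streams > 1);
    }

    int main(void)
    {
        printf("fallback, no SMPS:    %d\n", want_rts(1, false, 1)); /* 1 */
        printf("primary, dyn SMPS x2: %d\n", want_rts(0, true, 2));  /* 1 */
        printf("primary, no SMPS:     %d\n", want_rts(0, false, 2)); /* 0 */
        return 0;
    }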
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 5dae166cb..5bc0b88d9 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -42,6 +42,51 @@ static inline void ieee80211_rx_stats(struct net_device *dev, u32 len)
u64_stats_update_end(&tstats->syncp);
}
+static u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
+ enum nl80211_iftype type)
+{
+ __le16 fc = hdr->frame_control;
+
+ if (ieee80211_is_data(fc)) {
+ if (len < 24) /* drop incorrect hdr len (data) */
+ return NULL;
+
+ if (ieee80211_has_a4(fc))
+ return NULL;
+ if (ieee80211_has_tods(fc))
+ return hdr->addr1;
+ if (ieee80211_has_fromds(fc))
+ return hdr->addr2;
+
+ return hdr->addr3;
+ }
+
+ if (ieee80211_is_mgmt(fc)) {
+ if (len < 24) /* drop incorrect hdr len (mgmt) */
+ return NULL;
+ return hdr->addr3;
+ }
+
+ if (ieee80211_is_ctl(fc)) {
+ if (ieee80211_is_pspoll(fc))
+ return hdr->addr1;
+
+ if (ieee80211_is_back_req(fc)) {
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ return hdr->addr2;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ return hdr->addr1;
+ default:
+ break; /* fall through to the return */
+ }
+ }
+ }
+
+ return NULL;
+}
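ieee80211_get_bssid() above picks the BSSID address slot from the ToDS/FromDS bits for data frames. A sketch of that mapping (4-address frames carry no BSSID):

    #include <stdio.h>

    enum { NO_BSSID = -1 };

    static int bssid_addr_index(int to_ds, int from_ds)
    {
        if (to_ds && from_ds)
            return NO_BSSID; /* a4 frame */
        if (to_ds)
            return 1;
        if (from_ds)
            return 2;
        return 3; /* IBSS-style: BSSID in addr3 */
    }

    int main(void)
    {
        printf("tods:    addr%d\n", bssid_addr_index(1, 0));
        printf("fromds:  addr%d\n", bssid_addr_index(0, 1));
        printf("neither: addr%d\n", bssid_addr_index(0, 0));
        return 0;
    }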
+
/*
* monitor mode reception
*
@@ -77,8 +122,7 @@ static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
hdr = (void *)(skb->data + rtap_vendor_space);
if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
- RX_FLAG_FAILED_PLCP_CRC |
- RX_FLAG_AMPDU_IS_ZEROLEN))
+ RX_FLAG_FAILED_PLCP_CRC))
return true;
if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space))
@@ -346,10 +390,6 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
put_unaligned_le32(status->ampdu_reference, pos);
pos += 4;
- if (status->flag & RX_FLAG_AMPDU_REPORT_ZEROLEN)
- flags |= IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN;
- if (status->flag & RX_FLAG_AMPDU_IS_ZEROLEN)
- flags |= IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN;
if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
if (status->flag & RX_FLAG_AMPDU_IS_LAST)
@@ -1093,11 +1133,6 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
- if (unlikely(rx->skb->len < 16)) {
- I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
- return RX_DROP_MONITOR;
- }
-
/* Drop disallowed frame classes based on STA auth/assoc state;
* IEEE 802.11, Chap 5.5.
*
@@ -1240,22 +1275,22 @@ static void sta_ps_end(struct sta_info *sta)
ieee80211_sta_ps_deliver_wakeup(sta);
}
-int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
+int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
{
- struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
+ struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
bool in_ps;
- WARN_ON(!ieee80211_hw_check(&sta_inf->local->hw, AP_LINK_PS));
+ WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));
/* Don't let the same PS state be set twice */
- in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
+ in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
if ((start && in_ps) || (!start && !in_ps))
return -EINVAL;
if (start)
- sta_ps_start(sta_inf);
+ sta_ps_start(sta);
else
- sta_ps_end(sta_inf);
+ sta_ps_end(sta);
return 0;
}
@@ -1393,7 +1428,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
sta->rx_bytes += rx->skb->len;
if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
sta->last_signal = status->signal;
- ewma_add(&sta->avg_signal, -status->signal);
+ ewma_signal_add(&sta->avg_signal, -status->signal);
}
if (status->chains) {
@@ -1405,7 +1440,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
continue;
sta->chain_signal_last[i] = signal;
- ewma_add(&sta->chain_signal_avg[i], -signal);
+ ewma_signal_add(&sta->chain_signal_avg[i], -signal);
}
}
@@ -1647,7 +1682,6 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
return RX_DROP_MONITOR;
- rx->key->tx_rx_count++;
/* TODO: add threshold stuff again */
} else {
return RX_DROP_MONITOR;
@@ -1883,7 +1917,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
/* Complete frame has been reassembled - process it now */
status = IEEE80211_SKB_RXCB(rx->skb);
- status->rx_flags |= IEEE80211_RX_FRAGMENTED;
out:
ieee80211_led_rx(rx->local);
@@ -2108,9 +2141,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
/* deliver to local stack */
skb->protocol = eth_type_trans(skb, dev);
memset(skb->cb, 0, sizeof(skb->cb));
- if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) &&
- rx->local->napi)
- napi_gro_receive(rx->local->napi, skb);
+ if (rx->napi)
+ napi_gro_receive(rx->napi, skb);
else
netif_receive_skb(skb);
}
@@ -2378,9 +2410,8 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
tf->category == WLAN_CATEGORY_TDLS &&
(tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ||
tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) {
- rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TDLS_CHSW;
- skb_queue_tail(&sdata->skb_queue, rx->skb);
- ieee80211_queue_work(&rx->local->hw, &sdata->work);
+ skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb);
+ schedule_work(&local->tdls_chsw_work);
if (rx->sta)
rx->sta->rx_packets++;
@@ -3004,7 +3035,6 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
return RX_QUEUED;
}
-/* TODO: use IEEE80211_RX_FRAGMENTED */
static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
struct ieee80211_rate *rate)
{
@@ -3216,7 +3246,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
/* This is OK -- must be QoS data frame */
.security_idx = tid,
.seqno_idx = tid,
- .flags = IEEE80211_RX_REORDER_TIMER,
+ .napi = NULL, /* must be NULL to not have races */
};
struct tid_ampdu_rx *tid_agg_rx;
@@ -3286,7 +3316,7 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
case NL80211_IFTYPE_OCB:
if (!bssid)
return false;
- if (ieee80211_is_beacon(hdr->frame_control))
+ if (!ieee80211_is_data_present(hdr->frame_control))
return false;
if (!is_broadcast_ether_addr(bssid))
return false;
@@ -3393,7 +3423,8 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
* be called with rcu_read_lock protection.
*/
static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct napi_struct *napi)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
@@ -3409,6 +3440,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
memset(&rx, 0, sizeof(rx));
rx.skb = skb;
rx.local = local;
+ rx.napi = napi;
if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
@@ -3510,7 +3542,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
* This is the receive path handler. It is called by a low level driver when an
* 802.11 MPDU is received from the hardware.
*/
-void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct napi_struct *napi)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_rate *rate = NULL;
@@ -3609,7 +3642,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
ieee80211_tpt_led_trig_rx(local,
((struct ieee80211_hdr *)skb->data)->frame_control,
skb->len);
- __ieee80211_rx_handle_packet(hw, skb);
+ __ieee80211_rx_handle_packet(hw, skb, napi);
rcu_read_unlock();
@@ -3617,7 +3650,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
drop:
kfree_skb(skb);
}
-EXPORT_SYMBOL(ieee80211_rx);
+EXPORT_SYMBOL(ieee80211_rx_napi);
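The renamed entry point above threads an optional NAPI context down to delivery: with a context the skb goes through GRO, without one it takes the plain receive path. A stub sketch of the split, where gro_receive()/plain_receive() merely stand in for napi_gro_receive()/netif_receive_skb():

    #include <stdio.h>

    struct napi { int id; };
    struct pkt { int len; };

    static void gro_receive(struct napi *n, struct pkt *p)
    {
        printf("GRO on napi %d, len %d\n", n->id, p->len);
    }

    static void plain_receive(struct pkt *p)
    {
        printf("plain receive, len %d\n", p->len);
    }

    static void deliver(struct napi *napi, struct pkt *p)
    {
        if (napi)
            gro_receive(napi, p); /* driver called ieee80211_rx_napi() */
        else
            plain_receive(p);     /* e.g. reorder-timeout release path */
    }

    int main(void)
    {
        struct napi n = { 1 };
        struct pkt p = { 1500 };

        deliver(&n, &p);
        deliver(NULL, &p);
        return 0;
    }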
/* This is a version of the rx handler that can be called from hard irq
* context. Post the skb on the queue and schedule the tasklet */
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 666ddac3c..64f193635 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -68,7 +68,7 @@ static const struct rhashtable_params sta_rht_params = {
.nelem_hint = 3, /* start small */
.automatic_shrinking = true,
.head_offset = offsetof(struct sta_info, hash_node),
- .key_offset = offsetof(struct sta_info, sta.addr),
+ .key_offset = offsetof(struct sta_info, addr),
.key_len = ETH_ALEN,
.hashfn = sta_addr_hash,
.max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE,
@@ -249,6 +249,9 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
if (sta->sta.txq[0])
kfree(to_txq_info(sta->sta.txq[0]));
kfree(rcu_dereference_raw(sta->sta.rates));
+#ifdef CONFIG_MAC80211_MESH
+ kfree(sta->mesh);
+#endif
kfree(sta);
}
@@ -313,13 +316,19 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
mutex_init(&sta->ampdu_mlme.mtx);
#ifdef CONFIG_MAC80211_MESH
- spin_lock_init(&sta->plink_lock);
- if (ieee80211_vif_is_mesh(&sdata->vif) &&
- !sdata->u.mesh.user_mpm)
- init_timer(&sta->plink_timer);
- sta->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
+ if (ieee80211_vif_is_mesh(&sdata->vif)) {
+ sta->mesh = kzalloc(sizeof(*sta->mesh), gfp);
+ if (!sta->mesh)
+ goto free;
+ spin_lock_init(&sta->mesh->plink_lock);
+ if (ieee80211_vif_is_mesh(&sdata->vif) &&
+ !sdata->u.mesh.user_mpm)
+ init_timer(&sta->mesh->plink_timer);
+ sta->mesh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
+ }
#endif
+ memcpy(sta->addr, addr, ETH_ALEN);
memcpy(sta->sta.addr, addr, ETH_ALEN);
sta->local = local;
sta->sdata = sdata;
@@ -332,9 +341,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
ktime_get_ts(&uptime);
sta->last_connected = uptime.tv_sec;
- ewma_init(&sta->avg_signal, 1024, 8);
+ ewma_signal_init(&sta->avg_signal);
for (i = 0; i < ARRAY_SIZE(sta->chain_signal_avg); i++)
- ewma_init(&sta->chain_signal_avg[i], 1024, 8);
+ ewma_signal_init(&sta->chain_signal_avg[i]);
if (local->ops->wake_tx_queue) {
void *txq_data;
@@ -405,6 +414,9 @@ free_txq:
if (sta->sta.txq[0])
kfree(to_txq_info(sta->sta.txq[0]));
free:
+#ifdef CONFIG_MAC80211_MESH
+ kfree(sta->mesh);
+#endif
kfree(sta);
return NULL;
}
@@ -623,7 +635,7 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
bool indicate_tim = false;
u8 ignore_for_tim = sta->sta.uapsd_queues;
int ac;
- u16 id;
+ u16 id = sta->sta.aid;
if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
@@ -631,12 +643,9 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
return;
ps = &sta->sdata->bss->ps;
- id = sta->sta.aid;
#ifdef CONFIG_MAC80211_MESH
} else if (ieee80211_vif_is_mesh(&sta->sdata->vif)) {
ps = &sta->sdata->u.mesh.ps;
- /* TIM map only for 1 <= PLID <= IEEE80211_MAX_AID */
- id = sta->plid % (IEEE80211_MAX_AID + 1);
#endif
} else {
return;
@@ -1887,7 +1896,8 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
}
if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))) {
- sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal);
+ sinfo->signal_avg =
+ (s8) -ewma_signal_read(&sta->avg_signal);
sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
}
}
@@ -1902,7 +1912,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
sinfo->chain_signal[i] = sta->chain_signal_last[i];
sinfo->chain_signal_avg[i] =
- (s8) -ewma_read(&sta->chain_signal_avg[i]);
+ (s8) -ewma_signal_read(&sta->chain_signal_avg[i]);
}
}
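The ewma_signal_* calls above come from DECLARE_EWMA(signal, 1024, 8): samples are kept scaled by a fixed-point factor and decayed with weight 8. A userspace sketch approximating that macro's behavior:

    #include <stdio.h>

    #define EWMA_FACTOR 1024 /* fixed-point scale */
    #define EWMA_WEIGHT    8 /* decay: a new sample contributes 1/8 */

    struct ewma_signal { unsigned long internal; };

    static void ewma_signal_add(struct ewma_signal *e, unsigned long val)
    {
        unsigned long scaled = val * EWMA_FACTOR;

        if (!e->internal)
            e->internal = scaled; /* seed with the first sample */
        else
            e->internal = (e->internal * (EWMA_WEIGHT - 1) + scaled) /
                          EWMA_WEIGHT;
    }

    static unsigned long ewma_signal_read(const struct ewma_signal *e)
    {
        return e->internal / EWMA_FACTOR;
    }

    int main(void)
    {
        struct ewma_signal avg = { 0 };

        for (int i = 0; i < 5; i++)
            ewma_signal_add(&avg, 40 + i); /* e.g. -signal in dBm */
        printf("avg signal: %lu\n", ewma_signal_read(&avg));
        return 0;
    }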
@@ -1956,16 +1966,16 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
BIT(NL80211_STA_INFO_PEER_PM) |
BIT(NL80211_STA_INFO_NONPEER_PM);
- sinfo->llid = sta->llid;
- sinfo->plid = sta->plid;
- sinfo->plink_state = sta->plink_state;
+ sinfo->llid = sta->mesh->llid;
+ sinfo->plid = sta->mesh->plid;
+ sinfo->plink_state = sta->mesh->plink_state;
if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
sinfo->filled |= BIT(NL80211_STA_INFO_T_OFFSET);
- sinfo->t_offset = sta->t_offset;
+ sinfo->t_offset = sta->mesh->t_offset;
}
- sinfo->local_pm = sta->local_pm;
- sinfo->peer_pm = sta->peer_pm;
- sinfo->nonpeer_pm = sta->nonpeer_pm;
+ sinfo->local_pm = sta->mesh->local_pm;
+ sinfo->peer_pm = sta->mesh->peer_pm;
+ sinfo->nonpeer_pm = sta->mesh->nonpeer_pm;
#endif
}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 226f8ca47..b087c71ff 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -53,6 +53,8 @@
* @WLAN_STA_TDLS_CHAN_SWITCH: This TDLS peer supports TDLS channel-switching
* @WLAN_STA_TDLS_OFF_CHANNEL: The local STA is currently off-channel with this
* TDLS peer
+ * @WLAN_STA_TDLS_WIDER_BW: This TDLS peer supports working on a wider
+ *	bandwidth on top of the BSS base channel.
* @WLAN_STA_UAPSD: Station requested unscheduled SP while driver was
* keeping station in power-save mode, reply when the driver
* unblocks the station.
@@ -84,6 +86,7 @@ enum ieee80211_sta_info_flags {
WLAN_STA_TDLS_INITIATOR,
WLAN_STA_TDLS_CHAN_SWITCH,
WLAN_STA_TDLS_OFF_CHANNEL,
+ WLAN_STA_TDLS_WIDER_BW,
WLAN_STA_UAPSD,
WLAN_STA_SP,
WLAN_STA_4ADDR_EVENT,
@@ -270,6 +273,56 @@ struct ieee80211_fast_tx {
};
/**
+ * struct mesh_sta - mesh STA information
+ * @plink_lock: serialize access to plink fields
+ * @llid: Local link ID
+ * @plid: Peer link ID
+ * @aid: local aid supplied by peer
+ * @reason: Cancel reason on PLINK_HOLDING state
+ * @plink_retries: Retries in establishment
+ * @plink_state: peer link state
+ * @plink_timeout: timeout of peer link
+ * @plink_timer: peer link watch timer
+ * @t_offset: timing offset relative to this host
+ * @t_offset_setpoint: reference timing offset of this sta to be used when
+ * calculating clockdrift
+ * @local_pm: local link-specific power save mode
+ * @peer_pm: peer-specific power save mode towards local STA
+ * @nonpeer_pm: STA power save mode towards non-peer neighbors
+ * @processed_beacon: set to true after peer rates and capabilities are
+ * processed
+ * @fail_avg: moving percentage of failed MSDUs
+ */
+struct mesh_sta {
+ struct timer_list plink_timer;
+
+ s64 t_offset;
+ s64 t_offset_setpoint;
+
+ spinlock_t plink_lock;
+ u16 llid;
+ u16 plid;
+ u16 aid;
+ u16 reason;
+ u8 plink_retries;
+
+ bool processed_beacon;
+
+ enum nl80211_plink_state plink_state;
+ u32 plink_timeout;
+
+ /* mesh power save */
+ enum nl80211_mesh_power_mode local_pm;
+ enum nl80211_mesh_power_mode peer_pm;
+ enum nl80211_mesh_power_mode nonpeer_pm;
+
+ /* moving percentage of failed MSDUs */
+ unsigned int fail_avg;
+};
+
+DECLARE_EWMA(signal, 1024, 8)
+
+/**
* struct sta_info - STA information
*
* This structure collects information about a station that
@@ -278,12 +331,13 @@ struct ieee80211_fast_tx {
* @list: global linked list entry
* @free_list: list entry for keeping track of stations to free
* @hash_node: hash node for rhashtable
+ * @addr: station's MAC address - duplicated from public part to
+ * let the hash table work with just a single cacheline
* @local: pointer to the global information
* @sdata: virtual interface this station belongs to
* @ptk: peer keys negotiated with this station, if any
* @ptk_idx: last installed peer key index
* @gtk: group keys negotiated with this station, if any
- * @gtk_idx: last installed group key index
* @rate_ctrl: rate control algorithm reference
* @rate_ctrl_lock: spinlock used to protect rate control data
* (data inside the algorithm, so serializes calls there)
@@ -318,30 +372,17 @@ struct ieee80211_fast_tx {
* @last_signal: signal of last received frame from this STA
* @avg_signal: moving average of signal of received frames from this STA
* @last_ack_signal: signal of last received Ack frame from this STA
- * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue)
+ * @last_seq_ctrl: last received seq/frag number from this STA (per TID
+ * plus one for non-QoS frames)
* @tx_filtered_count: number of frames the hardware filtered for this STA
* @tx_retry_failed: number of frames that failed retry
* @tx_retry_count: total number of retries for frames to this STA
- * @fail_avg: moving percentage of failed MSDUs
* @tx_packets: number of RX/TX MSDUs
* @tx_bytes: number of bytes transmitted to this STA
* @tid_seq: per-TID sequence numbers for sending to this STA
* @ampdu_mlme: A-MPDU state machine state
* @timer_to_tid: identity mapping to ID timers
- * @plink_lock: serialize access to plink fields
- * @llid: Local link ID
- * @plid: Peer link ID
- * @reason: Cancel reason on PLINK_HOLDING state
- * @plink_retries: Retries in establishment
- * @plink_state: peer link state
- * @plink_timeout: timeout of peer link
- * @plink_timer: peer link watch timer
- * @t_offset: timing offset relative to this host
- * @t_offset_setpoint: reference timing offset of this sta to be used when
- * calculating clockdrift
- * @local_pm: local link-specific power save mode
- * @peer_pm: peer-specific power save mode towards local STA
- * @nonpeer_pm: STA power save mode towards non-peer neighbors
+ * @mesh: mesh STA information
* @debugfs: debug filesystem info
* @dead: set to true when sta is unlinked
* @uploaded: set to true when sta is uploaded to the driver
@@ -369,19 +410,19 @@ struct ieee80211_fast_tx {
* @rx_msdu: MSDUs received from this station, using IEEE80211_NUM_TID
* entry for non-QoS frames
* @fast_tx: TX fastpath information
- * @processed_beacon: set to true after peer rates and capabilities are
- * processed
+ * @tdls_chandef: a TDLS peer can have a wider chandef that is compatible with
+ *	the BSS one.
*/
struct sta_info {
/* General information, mostly static */
struct list_head list, free_list;
struct rcu_head rcu_head;
struct rhash_head hash_node;
+ u8 addr[ETH_ALEN];
struct ieee80211_local *local;
struct ieee80211_sub_if_data *sdata;
struct ieee80211_key __rcu *gtk[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
struct ieee80211_key __rcu *ptk[NUM_DEFAULT_KEYS];
- u8 gtk_idx;
u8 ptk_idx;
struct rate_control_ref *rate_ctrl;
void *rate_ctrl_priv;
@@ -390,6 +431,10 @@ struct sta_info {
struct ieee80211_fast_tx __rcu *fast_tx;
+#ifdef CONFIG_MAC80211_MESH
+ struct mesh_sta *mesh;
+#endif
+
struct work_struct drv_deliver_wk;
u16 listen_interval;
@@ -419,12 +464,12 @@ struct sta_info {
unsigned long rx_fragments;
unsigned long rx_dropped;
int last_signal;
- struct ewma avg_signal;
+ struct ewma_signal avg_signal;
int last_ack_signal;
u8 chains;
s8 chain_signal_last[IEEE80211_MAX_CHAINS];
- struct ewma chain_signal_avg[IEEE80211_MAX_CHAINS];
+ struct ewma_signal chain_signal_avg[IEEE80211_MAX_CHAINS];
/* Plus 1 for non-QoS frames */
__le16 last_seq_ctrl[IEEE80211_NUM_TIDS + 1];
@@ -432,8 +477,6 @@ struct sta_info {
/* Updated from TX status path only, no locking requirements */
unsigned long tx_filtered_count;
unsigned long tx_retry_failed, tx_retry_count;
- /* moving percentage of failed MSDUs */
- unsigned int fail_avg;
/* Updated from TX path only, no locking requirements */
u64 tx_packets[IEEE80211_NUM_ACS];
@@ -455,29 +498,6 @@ struct sta_info {
struct sta_ampdu_mlme ampdu_mlme;
u8 timer_to_tid[IEEE80211_NUM_TIDS];
-#ifdef CONFIG_MAC80211_MESH
- /*
- * Mesh peer link attributes, protected by plink_lock.
- * TODO: move to a sub-structure that is referenced with pointer?
- */
- spinlock_t plink_lock;
- u16 llid;
- u16 plid;
- u16 reason;
- u8 plink_retries;
- enum nl80211_plink_state plink_state;
- u32 plink_timeout;
- struct timer_list plink_timer;
-
- s64 t_offset;
- s64 t_offset_setpoint;
- /* mesh power save */
- enum nl80211_mesh_power_mode local_pm;
- enum nl80211_mesh_power_mode peer_pm;
- enum nl80211_mesh_power_mode nonpeer_pm;
- bool processed_beacon;
-#endif
-
#ifdef CONFIG_MAC80211_DEBUGFS
struct sta_info_debugfsdentries {
struct dentry *dir;
@@ -498,6 +518,8 @@ struct sta_info {
u8 reserved_tid;
+ struct cfg80211_chan_def tdls_chandef;
+
/* keep last! */
struct ieee80211_sta sta;
};
@@ -505,7 +527,7 @@ struct sta_info {
static inline enum nl80211_plink_state sta_plink_state(struct sta_info *sta)
{
#ifdef CONFIG_MAC80211_MESH
- return sta->plink_state;
+ return sta->mesh->plink_state;
#endif
return NL80211_PLINK_LISTEN;
}
@@ -608,7 +630,7 @@ u32 sta_addr_hash(const void *key, u32 length, u32 seed);
_sta_bucket_idx(tbl, _addr), \
hash_node) \
/* compare address and run code only if it matches */ \
- if (ether_addr_equal(_sta->sta.addr, (_addr)))
+ if (ether_addr_equal(_sta->addr, (_addr)))
/*
* Get STA info by index, BROKEN!
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 45628f37c..3ed7ddfbf 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -101,6 +101,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
* when it wakes up for the next time.
*/
set_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT);
+ ieee80211_clear_fast_xmit(sta);
/*
* This code races in the following way:
@@ -515,7 +516,7 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
if (!sdata) {
skb->dev = NULL;
- } else if (info->flags & IEEE80211_TX_INTFL_MLME_CONN_TX) {
+ } else {
unsigned int hdr_size =
ieee80211_hdrlen(hdr->frame_control);
@@ -529,9 +530,6 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
ieee80211_mgd_conn_tx_status(sdata,
hdr->frame_control,
acked);
- } else {
- /* we assign ack frame ID for the others */
- WARN_ON(1);
}
rcu_read_unlock();
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 8db6e2994..4e202d067 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -4,6 +4,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2014, Intel Corporation
* Copyright 2014 Intel Mobile Communications GmbH
+ * Copyright 2015 Intel Deutschland GmbH
*
* This file is GPLv2 as found in COPYING.
*/
@@ -11,6 +12,7 @@
#include <linux/ieee80211.h>
#include <linux/log2.h>
#include <net/cfg80211.h>
+#include <linux/rtnetlink.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
@@ -35,20 +37,28 @@ void ieee80211_tdls_peer_del_work(struct work_struct *wk)
mutex_unlock(&local->mtx);
}
-static void ieee80211_tdls_add_ext_capab(struct ieee80211_local *local,
+static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
- u8 *pos = (void *)skb_put(skb, 7);
+ struct ieee80211_local *local = sdata->local;
bool chan_switch = local->hw.wiphy->features &
NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
+ bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW);
+ enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+ struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
+ bool vht = sband && sband->vht_cap.vht_supported;
+ u8 *pos = (void *)skb_put(skb, 10);
*pos++ = WLAN_EID_EXT_CAPABILITY;
- *pos++ = 5; /* len */
+ *pos++ = 8; /* len */
*pos++ = 0x0;
*pos++ = 0x0;
*pos++ = 0x0;
*pos++ = chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0;
*pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
+ *pos++ = 0;
+ *pos++ = 0;
+ *pos++ = (vht && wider_band) ? WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED : 0;
}
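The extended-capabilities IE built above grows from 5 to 8 payload octets so octet 8 can advertise the TDLS wider-bandwidth bit. A sketch of the resulting byte layout, with illustrative bit values standing in for the WLAN_EXT_CAPA* macros:

    #include <stdint.h>
    #include <stdio.h>

    #define EID_EXT_CAPABILITY      127
    #define CAPA4_TDLS_CHAN_SWITCH 0x40 /* octet 4 (illustrative) */
    #define CAPA5_TDLS_ENABLED     0x20 /* octet 5 (illustrative) */
    #define CAPA8_TDLS_WIDE_BW     0x20 /* octet 8 (illustrative) */

    int main(void)
    {
        uint8_t ie[10] = { EID_EXT_CAPABILITY, 8 /* len */ };

        /* payload octet N lives at ie[1 + N]; octets 1-3 stay zero */
        ie[5] = CAPA4_TDLS_CHAN_SWITCH;
        ie[6] = CAPA5_TDLS_ENABLED;
        ie[9] = CAPA8_TDLS_WIDE_BW;

        for (int i = 0; i < (int)sizeof(ie); i++)
            printf("%02x ", ie[i]);
        printf("\n");
        return 0;
    }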
static u8
@@ -284,6 +294,60 @@ static void ieee80211_tdls_add_wmm_param_ie(struct ieee80211_sub_if_data *sdata,
}
static void
+ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta)
+{
+ /* IEEE802.11ac-2013 Table E-4 */
+ u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 };
+ struct cfg80211_chan_def uc = sta->tdls_chandef;
+ enum nl80211_chan_width max_width = ieee80211_get_sta_bw(&sta->sta);
+ int i;
+
+	/* only support upgrading non-narrow channels up to 80MHz */
+ if (max_width == NL80211_CHAN_WIDTH_5 ||
+ max_width == NL80211_CHAN_WIDTH_10)
+ return;
+
+ if (max_width > NL80211_CHAN_WIDTH_80)
+ max_width = NL80211_CHAN_WIDTH_80;
+
+ if (uc.width == max_width)
+ return;
+ /*
+	 * Channel usage constraints in the IEEE802.11ac-2013 specification only
+	 * allow expanding a 20MHz channel to 80MHz in a single way. In
+	 * addition, there are no allowed 40MHz channels that are not part of
+	 * the allowed 80MHz range in the 5GHz spectrum (the relevant one here).
+	 */
+ for (i = 0; i < ARRAY_SIZE(centers_80mhz); i++)
+ if (abs(uc.chan->center_freq - centers_80mhz[i]) <= 30) {
+ uc.center_freq1 = centers_80mhz[i];
+ uc.width = NL80211_CHAN_WIDTH_80;
+ break;
+ }
+
+ if (!uc.center_freq1)
+ return;
+
+ /* proceed to downgrade the chandef until usable or the same */
+ while (uc.width > max_width &&
+ !cfg80211_reg_can_beacon(sdata->local->hw.wiphy,
+ &uc, sdata->wdev.iftype))
+ ieee80211_chandef_downgrade(&uc);
+
+ if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) {
+ tdls_dbg(sdata, "TDLS ch width upgraded %d -> %d\n",
+ sta->tdls_chandef.width, uc.width);
+
+		/*
+		 * the station is not yet authorized when the BW upgrade is
+		 * done, so locking is not required
+		 */
+ sta->tdls_chandef = uc;
+ }
+}
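The upgrade helper above matches a channel to its containing 80MHz block by checking that the primary's center frequency lies within 30MHz of a Table E-4 block center (the four 20MHz subchannels sit at +/-10 and +/-30). A runnable sketch:

    #include <stdio.h>
    #include <stdlib.h>

    static const int centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 };

    static int find_80mhz_center(int freq)
    {
        for (unsigned int i = 0;
             i < sizeof(centers_80mhz) / sizeof(centers_80mhz[0]); i++)
            if (abs(freq - centers_80mhz[i]) <= 30)
                return centers_80mhz[i];
        return 0; /* no containing 80MHz block */
    }

    int main(void)
    {
        printf("ch 40 (5200)  -> %d\n", find_80mhz_center(5200)); /* 5210 */
        printf("ch 140 (5700) -> %d\n", find_80mhz_center(5700)); /* 5690 */
        return 0;
    }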
+
+static void
ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, const u8 *peer,
u8 action_code, bool initiator,
@@ -320,7 +384,7 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
offset = noffset;
}
- ieee80211_tdls_add_ext_capab(local, skb);
+ ieee80211_tdls_add_ext_capab(sdata, skb);
/* add the QoS element if we support it */
if (local->hw.queues >= IEEE80211_NUM_ACS &&
@@ -350,15 +414,17 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
offset = noffset;
}
- rcu_read_lock();
+ mutex_lock(&local->sta_mtx);
/* we should have the peer STA if we're already responding */
if (action_code == WLAN_TDLS_SETUP_RESPONSE) {
sta = sta_info_get(sdata, peer);
if (WARN_ON_ONCE(!sta)) {
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
return;
}
+
+ sta->tdls_chandef = sdata->vif.bss_conf.chandef;
}
ieee80211_tdls_add_oper_classes(sdata, skb);
@@ -384,10 +450,6 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap);
} else if (action_code == WLAN_TDLS_SETUP_RESPONSE &&
ht_cap.ht_supported && sta->sta.ht_cap.ht_supported) {
- /* disable SMPS in TDLS responder */
- sta->sta.ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED
- << IEEE80211_HT_CAP_SM_PS_SHIFT;
-
/* the peer caps are already intersected with our own */
memcpy(&ht_cap, &sta->sta.ht_cap, sizeof(ht_cap));
@@ -448,9 +510,16 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap);
+
+ /*
+ * if both peers support WIDER_BW, we can expand the chandef to
+ * a wider compatible one, up to 80 MHz
+ */
+ if (test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
+ ieee80211_tdls_chandef_vht_upgrade(sdata, sta);
}
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
/* add any remaining IEs */
if (extra_ies_len) {
@@ -474,15 +543,17 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
u8 *pos;
- rcu_read_lock();
+ mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, peer);
ap_sta = sta_info_get(sdata, ifmgd->bssid);
if (WARN_ON_ONCE(!sta || !ap_sta)) {
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
return;
}
+ sta->tdls_chandef = sdata->vif.bss_conf.chandef;
+
/* add any custom IEs that go before the QoS IE */
if (extra_ies_len) {
static const u8 before_qos[] = {
@@ -530,12 +601,19 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
/* only include VHT-operation if not on the 2.4GHz band */
if (band != IEEE80211_BAND_2GHZ && sta->sta.vht_cap.vht_supported) {
+ /*
+ * if both peers support WIDER_BW, we can expand the chandef to
+ * a wider compatible one, up to 80MHz
+ */
+ if (test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
+ ieee80211_tdls_chandef_vht_upgrade(sdata, sta);
+
pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation));
ieee80211_ie_build_vht_oper(pos, &sta->sta.vht_cap,
- &sdata->vif.bss_conf.chandef);
+ &sta->tdls_chandef);
}
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
/* add any remaining IEs */
if (extra_ies_len) {
@@ -784,7 +862,7 @@ ieee80211_tdls_build_mgmt_packet_data(struct ieee80211_sub_if_data *sdata,
max(sizeof(struct ieee80211_mgmt),
sizeof(struct ieee80211_tdls_data)) +
50 + /* supported rates */
- 7 + /* ext capab */
+ 10 + /* ext capab */
26 + /* max(WMM-info, WMM-param) */
2 + max(sizeof(struct ieee80211_ht_cap),
sizeof(struct ieee80211_ht_operation)) +
@@ -983,8 +1061,17 @@ ieee80211_tdls_mgmt_setup(struct wiphy *wiphy, struct net_device *dev,
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
+ enum ieee80211_smps_mode smps_mode = sdata->u.mgd.driver_smps_mode;
int ret;
+ /* don't support setup with forced SMPS mode that's not off */
+ if (smps_mode != IEEE80211_SMPS_AUTOMATIC &&
+ smps_mode != IEEE80211_SMPS_OFF) {
+ tdls_dbg(sdata, "Aborting TDLS setup due to SMPS mode %d\n",
+ smps_mode);
+ return -ENOTSUPP;
+ }
+
mutex_lock(&local->mtx);
/* we don't support concurrent TDLS peer setups */
@@ -1146,6 +1233,74 @@ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
+static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_chanctx_conf *conf;
+ struct ieee80211_chanctx *ctx;
+
+ mutex_lock(&local->chanctx_mtx);
+ conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+ lockdep_is_held(&local->chanctx_mtx));
+ if (conf) {
+ ctx = container_of(conf, struct ieee80211_chanctx, conf);
+ ieee80211_recalc_chanctx_chantype(local, ctx);
+ }
+ mutex_unlock(&local->chanctx_mtx);
+}
+
+static int iee80211_tdls_have_ht_peers(struct ieee80211_sub_if_data *sdata)
+{
+ struct sta_info *sta;
+ bool result = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
+ if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
+ !test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
+ !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH) ||
+ !sta->sta.ht_cap.ht_supported)
+ continue;
+ result = true;
+ break;
+ }
+ rcu_read_unlock();
+
+ return result;
+}
+
+static void
+iee80211_tdls_recalc_ht_protection(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta)
+{
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ bool tdls_ht;
+ u16 protection = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED |
+ IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT |
+ IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;
+ u16 opmode;
+
+ /* Nothing to do if the BSS connection uses HT */
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
+ return;
+
+ tdls_ht = (sta && sta->sta.ht_cap.ht_supported) ||
+ iee80211_tdls_have_ht_peers(sdata);
+
+ opmode = sdata->vif.bss_conf.ht_operation_mode;
+
+ if (tdls_ht)
+ opmode |= protection;
+ else
+ opmode &= ~protection;
+
+ if (opmode == sdata->vif.bss_conf.ht_operation_mode)
+ return;
+
+ sdata->vif.bss_conf.ht_operation_mode = opmode;
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
+}
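The recalculation is a read-modify-write of three protection bits in the HT operation mode, with a notification only when the value actually changes. A reduced sketch with stand-in bit values (illustrative types, not mac80211's):

#include <stdbool.h>
#include <stdint.h>

#define PROT_NONHT_MIXED	0x0003	/* stand-ins for the three */
#define PROT_NON_GF_STA		0x0004	/* IEEE80211_HT_OP_MODE_* */
#define PROT_NON_HT_STA		0x0010	/* bits OR-ed together above */

/* Returns true when @opmode changed and a BSS_CHANGED_HT notify is due. */
static bool recalc_protection(uint16_t *opmode, bool have_ht_peer)
{
	const uint16_t protection =
		PROT_NONHT_MIXED | PROT_NON_GF_STA | PROT_NON_HT_STA;
	uint16_t new_mode = have_ht_peer ? (*opmode | protection)
					 : (*opmode & ~protection);

	if (new_mode == *opmode)
		return false;
	*opmode = new_mode;
	return true;
}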
+
int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
const u8 *peer, enum nl80211_tdls_operation oper)
{
@@ -1171,6 +1326,10 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
return -ENOTSUPP;
}
+ /* protect possible bss_conf changes and avoid concurrency in
+ * ieee80211_bss_info_change_notify()
+ */
+ sdata_lock(sdata);
mutex_lock(&local->mtx);
tdls_dbg(sdata, "TDLS oper %d peer %pM\n", oper, peer);
@@ -1182,16 +1341,20 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
break;
}
- rcu_read_lock();
+ iee80211_tdls_recalc_chanctx(sdata);
+
+ mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, peer);
if (!sta) {
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
ret = -ENOLINK;
break;
}
+ iee80211_tdls_recalc_ht_protection(sdata, sta);
+
set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
WARN_ON_ONCE(is_zero_ether_addr(sdata->u.mgd.tdls_peer) ||
!ether_addr_equal(sdata->u.mgd.tdls_peer, peer));
@@ -1213,6 +1376,12 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
ieee80211_flush_queues(local, sdata, false);
ret = sta_info_destroy_addr(sdata, peer);
+
+ mutex_lock(&local->sta_mtx);
+ iee80211_tdls_recalc_ht_protection(sdata, NULL);
+ mutex_unlock(&local->sta_mtx);
+
+ iee80211_tdls_recalc_chanctx(sdata);
break;
default:
ret = -ENOTSUPP;
@@ -1224,7 +1393,12 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
eth_zero_addr(sdata->u.mgd.tdls_peer);
}
+ if (ret == 0)
+ ieee80211_queue_work(&sdata->local->hw,
+ &sdata->u.mgd.request_smps_work);
+
mutex_unlock(&local->mtx);
+ sdata_unlock(sdata);
return ret;
}
@@ -1627,6 +1801,31 @@ ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata,
return -EINVAL;
}
+ if (!elems.sec_chan_offs) {
+ chan_type = NL80211_CHAN_HT20;
+ } else {
+ switch (elems.sec_chan_offs->sec_chan_offs) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ chan_type = NL80211_CHAN_HT40PLUS;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ chan_type = NL80211_CHAN_HT40MINUS;
+ break;
+ default:
+ chan_type = NL80211_CHAN_HT20;
+ break;
+ }
+ }
+
+ cfg80211_chandef_create(&chandef, chan, chan_type);
+
+ /* we will be active on the TDLS link */
+ if (!cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &chandef,
+ sdata->wdev.iftype)) {
+ tdls_dbg(sdata, "TDLS chan switch to forbidden channel\n");
+ return -EINVAL;
+ }
+
mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, tf->sa);
if (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) {
@@ -1647,27 +1846,15 @@ ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata,
goto out;
}
- if (!sta->sta.ht_cap.ht_supported) {
- chan_type = NL80211_CHAN_NO_HT;
- } else if (!elems.sec_chan_offs) {
- chan_type = NL80211_CHAN_HT20;
- } else {
- switch (elems.sec_chan_offs->sec_chan_offs) {
- case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- chan_type = NL80211_CHAN_HT40PLUS;
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- chan_type = NL80211_CHAN_HT40MINUS;
- break;
- default:
- chan_type = NL80211_CHAN_HT20;
- break;
- }
+ /* peer should have known better */
+ if (!sta->sta.ht_cap.ht_supported && elems.sec_chan_offs &&
+ elems.sec_chan_offs->sec_chan_offs) {
+ tdls_dbg(sdata, "TDLS chan switch - wide chan unsupported\n");
+ ret = -ENOTSUPP;
+ goto out;
}
- cfg80211_chandef_create(&chandef, chan, chan_type);
params.chandef = &chandef;
-
params.switch_time = le16_to_cpu(elems.ch_sw_timing->switch_time);
params.switch_timeout = le16_to_cpu(elems.ch_sw_timing->switch_timeout);
@@ -1691,12 +1878,15 @@ out:
return ret;
}
-void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb)
+static void
+ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
{
struct ieee80211_tdls_data *tf = (void *)skb->data;
struct wiphy *wiphy = sdata->local->hw.wiphy;
+ ASSERT_RTNL();
+
/* make sure the driver supports it */
if (!(wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH))
return;
@@ -1720,3 +1910,47 @@ void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
return;
}
}
+
+void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata)
+{
+ struct sta_info *sta;
+ u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
+ if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
+ !test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+ continue;
+
+ ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr,
+ NL80211_TDLS_TEARDOWN, reason,
+ GFP_ATOMIC);
+ }
+ rcu_read_unlock();
+}
+
+void ieee80211_tdls_chsw_work(struct work_struct *wk)
+{
+ struct ieee80211_local *local =
+ container_of(wk, struct ieee80211_local, tdls_chsw_work);
+ struct ieee80211_sub_if_data *sdata;
+ struct sk_buff *skb;
+ struct ieee80211_tdls_data *tf;
+
+ rtnl_lock();
+ while ((skb = skb_dequeue(&local->skb_queue_tdls_chsw))) {
+ tf = (struct ieee80211_tdls_data *)skb->data;
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (!ieee80211_sdata_running(sdata) ||
+ sdata->vif.type != NL80211_IFTYPE_STATION ||
+ !ether_addr_equal(tf->da, sdata->vif.addr))
+ continue;
+
+ ieee80211_process_tdls_channel_switch(sdata, skb);
+ break;
+ }
+
+ kfree_skb(skb);
+ }
+ rtnl_unlock();
+}
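ieee80211_tdls_chsw_work() follows the usual defer-to-process-context shape: RX queues the frame, the worker drains the queue under RTNL and fans each frame out to the owning interface. The drain loop, condensed to a sketch with hypothetical struct and helper names:

static void chsw_work(struct work_struct *wk)
{
	struct chsw_local *local =			/* hypothetical */
		container_of(wk, struct chsw_local, work);
	struct sk_buff *skb;

	rtnl_lock();
	while ((skb = skb_dequeue(&local->queue))) {
		dispatch_to_matching_iface(local, skb);	/* hypothetical */
		kfree_skb(skb);		/* the queue owns the skb */
	}
	rtnl_unlock();
}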
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 6f14591d8..0b13bfa6f 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -33,11 +33,11 @@
__field(u32, chan_width) \
__field(u32, center_freq1) \
__field(u32, center_freq2)
-#define CHANDEF_ASSIGN(c) \
- __entry->control_freq = (c)->chan ? (c)->chan->center_freq : 0; \
- __entry->chan_width = (c)->width; \
- __entry->center_freq1 = (c)->center_freq1; \
- __entry->center_freq2 = (c)->center_freq2;
+#define CHANDEF_ASSIGN(c) \
+ __entry->control_freq = (c) ? ((c)->chan ? (c)->chan->center_freq : 0) : 0; \
+ __entry->chan_width = (c) ? (c)->width : 0; \
+ __entry->center_freq1 = (c) ? (c)->center_freq1 : 0; \
+ __entry->center_freq2 = (c) ? (c)->center_freq2 : 0;
#define CHANDEF_PR_FMT " control:%d MHz width:%d center: %d/%d MHz"
#define CHANDEF_PR_ARG __entry->control_freq, __entry->chan_width, \
__entry->center_freq1, __entry->center_freq2
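The reworked macro tolerates a NULL chandef by collapsing every field to zero, so tracepoints that take an optional chandef need no call-site guards. The same capture, written as a function for clarity (the macro open-codes these ternaries):

#include <net/cfg80211.h>

static inline void chandef_capture(const struct cfg80211_chan_def *c,
				   u32 out[4])
{
	out[0] = (c && c->chan) ? c->chan->center_freq : 0;
	out[1] = c ? c->width : 0;
	out[2] = c ? c->center_freq1 : 0;
	out[3] = c ? c->center_freq2 : 0;
}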
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 8f1df6793..7892eb8ed 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -607,7 +607,6 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
if (tx->key) {
bool skip_hw = false;
- tx->key->tx_rx_count++;
/* TODO: add threshold stuff again */
switch (tx->key->conf.cipher) {
@@ -687,7 +686,8 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
- tx->sdata->vif.type == NL80211_IFTYPE_ADHOC);
+ tx->sdata->vif.type == NL80211_IFTYPE_ADHOC ||
+ tx->sdata->vif.type == NL80211_IFTYPE_OCB);
/* set up RTS protection if desired */
if (len > tx->local->hw.wiphy->rts_threshold) {
@@ -1218,8 +1218,10 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
if (!tx->sta)
info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
- else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT))
+ else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT)) {
info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
+ ieee80211_check_fast_xmit(tx->sta);
+ }
info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;
@@ -2451,7 +2453,8 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
- test_sta_flag(sta, WLAN_STA_PS_DELIVER))
+ test_sta_flag(sta, WLAN_STA_PS_DELIVER) ||
+ test_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT))
goto out;
if (sdata->noack_map)
@@ -2774,7 +2777,11 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
sdata->sequence_number += 0x10;
}
- sta->tx_msdu[tid]++;
+ if (skb_shinfo(skb)->gso_size)
+ sta->tx_msdu[tid] +=
+ DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
+ else
+ sta->tx_msdu[tid]++;
info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
@@ -3210,6 +3217,16 @@ static void ieee80211_set_csa(struct ieee80211_sub_if_data *sdata,
rcu_read_unlock();
}
+static u8 __ieee80211_csa_update_counter(struct beacon_data *beacon)
+{
+ beacon->csa_current_counter--;
+
+ /* the counter should never reach 0 */
+ WARN_ON_ONCE(!beacon->csa_current_counter);
+
+ return beacon->csa_current_counter;
+}
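Splitting the decrement into a double-underscore helper follows the common kernel convention: the bare variant assumes the caller already looked up and pinned the object (here the RCU-protected beacon), while the public entry point keeps the lookup and locking. A sketch of the convention with hypothetical names:

static u8 __counter_dec(struct beacon_data *beacon)
{
	return --beacon->csa_current_counter;	/* caller ensures != NULL */
}

u8 counter_dec_public(struct beacon_data __rcu **beacon_ptr)	/* hypothetical */
{
	struct beacon_data *beacon;
	u8 count = 0;

	rcu_read_lock();
	beacon = rcu_dereference(*beacon_ptr);
	if (beacon)
		count = __counter_dec(beacon);
	rcu_read_unlock();
	return count;
}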
+
u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
@@ -3228,11 +3245,7 @@ u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif)
if (!beacon)
goto unlock;
- beacon->csa_current_counter--;
-
- /* the counter should never reach 0 */
- WARN_ON_ONCE(!beacon->csa_current_counter);
- count = beacon->csa_current_counter;
+ count = __ieee80211_csa_update_counter(beacon);
unlock:
rcu_read_unlock();
@@ -3332,7 +3345,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
if (beacon) {
if (beacon->csa_counter_offsets[0]) {
if (!is_template)
- ieee80211_csa_update_counter(vif);
+ __ieee80211_csa_update_counter(beacon);
ieee80211_set_csa(sdata, beacon);
}
@@ -3378,7 +3391,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
if (beacon->csa_counter_offsets[0]) {
if (!is_template)
- ieee80211_csa_update_counter(vif);
+ __ieee80211_csa_update_counter(beacon);
ieee80211_set_csa(sdata, beacon);
}
@@ -3408,7 +3421,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
* for now we leave it consistent with mac80211's
* overall behavior.
*/
- ieee80211_csa_update_counter(vif);
+ __ieee80211_csa_update_counter(beacon);
ieee80211_set_csa(sdata, beacon);
}
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 43e5aadd7..cd90ece80 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -47,55 +47,6 @@ struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy)
}
EXPORT_SYMBOL(wiphy_to_ieee80211_hw);
-u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
- enum nl80211_iftype type)
-{
- __le16 fc = hdr->frame_control;
-
- /* drop ACK/CTS frames and incorrect hdr len (ctrl) */
- if (len < 16)
- return NULL;
-
- if (ieee80211_is_data(fc)) {
- if (len < 24) /* drop incorrect hdr len (data) */
- return NULL;
-
- if (ieee80211_has_a4(fc))
- return NULL;
- if (ieee80211_has_tods(fc))
- return hdr->addr1;
- if (ieee80211_has_fromds(fc))
- return hdr->addr2;
-
- return hdr->addr3;
- }
-
- if (ieee80211_is_mgmt(fc)) {
- if (len < 24) /* drop incorrect hdr len (mgmt) */
- return NULL;
- return hdr->addr3;
- }
-
- if (ieee80211_is_ctl(fc)) {
- if (ieee80211_is_pspoll(fc))
- return hdr->addr1;
-
- if (ieee80211_is_back_req(fc)) {
- switch (type) {
- case NL80211_IFTYPE_STATION:
- return hdr->addr2;
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_AP_VLAN:
- return hdr->addr1;
- default:
- break; /* fall through to the return */
- }
- }
- }
-
- return NULL;
-}
-
void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
{
struct sk_buff *skb;
@@ -752,7 +703,12 @@ EXPORT_SYMBOL_GPL(wdev_to_ieee80211_vif);
struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif)
{
- struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_sub_if_data *sdata;
+
+ if (!vif)
+ return NULL;
+
+ sdata = vif_to_sdata(vif);
if (!ieee80211_sdata_running(sdata) ||
!(sdata->flags & IEEE80211_SDATA_IN_DRIVER))
@@ -1709,6 +1665,7 @@ static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local)
local->resuming = false;
local->suspended = false;
local->started = false;
+ local->in_reconfig = false;
/* scheduled scan clearly can't be running any more, but tell
* cfg80211 and clear local state
@@ -1759,16 +1716,24 @@ int ieee80211_reconfig(struct ieee80211_local *local)
struct ieee80211_sub_if_data *sched_scan_sdata;
struct cfg80211_sched_scan_request *sched_scan_req;
bool sched_scan_stopped = false;
+ bool suspended = local->suspended;
/* nothing to do if HW shouldn't run */
if (!local->open_count)
goto wake_up;
#ifdef CONFIG_PM
- if (local->suspended)
+ if (suspended)
local->resuming = true;
if (local->wowlan) {
+ /*
+ * In the wowlan case, both mac80211 and the device are
+ * functional when the resume op is called, so clear
+ * local->suspended so that the device can operate normally
+ * (e.g. pass rx frames).
+ */
+ local->suspended = false;
res = drv_resume(local);
local->wowlan = false;
if (res < 0) {
@@ -1781,8 +1746,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
/*
* res is 1, which means the driver requested
* to go through a regular reset on wakeup.
+ * Restore local->suspended in this case.
*/
reconfig_due_to_wowlan = true;
+ local->suspended = true;
}
#endif
@@ -1794,7 +1761,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
*/
res = drv_start(local);
if (res) {
- if (local->suspended)
+ if (suspended)
WARN(1, "Hardware became unavailable upon resume. This could be a software issue prior to suspend or a hardware issue.\n");
else
WARN(1, "Hardware became unavailable during restart.\n");
@@ -2088,10 +2055,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
* If this is for hw restart things are still running.
* We may want to change that later, however.
*/
- if (local->open_count && (!local->suspended || reconfig_due_to_wowlan))
+ if (local->open_count && (!suspended || reconfig_due_to_wowlan))
drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART);
- if (!local->suspended)
+ if (!suspended)
return 0;
#ifdef CONFIG_PM
@@ -2984,6 +2951,13 @@ ieee80211_extend_noa_desc(struct ieee80211_noa_data *data, u32 tsf, int i)
if (end > 0)
return false;
+ /* One shot NOA */
+ if (data->count[i] == 1)
+ return false;
+
+ if (data->desc[i].interval == 0)
+ return false;
+
/* End time is in the past, check for repetitions */
skip = DIV_ROUND_UP(-end, data->desc[i].interval);
if (data->count[i] < 255) {
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 80694d55d..ff1c79892 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -120,6 +120,7 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap;
struct ieee80211_sta_vht_cap own_cap;
u32 cap_info, i;
+ bool have_80mhz;
memset(vht_cap, 0, sizeof(*vht_cap));
@@ -129,6 +130,20 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
if (!vht_cap_ie || !sband->vht_cap.vht_supported)
return;
+ /* Allow VHT if at least one channel on the sband supports 80 MHz */
+ have_80mhz = false;
+ for (i = 0; i < sband->n_channels; i++) {
+ if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED |
+ IEEE80211_CHAN_NO_80MHZ))
+ continue;
+
+ have_80mhz = true;
+ break;
+ }
+
+ if (!have_80mhz)
+ return;
+
/*
* A VHT STA must support 40 MHz, but if we verify that here
* then we break a few things - some APs (e.g. Netgear R6300v2
@@ -308,11 +323,15 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
enum ieee80211_sta_rx_bandwidth bw;
+ enum nl80211_chan_width bss_width = sdata->vif.bss_conf.chandef.width;
- bw = ieee80211_chan_width_to_rx_bw(sdata->vif.bss_conf.chandef.width);
- bw = min(bw, ieee80211_sta_cap_rx_bw(sta));
+ bw = ieee80211_sta_cap_rx_bw(sta);
bw = min(bw, sta->cur_max_bandwidth);
+ /* do not cap the BW of TDLS WIDER_BW peers by the bss */
+ if (!test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
+ bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
+
return bw;
}
@@ -422,3 +441,29 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
if (changed > 0)
rate_control_rate_update(local, sband, sta, changed);
}
+
+void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
+ u16 vht_mask[NL80211_VHT_NSS_MAX])
+{
+ int i;
+ u16 mask, cap = le16_to_cpu(vht_cap);
+
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ mask = (cap >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
+ switch (mask) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7:
+ vht_mask[i] = 0x00FF;
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8:
+ vht_mask[i] = 0x01FF;
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9:
+ vht_mask[i] = 0x03FF;
+ break;
+ case IEEE80211_VHT_MCS_NOT_SUPPORTED:
+ default:
+ vht_mask[i] = 0;
+ break;
+ }
+ }
+}
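A hedged usage sketch of the decode above, host-endian for the sake of a standalone example. The two-bit encodings are the 802.11ac ones: 0 means MCS 0-7, 1 means MCS 0-8, 2 means MCS 0-9, 3 means the stream is unsupported.

#include <stdint.h>
#include <stdio.h>

#define NSS_MAX 8

static void vht_mask_from_map(uint16_t map, uint16_t mask[NSS_MAX])
{
	static const uint16_t lut[4] = { 0x00ff, 0x01ff, 0x03ff, 0x0000 };
	int i;

	for (i = 0; i < NSS_MAX; i++)
		mask[i] = lut[(map >> (i * 2)) & 0x3];
}

int main(void)
{
	uint16_t mask[NSS_MAX];
	int i;

	/* NSS1: MCS 0-9 (2), NSS2: MCS 0-7 (0), NSS3-8: unsupported (3) */
	vht_mask_from_map(0xfff2, mask);
	for (i = 0; i < NSS_MAX; i++)
		printf("NSS%d mask 0x%04x\n", i + 1, mask[i]);
	return 0;
}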
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 943f76065..feb547dc8 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -516,31 +516,34 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
return RX_DROP_UNUSABLE;
}
- ccmp_hdr2pn(pn, skb->data + hdrlen);
+ if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
+ ccmp_hdr2pn(pn, skb->data + hdrlen);
- queue = rx->security_idx;
+ queue = rx->security_idx;
- if (memcmp(pn, key->u.ccmp.rx_pn[queue], IEEE80211_CCMP_PN_LEN) <= 0) {
- key->u.ccmp.replays++;
- return RX_DROP_UNUSABLE;
- }
+ if (memcmp(pn, key->u.ccmp.rx_pn[queue],
+ IEEE80211_CCMP_PN_LEN) <= 0) {
+ key->u.ccmp.replays++;
+ return RX_DROP_UNUSABLE;
+ }
- if (!(status->flag & RX_FLAG_DECRYPTED)) {
- u8 aad[2 * AES_BLOCK_SIZE];
- u8 b_0[AES_BLOCK_SIZE];
- /* hardware didn't decrypt/verify MIC */
- ccmp_special_blocks(skb, pn, b_0, aad);
+ if (!(status->flag & RX_FLAG_DECRYPTED)) {
+ u8 aad[2 * AES_BLOCK_SIZE];
+ u8 b_0[AES_BLOCK_SIZE];
+ /* hardware didn't decrypt/verify MIC */
+ ccmp_special_blocks(skb, pn, b_0, aad);
+
+ if (ieee80211_aes_ccm_decrypt(
+ key->u.ccmp.tfm, b_0, aad,
+ skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN,
+ data_len,
+ skb->data + skb->len - mic_len, mic_len))
+ return RX_DROP_UNUSABLE;
+ }
- if (ieee80211_aes_ccm_decrypt(
- key->u.ccmp.tfm, b_0, aad,
- skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN,
- data_len,
- skb->data + skb->len - mic_len, mic_len))
- return RX_DROP_UNUSABLE;
+ memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
}
- memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
-
/* Remove CCMP header and MIC */
if (pskb_trim(skb, skb->len - mic_len))
return RX_DROP_UNUSABLE;
@@ -739,31 +742,35 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
return RX_DROP_UNUSABLE;
}
- gcmp_hdr2pn(pn, skb->data + hdrlen);
+ if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
+ gcmp_hdr2pn(pn, skb->data + hdrlen);
- queue = rx->security_idx;
+ queue = rx->security_idx;
- if (memcmp(pn, key->u.gcmp.rx_pn[queue], IEEE80211_GCMP_PN_LEN) <= 0) {
- key->u.gcmp.replays++;
- return RX_DROP_UNUSABLE;
- }
+ if (memcmp(pn, key->u.gcmp.rx_pn[queue],
+ IEEE80211_GCMP_PN_LEN) <= 0) {
+ key->u.gcmp.replays++;
+ return RX_DROP_UNUSABLE;
+ }
- if (!(status->flag & RX_FLAG_DECRYPTED)) {
- u8 aad[2 * AES_BLOCK_SIZE];
- u8 j_0[AES_BLOCK_SIZE];
- /* hardware didn't decrypt/verify MIC */
- gcmp_special_blocks(skb, pn, j_0, aad);
+ if (!(status->flag & RX_FLAG_DECRYPTED)) {
+ u8 aad[2 * AES_BLOCK_SIZE];
+ u8 j_0[AES_BLOCK_SIZE];
+ /* hardware didn't decrypt/verify MIC */
+ gcmp_special_blocks(skb, pn, j_0, aad);
+
+ if (ieee80211_aes_gcm_decrypt(
+ key->u.gcmp.tfm, j_0, aad,
+ skb->data + hdrlen + IEEE80211_GCMP_HDR_LEN,
+ data_len,
+ skb->data + skb->len -
+ IEEE80211_GCMP_MIC_LEN))
+ return RX_DROP_UNUSABLE;
+ }
- if (ieee80211_aes_gcm_decrypt(
- key->u.gcmp.tfm, j_0, aad,
- skb->data + hdrlen + IEEE80211_GCMP_HDR_LEN,
- data_len,
- skb->data + skb->len - IEEE80211_GCMP_MIC_LEN))
- return RX_DROP_UNUSABLE;
+ memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN);
}
- memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN);
-
/* Remove GCMP header and MIC */
if (pskb_trim(skb, skb->len - IEEE80211_GCMP_MIC_LEN))
return RX_DROP_UNUSABLE;
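Both the CCMP and GCMP hunks hinge on the same invariant: the 48-bit packet number is a strictly increasing big-endian counter, so a lexicographic memcmp against the last accepted PN doubles as the replay test, and RX_FLAG_PN_VALIDATED lets hardware that already enforced it skip the block entirely. The check in isolation, assuming 6-byte big-endian PNs:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define PN_LEN 6	/* CCMP/GCMP packet numbers are 48 bits */

/* Accept @pn only if strictly greater than @last_pn; on success it
 * becomes the new high-water mark. Big-endian byte order makes
 * memcmp() order-preserving. */
static bool pn_replay_check(uint8_t last_pn[PN_LEN],
			    const uint8_t pn[PN_LEN])
{
	if (memcmp(pn, last_pn, PN_LEN) <= 0)
		return false;	/* replayed or stale frame */
	memcpy(last_pn, pn, PN_LEN);
	return true;
}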
diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c
index 317c4662e..c865ebb2a 100644
--- a/net/mac802154/cfg.c
+++ b/net/mac802154/cfg.c
@@ -44,6 +44,49 @@ static void ieee802154_del_iface_deprecated(struct wpan_phy *wpan_phy,
ieee802154_if_remove(sdata);
}
+#ifdef CONFIG_PM
+static int ieee802154_suspend(struct wpan_phy *wpan_phy)
+{
+ struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+
+ if (!local->open_count)
+ goto suspend;
+
+ ieee802154_stop_queue(&local->hw);
+ synchronize_net();
+
+ /* stop hardware - this must stop RX */
+ ieee802154_stop_device(local);
+
+suspend:
+ local->suspended = true;
+ return 0;
+}
+
+static int ieee802154_resume(struct wpan_phy *wpan_phy)
+{
+ struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+ int ret;
+
+ /* nothing to do if HW shouldn't run */
+ if (!local->open_count)
+ goto wake_up;
+
+ /* restart hardware */
+ ret = drv_start(local);
+ if (ret)
+ return ret;
+
+wake_up:
+ ieee802154_wake_queue(&local->hw);
+ local->suspended = false;
+ return 0;
+}
+#else
+#define ieee802154_suspend NULL
+#define ieee802154_resume NULL
+#endif
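Stubbing the handlers to NULL when CONFIG_PM is off keeps the ops table definition unconditional; the core can then treat a NULL pointer as unsupported instead of compiling call sites conditionally. A hypothetical caller-side guard, not taken from the patch:

static int phy_suspend(struct wpan_phy *phy,
		       const struct cfg802154_ops *ops)
{
	if (!ops->suspend)
		return -EOPNOTSUPP;	/* kernel built without PM */
	return ops->suspend(phy);
}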
+
static int
ieee802154_add_iface(struct wpan_phy *phy, const char *name,
unsigned char name_assign_type,
@@ -145,13 +188,18 @@ static int
ieee802154_set_pan_id(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
__le16 pan_id)
{
+ int ret;
+
ASSERT_RTNL();
if (wpan_dev->pan_id == pan_id)
return 0;
- wpan_dev->pan_id = pan_id;
- return 0;
+ ret = mac802154_wpan_update_llsec(wpan_dev->netdev);
+ if (!ret)
+ wpan_dev->pan_id = pan_id;
+
+ return ret;
}
static int
@@ -161,10 +209,6 @@ ieee802154_set_backoff_exponent(struct wpan_phy *wpan_phy,
{
ASSERT_RTNL();
- if (wpan_dev->min_be == min_be &&
- wpan_dev->max_be == max_be)
- return 0;
-
wpan_dev->min_be = min_be;
wpan_dev->max_be = max_be;
return 0;
@@ -176,9 +220,6 @@ ieee802154_set_short_addr(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
{
ASSERT_RTNL();
- if (wpan_dev->short_addr == short_addr)
- return 0;
-
wpan_dev->short_addr = short_addr;
return 0;
}
@@ -190,9 +231,6 @@ ieee802154_set_max_csma_backoffs(struct wpan_phy *wpan_phy,
{
ASSERT_RTNL();
- if (wpan_dev->csma_retries == max_csma_backoffs)
- return 0;
-
wpan_dev->csma_retries = max_csma_backoffs;
return 0;
}
@@ -204,9 +242,6 @@ ieee802154_set_max_frame_retries(struct wpan_phy *wpan_phy,
{
ASSERT_RTNL();
- if (wpan_dev->frame_retries == max_frame_retries)
- return 0;
-
wpan_dev->frame_retries = max_frame_retries;
return 0;
}
@@ -217,16 +252,25 @@ ieee802154_set_lbt_mode(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
{
ASSERT_RTNL();
- if (wpan_dev->lbt == mode)
- return 0;
-
wpan_dev->lbt = mode;
return 0;
}
+static int
+ieee802154_set_ackreq_default(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev, bool ackreq)
+{
+ ASSERT_RTNL();
+
+ wpan_dev->ackreq = ackreq;
+ return 0;
+}
+
const struct cfg802154_ops mac802154_config_ops = {
.add_virtual_intf_deprecated = ieee802154_add_iface_deprecated,
.del_virtual_intf_deprecated = ieee802154_del_iface_deprecated,
+ .suspend = ieee802154_suspend,
+ .resume = ieee802154_resume,
.add_virtual_intf = ieee802154_add_iface,
.del_virtual_intf = ieee802154_del_iface,
.set_channel = ieee802154_set_channel,
@@ -239,4 +283,5 @@ const struct cfg802154_ops mac802154_config_ops = {
.set_max_csma_backoffs = ieee802154_set_max_csma_backoffs,
.set_max_frame_retries = ieee802154_set_max_frame_retries,
.set_lbt_mode = ieee802154_set_lbt_mode,
+ .set_ackreq_default = ieee802154_set_ackreq_default,
};
diff --git a/net/mac802154/ieee802154_i.h b/net/mac802154/ieee802154_i.h
index 34755d575..56ccffa3f 100644
--- a/net/mac802154/ieee802154_i.h
+++ b/net/mac802154/ieee802154_i.h
@@ -56,9 +56,13 @@ struct ieee802154_local {
struct hrtimer ifs_timer;
bool started;
+ bool suspended;
struct tasklet_struct tasklet;
struct sk_buff_head skb_queue;
+
+ struct sk_buff *tx_skb;
+ struct work_struct tx_work;
};
enum {
@@ -94,8 +98,6 @@ struct ieee802154_sub_if_data {
struct mac802154_llsec sec;
};
-#define MAC802154_CHAN_NONE 0xff /* No channel is assigned */
-
/* utility functions/constants */
extern const void *const mac802154_wpan_phy_privid; /* for wpan_phy privid */
@@ -125,6 +127,8 @@ ieee802154_sdata_running(struct ieee802154_sub_if_data *sdata)
extern struct ieee802154_mlme_ops mac802154_mlme_wpan;
+void ieee802154_rx(struct ieee802154_local *local, struct sk_buff *skb);
+void ieee802154_xmit_worker(struct work_struct *work);
netdev_tx_t
ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t
@@ -167,6 +171,8 @@ void mac802154_get_table(struct net_device *dev,
struct ieee802154_llsec_table **t);
void mac802154_unlock_table(struct net_device *dev);
+int mac802154_wpan_update_llsec(struct net_device *dev);
+
/* interface handling */
int ieee802154_iface_init(void);
void ieee802154_iface_exit(void);
@@ -176,5 +182,6 @@ ieee802154_if_add(struct ieee802154_local *local, const char *name,
unsigned char name_assign_type, enum nl802154_iftype type,
__le64 extended_addr);
void ieee802154_remove_interfaces(struct ieee802154_local *local);
+void ieee802154_stop_device(struct ieee802154_local *local);
#endif /* __IEEE802154_I_H */
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index 8b698246a..ed26952f9 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -30,7 +30,7 @@
#include "ieee802154_i.h"
#include "driver-ops.h"
-static int mac802154_wpan_update_llsec(struct net_device *dev)
+int mac802154_wpan_update_llsec(struct net_device *dev)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
@@ -125,6 +125,14 @@ static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
if (netif_running(dev))
return -EBUSY;
+ /* the lowpan interface needs to be down so that its
+ * SLAAC address is updated after the next ifup
+ */
+ if (sdata->wpan_dev.lowpan_dev) {
+ if (netif_running(sdata->wpan_dev.lowpan_dev))
+ return -EBUSY;
+ }
+
ieee802154_be64_to_le64(&extended_addr, addr->sa_data);
if (!ieee802154_is_valid_extended_unicast_addr(extended_addr))
return -EINVAL;
@@ -132,6 +140,13 @@ static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
sdata->wpan_dev.extended_addr = extended_addr;
+ /* update the lowpan interface mac address when the
+ * wpan mac address has been changed
+ */
+ if (sdata->wpan_dev.lowpan_dev)
+ memcpy(sdata->wpan_dev.lowpan_dev->dev_addr, dev->dev_addr,
+ dev->addr_len);
+
return mac802154_wpan_update_llsec(dev);
}
@@ -314,11 +329,8 @@ static int mac802154_slave_close(struct net_device *dev)
clear_bit(SDATA_STATE_RUNNING, &sdata->state);
- if (!local->open_count) {
- flush_workqueue(local->workqueue);
- hrtimer_cancel(&local->ifs_timer);
- drv_stop(local);
- }
+ if (!local->open_count)
+ ieee802154_stop_device(local);
return 0;
}
@@ -471,6 +483,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
enum nl802154_iftype type)
{
struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+ int ret;
u8 tmp;
/* set some type-dependent values */
@@ -485,8 +498,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
wpan_dev->min_be = 3;
wpan_dev->max_be = 5;
wpan_dev->csma_retries = 4;
- /* for compatibility, actual default is 3 */
- wpan_dev->frame_retries = -1;
+ wpan_dev->frame_retries = 3;
wpan_dev->pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
wpan_dev->short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
@@ -505,6 +517,10 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
mutex_init(&sdata->sec_mtx);
mac802154_llsec_init(&sdata->sec);
+ ret = mac802154_wpan_update_llsec(sdata->dev);
+ if (ret < 0)
+ return ret;
+
break;
case NL802154_IFTYPE_MONITOR:
sdata->dev->destructor = free_netdev;
diff --git a/net/mac802154/main.c b/net/mac802154/main.c
index 356b346e1..e8cab5bb8 100644
--- a/net/mac802154/main.c
+++ b/net/mac802154/main.c
@@ -40,7 +40,7 @@ static void ieee802154_tasklet_handler(unsigned long data)
* netstack.
*/
skb->pkt_type = 0;
- ieee802154_rx(&local->hw, skb);
+ ieee802154_rx(local, skb);
break;
default:
WARN(1, "mac802154: Packet is of unknown type %d\n",
@@ -58,11 +58,9 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
struct ieee802154_local *local;
size_t priv_size;
- if (!ops || !(ops->xmit_async || ops->xmit_sync) || !ops->ed ||
- !ops->start || !ops->stop || !ops->set_channel) {
- pr_err("undefined IEEE802.15.4 device operations\n");
+ if (WARN_ON(!ops || !(ops->xmit_async || ops->xmit_sync) || !ops->ed ||
+ !ops->start || !ops->stop || !ops->set_channel))
return NULL;
- }
/* Ensure 32-byte alignment of our private data and hw private data.
* We use the wpan_phy priv data for both our ieee802154_local and for
@@ -107,11 +105,13 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
skb_queue_head_init(&local->skb_queue);
+ INIT_WORK(&local->tx_work, ieee802154_xmit_worker);
+
/* init supported flags with 802.15.4 default ranges */
phy->supported.max_minbe = 8;
phy->supported.min_maxbe = 3;
phy->supported.max_maxbe = 8;
- phy->supported.min_frame_retries = -1;
+ phy->supported.min_frame_retries = 0;
phy->supported.max_frame_retries = 7;
phy->supported.max_csma_backoffs = 5;
phy->supported.lbt = NL802154_SUPPORTED_BOOL_FALSE;
@@ -177,11 +177,8 @@ int ieee802154_register_hw(struct ieee802154_hw *hw)
}
if (!(hw->flags & IEEE802154_HW_FRAME_RETRIES)) {
- /* TODO should be 3, but our default value is -1 which means
- * no ARET handling.
- */
- local->phy->supported.min_frame_retries = -1;
- local->phy->supported.max_frame_retries = -1;
+ local->phy->supported.min_frame_retries = 3;
+ local->phy->supported.max_frame_retries = 3;
}
if (hw->flags & IEEE802154_HW_PROMISCUOUS)
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index d93ad2d4a..d1c33c1d6 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -246,13 +246,15 @@ ieee802154_monitors_rx(struct ieee802154_local *local, struct sk_buff *skb)
}
}
-void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb)
+void ieee802154_rx(struct ieee802154_local *local, struct sk_buff *skb)
{
- struct ieee802154_local *local = hw_to_local(hw);
u16 crc;
WARN_ON_ONCE(softirq_count() == 0);
+ if (local->suspended)
+ goto drop;
+
/* TODO: When a transceiver omits the checksum here, we
* add our own calculated one. This is currently an ugly
* solution because the monitor needs a crc here.
@@ -273,8 +275,7 @@ void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb)
crc = crc_ccitt(0, skb->data, skb->len);
if (crc) {
rcu_read_unlock();
- kfree_skb(skb);
- return;
+ goto drop;
}
}
/* remove crc */
@@ -283,8 +284,11 @@ void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb)
__ieee802154_rx_handle_packet(local, skb);
rcu_read_unlock();
+
+ return;
+drop:
+ kfree_skb(skb);
}
-EXPORT_SYMBOL(ieee802154_rx);
void
ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi)
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index c62e95695..7ed439172 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -30,23 +30,11 @@
#include "ieee802154_i.h"
#include "driver-ops.h"
-/* IEEE 802.15.4 transceivers can sleep during the xmit session, so process
- * packets through the workqueue.
- */
-struct ieee802154_xmit_cb {
- struct sk_buff *skb;
- struct work_struct work;
- struct ieee802154_local *local;
-};
-
-static struct ieee802154_xmit_cb ieee802154_xmit_cb;
-
-static void ieee802154_xmit_worker(struct work_struct *work)
+void ieee802154_xmit_worker(struct work_struct *work)
{
- struct ieee802154_xmit_cb *cb =
- container_of(work, struct ieee802154_xmit_cb, work);
- struct ieee802154_local *local = cb->local;
- struct sk_buff *skb = cb->skb;
+ struct ieee802154_local *local =
+ container_of(work, struct ieee802154_local, tx_work);
+ struct sk_buff *skb = local->tx_skb;
struct net_device *dev = skb->dev;
int res;
@@ -106,11 +94,8 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
} else {
- INIT_WORK(&ieee802154_xmit_cb.work, ieee802154_xmit_worker);
- ieee802154_xmit_cb.skb = skb;
- ieee802154_xmit_cb.local = local;
-
- queue_work(local->workqueue, &ieee802154_xmit_cb.work);
+ local->tx_skb = skb;
+ queue_work(local->workqueue, &local->tx_work);
}
return NETDEV_TX_OK;
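Moving the skb and work item into ieee802154_local retires the old file-scope ieee802154_xmit_cb, which a second concurrent transmit could silently overwrite. One slot per device is enough because the netif queue is stopped until the frame completes. The resulting pattern, reduced to a sketch with an illustrative struct:

struct dev_tx_state {				/* illustrative */
	struct sk_buff *tx_skb;			/* single in-flight frame */
	struct work_struct tx_work;		/* runs in process context */
	struct workqueue_struct *wq;
};

static netdev_tx_t dev_tx(struct dev_tx_state *st, struct sk_buff *skb)
{
	st->tx_skb = skb;	/* safe: TX queue is already stopped */
	queue_work(st->wq, &st->tx_work);
	return NETDEV_TX_OK;
}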
diff --git a/net/mac802154/util.c b/net/mac802154/util.c
index 583435f38..f9fd0957a 100644
--- a/net/mac802154/util.c
+++ b/net/mac802154/util.c
@@ -14,6 +14,7 @@
*/
#include "ieee802154_i.h"
+#include "driver-ops.h"
/* privid for wpan_phys to determine whether they belong to us or not */
const void *const mac802154_wpan_phy_privid = &mac802154_wpan_phy_privid;
@@ -92,3 +93,10 @@ void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
dev_consume_skb_any(skb);
}
EXPORT_SYMBOL(ieee802154_xmit_complete);
+
+void ieee802154_stop_device(struct ieee802154_local *local)
+{
+ flush_workqueue(local->workqueue);
+ hrtimer_cancel(&local->ifs_timer);
+ drv_stop(local);
+}
diff --git a/net/mpls/Kconfig b/net/mpls/Kconfig
index 17bde799c..5c467ef97 100644
--- a/net/mpls/Kconfig
+++ b/net/mpls/Kconfig
@@ -24,7 +24,13 @@ config NET_MPLS_GSO
config MPLS_ROUTING
tristate "MPLS: routing support"
- help
+ ---help---
Add support for forwarding of mpls packets.
+config MPLS_IPTUNNEL
+ tristate "MPLS: IP over MPLS tunnel support"
+ depends on LWTUNNEL && MPLS_ROUTING
+ ---help---
+ Support for tunneling IP packets over MPLS.
+
endif # MPLS
diff --git a/net/mpls/Makefile b/net/mpls/Makefile
index 65bbe68c7..9ca923625 100644
--- a/net/mpls/Makefile
+++ b/net/mpls/Makefile
@@ -3,5 +3,6 @@
#
obj-$(CONFIG_NET_MPLS_GSO) += mpls_gso.o
obj-$(CONFIG_MPLS_ROUTING) += mpls_router.o
+obj-$(CONFIG_MPLS_IPTUNNEL) += mpls_iptunnel.o
mpls_router-y := af_mpls.o
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 1f93a5978..bb185a28d 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -15,6 +15,10 @@
#include <net/ip_fib.h>
#include <net/netevent.h>
#include <net/netns/generic.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#endif
#include "internal.h"
#define LABEL_NOT_SPECIFIED (1<<20)
@@ -23,11 +27,23 @@
/* This maximum ha length copied from the definition of struct neighbour */
#define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, sizeof(unsigned long)))
+enum mpls_payload_type {
+ MPT_UNSPEC, /* IPv4 or IPv6 */
+ MPT_IPV4 = 4,
+ MPT_IPV6 = 6,
+
+ /* Other types not implemented:
+ * - Pseudo-wire with or without control word (RFC4385)
+ * - GAL (RFC5586)
+ */
+};
+
struct mpls_route { /* next hop label forwarding entry */
struct net_device __rcu *rt_dev;
struct rcu_head rt_rcu;
u32 rt_label[MAX_NEW_LABELS];
u8 rt_protocol; /* routing protocol that set this entry */
+ u8 rt_payload_type;
u8 rt_labels;
u8 rt_via_alen;
u8 rt_via_table;
@@ -58,10 +74,11 @@ static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev)
return rcu_dereference_rtnl(dev->mpls_ptr);
}
-static bool mpls_output_possible(const struct net_device *dev)
+bool mpls_output_possible(const struct net_device *dev)
{
return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
}
+EXPORT_SYMBOL_GPL(mpls_output_possible);
static unsigned int mpls_rt_header_size(const struct mpls_route *rt)
{
@@ -69,13 +86,14 @@ static unsigned int mpls_rt_header_size(const struct mpls_route *rt)
return rt->rt_labels * sizeof(struct mpls_shim_hdr);
}
-static unsigned int mpls_dev_mtu(const struct net_device *dev)
+unsigned int mpls_dev_mtu(const struct net_device *dev)
{
/* The amount of data the layer 2 frame can hold */
return dev->mtu;
}
+EXPORT_SYMBOL_GPL(mpls_dev_mtu);
-static bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
+bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
if (skb->len <= mtu)
return false;
@@ -85,20 +103,13 @@ static bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
return true;
}
+EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
struct mpls_entry_decoded dec)
{
- /* RFC4385 and RFC5586 encode other packets in mpls such that
- * they don't conflict with the ip version number, making
- * decoding by examining the ip version correct in everything
- * except for the strangest cases.
- *
- * The strange cases if we choose to support them will require
- * manual configuration.
- */
- struct iphdr *hdr4;
- bool success = true;
+ enum mpls_payload_type payload_type;
+ bool success = false;
/* The IPv4 code below accesses through the IPv4 header
* checksum, which is 12 bytes into the packet.
@@ -113,23 +124,32 @@ static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
if (!pskb_may_pull(skb, 12))
return false;
- /* Use ip_hdr to find the ip protocol version */
- hdr4 = ip_hdr(skb);
- if (hdr4->version == 4) {
+ payload_type = rt->rt_payload_type;
+ if (payload_type == MPT_UNSPEC)
+ payload_type = ip_hdr(skb)->version;
+
+ switch (payload_type) {
+ case MPT_IPV4: {
+ struct iphdr *hdr4 = ip_hdr(skb);
skb->protocol = htons(ETH_P_IP);
csum_replace2(&hdr4->check,
htons(hdr4->ttl << 8),
htons(dec.ttl << 8));
hdr4->ttl = dec.ttl;
+ success = true;
+ break;
}
- else if (hdr4->version == 6) {
+ case MPT_IPV6: {
struct ipv6hdr *hdr6 = ipv6_hdr(skb);
skb->protocol = htons(ETH_P_IPV6);
hdr6->hop_limit = dec.ttl;
+ success = true;
+ break;
+ }
+ case MPT_UNSPEC:
+ break;
}
- else
- /* version 0 and version 1 are used by pseudo wires */
- success = false;
+
return success;
}
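Whatever the payload type, the IPv4 branch must fix the header checksum when it rewrites the TTL; csum_replace2() patches it incrementally (RFC 1624) instead of recomputing it over the whole header. The rewrite in isolation, assuming the usual kernel headers:

#include <linux/ip.h>
#include <net/checksum.h>

/* Rewrite an IPv4 TTL, patching the checksum over the 16-bit
 * word that carries TTL in its upper byte. */
static void ipv4_set_ttl(struct iphdr *hdr, u8 ttl)
{
	csum_replace2(&hdr->check,
		      htons(hdr->ttl << 8),	/* old value */
		      htons(ttl << 8));		/* new value */
	hdr->ttl = ttl;
}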
@@ -248,16 +268,17 @@ static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = {
};
struct mpls_route_config {
- u32 rc_protocol;
- u32 rc_ifindex;
- u16 rc_via_table;
- u16 rc_via_alen;
- u8 rc_via[MAX_VIA_ALEN];
- u32 rc_label;
- u32 rc_output_labels;
- u32 rc_output_label[MAX_NEW_LABELS];
- u32 rc_nlflags;
- struct nl_info rc_nlinfo;
+ u32 rc_protocol;
+ u32 rc_ifindex;
+ u16 rc_via_table;
+ u16 rc_via_alen;
+ u8 rc_via[MAX_VIA_ALEN];
+ u32 rc_label;
+ u32 rc_output_labels;
+ u32 rc_output_label[MAX_NEW_LABELS];
+ u32 rc_nlflags;
+ enum mpls_payload_type rc_payload_type;
+ struct nl_info rc_nlinfo;
};
static struct mpls_route *mpls_rt_alloc(size_t alen)
@@ -286,7 +307,7 @@ static void mpls_notify_route(struct net *net, unsigned index,
struct mpls_route *rt = new ? new : old;
unsigned nlm_flags = (old && new) ? NLM_F_REPLACE : 0;
/* Ignore reserved labels for now */
- if (rt && (index >= 16))
+ if (rt && (index >= MPLS_LABEL_FIRST_UNRESERVED))
rtmsg_lfib(event, index, rt, nlh, net, portid, nlm_flags);
}
@@ -320,13 +341,96 @@ static unsigned find_free_label(struct net *net)
platform_label = rtnl_dereference(net->mpls.platform_label);
platform_labels = net->mpls.platform_labels;
- for (index = 16; index < platform_labels; index++) {
+ for (index = MPLS_LABEL_FIRST_UNRESERVED; index < platform_labels;
+ index++) {
if (!rtnl_dereference(platform_label[index]))
return index;
}
return LABEL_NOT_SPECIFIED;
}
+#if IS_ENABLED(CONFIG_INET)
+static struct net_device *inet_fib_lookup_dev(struct net *net, void *addr)
+{
+ struct net_device *dev;
+ struct rtable *rt;
+ struct in_addr daddr;
+
+ memcpy(&daddr, addr, sizeof(struct in_addr));
+ rt = ip_route_output(net, daddr.s_addr, 0, 0, 0);
+ if (IS_ERR(rt))
+ return ERR_CAST(rt);
+
+ dev = rt->dst.dev;
+ dev_hold(dev);
+
+ ip_rt_put(rt);
+
+ return dev;
+}
+#else
+static struct net_device *inet_fib_lookup_dev(struct net *net, void *addr)
+{
+ return ERR_PTR(-EAFNOSUPPORT);
+}
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+static struct net_device *inet6_fib_lookup_dev(struct net *net, void *addr)
+{
+ struct net_device *dev;
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+ int err;
+
+ if (!ipv6_stub)
+ return ERR_PTR(-EAFNOSUPPORT);
+
+ memset(&fl6, 0, sizeof(fl6));
+ memcpy(&fl6.daddr, addr, sizeof(struct in6_addr));
+ err = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6);
+ if (err)
+ return ERR_PTR(err);
+
+ dev = dst->dev;
+ dev_hold(dev);
+ dst_release(dst);
+
+ return dev;
+}
+#else
+static struct net_device *inet6_fib_lookup_dev(struct net *net, void *addr)
+{
+ return ERR_PTR(-EAFNOSUPPORT);
+}
+#endif
+
+static struct net_device *find_outdev(struct net *net,
+ struct mpls_route_config *cfg)
+{
+ struct net_device *dev = NULL;
+
+ if (!cfg->rc_ifindex) {
+ switch (cfg->rc_via_table) {
+ case NEIGH_ARP_TABLE:
+ dev = inet_fib_lookup_dev(net, cfg->rc_via);
+ break;
+ case NEIGH_ND_TABLE:
+ dev = inet6_fib_lookup_dev(net, cfg->rc_via);
+ break;
+ case NEIGH_LINK_TABLE:
+ break;
+ }
+ } else {
+ dev = dev_get_by_index(net, cfg->rc_ifindex);
+ }
+
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return dev;
+}
+
static int mpls_route_add(struct mpls_route_config *cfg)
{
struct mpls_route __rcu **platform_label;
@@ -345,8 +449,8 @@ static int mpls_route_add(struct mpls_route_config *cfg)
index = find_free_label(net);
}
- /* The first 16 labels are reserved, and may not be set */
- if (index < 16)
+ /* Reserved labels may not be set */
+ if (index < MPLS_LABEL_FIRST_UNRESERVED)
goto errout;
/* The full 20 bit range may not be supported. */
@@ -357,10 +461,12 @@ static int mpls_route_add(struct mpls_route_config *cfg)
if (cfg->rc_output_labels > MAX_NEW_LABELS)
goto errout;
- err = -ENODEV;
- dev = dev_get_by_index(net, cfg->rc_ifindex);
- if (!dev)
+ dev = find_outdev(net, cfg);
+ if (IS_ERR(dev)) {
+ err = PTR_ERR(dev);
+ dev = NULL;
goto errout;
+ }
/* Ensure this is a supported device */
err = -EINVAL;
@@ -401,6 +507,7 @@ static int mpls_route_add(struct mpls_route_config *cfg)
rt->rt_label[i] = cfg->rc_output_label[i];
rt->rt_protocol = cfg->rc_protocol;
RCU_INIT_POINTER(rt->rt_dev, dev);
+ rt->rt_payload_type = cfg->rc_payload_type;
rt->rt_via_table = cfg->rc_via_table;
memcpy(rt->rt_via, cfg->rc_via, cfg->rc_via_alen);
@@ -423,8 +530,8 @@ static int mpls_route_del(struct mpls_route_config *cfg)
index = cfg->rc_label;
- /* The first 16 labels are reserved, and may not be removed */
- if (index < 16)
+ /* Reserved labels may not be removed */
+ if (index < MPLS_LABEL_FIRST_UNRESERVED)
goto errout;
/* The full 20 bit range may not be supported */
@@ -626,6 +733,7 @@ int nla_put_labels(struct sk_buff *skb, int attrtype,
return 0;
}
+EXPORT_SYMBOL_GPL(nla_put_labels);
int nla_get_labels(const struct nlattr *nla,
u32 max_labels, u32 *labels, u32 label[])
@@ -671,6 +779,7 @@ int nla_get_labels(const struct nlattr *nla,
*labels = nla_labels;
return 0;
}
+EXPORT_SYMBOL_GPL(nla_get_labels);
static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
struct mpls_route_config *cfg)
@@ -740,8 +849,8 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
&cfg->rc_label))
goto errout;
- /* The first 16 labels are reserved, and may not be set */
- if (cfg->rc_label < 16)
+ /* Reserved labels may not be set */
+ if (cfg->rc_label < MPLS_LABEL_FIRST_UNRESERVED)
goto errout;
break;
@@ -866,8 +975,8 @@ static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
ASSERT_RTNL();
index = cb->args[0];
- if (index < 16)
- index = 16;
+ if (index < MPLS_LABEL_FIRST_UNRESERVED)
+ index = MPLS_LABEL_FIRST_UNRESERVED;
platform_label = rtnl_dereference(net->mpls.platform_label);
platform_labels = net->mpls.platform_labels;
@@ -953,6 +1062,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
goto nort0;
RCU_INIT_POINTER(rt0->rt_dev, lo);
rt0->rt_protocol = RTPROT_KERNEL;
+ rt0->rt_payload_type = MPT_IPV4;
rt0->rt_via_table = NEIGH_LINK_TABLE;
memcpy(rt0->rt_via, lo->dev_addr, lo->addr_len);
}
@@ -963,6 +1073,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
goto nort2;
RCU_INIT_POINTER(rt2->rt_dev, lo);
rt2->rt_protocol = RTPROT_KERNEL;
+ rt2->rt_payload_type = MPT_IPV6;
rt2->rt_via_table = NEIGH_LINK_TABLE;
memcpy(rt2->rt_via, lo->dev_addr, lo->addr_len);
}
@@ -1066,8 +1177,10 @@ static int mpls_net_init(struct net *net)
table[0].data = net;
net->mpls.ctl = register_net_sysctl(net, "net/mpls", table);
- if (net->mpls.ctl == NULL)
+ if (net->mpls.ctl == NULL) {
+ kfree(table);
return -ENOMEM;
+ }
return 0;
}
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index 8cabeb5a1..2681a4ba6 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -50,7 +50,12 @@ static inline struct mpls_entry_decoded mpls_entry_decode(struct mpls_shim_hdr *
return result;
}
-int nla_put_labels(struct sk_buff *skb, int attrtype, u8 labels, const u32 label[]);
-int nla_get_labels(const struct nlattr *nla, u32 max_labels, u32 *labels, u32 label[]);
+int nla_put_labels(struct sk_buff *skb, int attrtype, u8 labels,
+ const u32 label[]);
+int nla_get_labels(const struct nlattr *nla, u32 max_labels, u32 *labels,
+ u32 label[]);
+bool mpls_output_possible(const struct net_device *dev);
+unsigned int mpls_dev_mtu(const struct net_device *dev);
+bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu);
#endif /* MPLS_INTERNAL_H */
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
new file mode 100644
index 000000000..21e70bc9a
--- /dev/null
+++ b/net/mpls/mpls_iptunnel.c
@@ -0,0 +1,231 @@
+/*
+ * mpls tunnels: an implementation of MPLS tunnels using the lightweight
+ * tunnel infrastructure
+ *
+ * Authors: Roopa Prabhu, <roopa@cumulusnetworks.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/net.h>
+#include <linux/module.h>
+#include <linux/mpls.h>
+#include <linux/vmalloc.h>
+#include <net/ip.h>
+#include <net/dst.h>
+#include <net/lwtunnel.h>
+#include <net/netevent.h>
+#include <net/netns/generic.h>
+#include <net/ip6_fib.h>
+#include <net/route.h>
+#include <net/mpls_iptunnel.h>
+#include <linux/mpls_iptunnel.h>
+#include "internal.h"
+
+static const struct nla_policy mpls_iptunnel_policy[MPLS_IPTUNNEL_MAX + 1] = {
+ [MPLS_IPTUNNEL_DST] = { .type = NLA_U32 },
+};
+
+static unsigned int mpls_encap_size(struct mpls_iptunnel_encap *en)
+{
+ /* The size of the layer 2.5 labels to be added for this route */
+ return en->labels * sizeof(struct mpls_shim_hdr);
+}
+
+int mpls_output(struct sock *sk, struct sk_buff *skb)
+{
+ struct mpls_iptunnel_encap *tun_encap_info;
+ struct mpls_shim_hdr *hdr;
+ struct net_device *out_dev;
+ unsigned int hh_len;
+ unsigned int new_header_size;
+ unsigned int mtu;
+ struct dst_entry *dst = skb_dst(skb);
+ struct rtable *rt = NULL;
+ struct rt6_info *rt6 = NULL;
+ int err = 0;
+ bool bos;
+ int i;
+ unsigned int ttl;
+
+ /* Obtain the ttl */
+ if (skb->protocol == htons(ETH_P_IP)) {
+ ttl = ip_hdr(skb)->ttl;
+ rt = (struct rtable *)dst;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ ttl = ipv6_hdr(skb)->hop_limit;
+ rt6 = (struct rt6_info *)dst;
+ } else {
+ goto drop;
+ }
+
+ skb_orphan(skb);
+
+ /* Find the output device */
+ out_dev = dst->dev;
+ if (!mpls_output_possible(out_dev) ||
+ !dst->lwtstate || skb_warn_if_lro(skb))
+ goto drop;
+
+ skb_forward_csum(skb);
+
+ tun_encap_info = mpls_lwtunnel_encap(dst->lwtstate);
+
+ /* Verify the destination can hold the packet */
+ new_header_size = mpls_encap_size(tun_encap_info);
+ mtu = mpls_dev_mtu(out_dev);
+ if (mpls_pkt_too_big(skb, mtu - new_header_size))
+ goto drop;
+
+ hh_len = LL_RESERVED_SPACE(out_dev);
+ if (!out_dev->header_ops)
+ hh_len = 0;
+
+ /* Ensure there is enough space for the headers in the skb */
+ if (skb_cow(skb, hh_len + new_header_size))
+ goto drop;
+
+ skb_push(skb, new_header_size);
+ skb_reset_network_header(skb);
+
+ skb->dev = out_dev;
+ skb->protocol = htons(ETH_P_MPLS_UC);
+
+ /* Push the new labels */
+ hdr = mpls_hdr(skb);
+ bos = true;
+ for (i = tun_encap_info->labels - 1; i >= 0; i--) {
+ hdr[i] = mpls_entry_encode(tun_encap_info->label[i],
+ ttl, 0, bos);
+ bos = false;
+ }
+
+ if (rt)
+ err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gateway,
+ skb);
+ else if (rt6)
+ err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt6->rt6i_gateway,
+ skb);
+ if (err)
+ net_dbg_ratelimited("%s: packet transmission failed: %d\n",
+ __func__, err);
+
+ return 0;
+
+drop:
+ kfree_skb(skb);
+ return -EINVAL;
+}
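The label loop above writes the stack outermost-first while setting the bottom-of-stack (S) bit only on the entry pushed first, i.e. the innermost label. The ordering in a standalone sketch, with a hypothetical encode helper mirroring mpls_entry_encode():

#include <stdbool.h>
#include <stdint.h>

/* 32-bit MPLS shim word: label(20) | TC(3) | S(1) | TTL(8) */
static uint32_t mpls_encode(uint32_t label, uint8_t ttl, uint8_t tc,
			    bool bos)
{
	return (label << 12) | ((uint32_t)tc << 9) |
	       ((uint32_t)bos << 8) | ttl;
}

static void push_labels(uint32_t *stack, const uint32_t *labels,
			int n, uint8_t ttl)
{
	bool bos = true;
	int i;

	/* walk inner to outer so only stack[n - 1] carries S=1 */
	for (i = n - 1; i >= 0; i--) {
		stack[i] = mpls_encode(labels[i], ttl, 0, bos);
		bos = false;
	}
}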
+
+static int mpls_build_state(struct net_device *dev, struct nlattr *nla,
+ unsigned int family, const void *cfg,
+ struct lwtunnel_state **ts)
+{
+ struct mpls_iptunnel_encap *tun_encap_info;
+ struct nlattr *tb[MPLS_IPTUNNEL_MAX + 1];
+ struct lwtunnel_state *newts;
+ int tun_encap_info_len;
+ int ret;
+
+ ret = nla_parse_nested(tb, MPLS_IPTUNNEL_MAX, nla,
+ mpls_iptunnel_policy);
+ if (ret < 0)
+ return ret;
+
+ if (!tb[MPLS_IPTUNNEL_DST])
+ return -EINVAL;
+
+ tun_encap_info_len = sizeof(*tun_encap_info);
+
+ newts = lwtunnel_state_alloc(tun_encap_info_len);
+ if (!newts)
+ return -ENOMEM;
+
+ newts->len = tun_encap_info_len;
+ tun_encap_info = mpls_lwtunnel_encap(newts);
+ ret = nla_get_labels(tb[MPLS_IPTUNNEL_DST], MAX_NEW_LABELS,
+ &tun_encap_info->labels, tun_encap_info->label);
+ if (ret)
+ goto errout;
+ newts->type = LWTUNNEL_ENCAP_MPLS;
+ newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;
+
+ *ts = newts;
+
+ return 0;
+
+errout:
+ kfree(newts);
+ *ts = NULL;
+
+ return ret;
+}
+
+static int mpls_fill_encap_info(struct sk_buff *skb,
+ struct lwtunnel_state *lwtstate)
+{
+ struct mpls_iptunnel_encap *tun_encap_info;
+
+ tun_encap_info = mpls_lwtunnel_encap(lwtstate);
+
+ if (nla_put_labels(skb, MPLS_IPTUNNEL_DST, tun_encap_info->labels,
+ tun_encap_info->label))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int mpls_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+ struct mpls_iptunnel_encap *tun_encap_info;
+
+ tun_encap_info = mpls_lwtunnel_encap(lwtstate);
+
+ return nla_total_size(tun_encap_info->labels * 4);
+}
+
+static int mpls_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+ struct mpls_iptunnel_encap *a_hdr = mpls_lwtunnel_encap(a);
+ struct mpls_iptunnel_encap *b_hdr = mpls_lwtunnel_encap(b);
+ int l;
+
+ if (a_hdr->labels != b_hdr->labels)
+ return 1;
+
+ for (l = 0; l < MAX_NEW_LABELS; l++)
+ if (a_hdr->label[l] != b_hdr->label[l])
+ return 1;
+ return 0;
+}
+
+static const struct lwtunnel_encap_ops mpls_iptun_ops = {
+ .build_state = mpls_build_state,
+ .output = mpls_output,
+ .fill_encap = mpls_fill_encap_info,
+ .get_encap_size = mpls_encap_nlsize,
+ .cmp_encap = mpls_encap_cmp,
+};
+
+static int __init mpls_iptunnel_init(void)
+{
+ return lwtunnel_encap_add_ops(&mpls_iptun_ops, LWTUNNEL_ENCAP_MPLS);
+}
+module_init(mpls_iptunnel_init);
+
+static void __exit mpls_iptunnel_exit(void)
+{
+ lwtunnel_encap_del_ops(&mpls_iptun_ops, LWTUNNEL_ENCAP_MPLS);
+}
+module_exit(mpls_iptunnel_exit);
+
+MODULE_DESCRIPTION("MultiProtocol Label Switching IP Tunnels");
+MODULE_LICENSE("GPL v2");
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 6eae69a69..3e1b4abf1 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -867,6 +867,8 @@ config NETFILTER_XT_TARGET_TEE
depends on NETFILTER_ADVANCED
depends on IPV6 || IPV6=n
depends on !NF_CONNTRACK || NF_CONNTRACK
+ select NF_DUP_IPV4
+ select NF_DUP_IPV6 if IP6_NF_IPTABLES
---help---
This option adds a "TEE" target with which a packet can be cloned and
the clone rerouted to another nexthop.
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index a0e54974e..21a085686 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -34,6 +34,9 @@ EXPORT_SYMBOL(nf_afinfo);
const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);
+DEFINE_PER_CPU(bool, nf_skb_duplicated);
+EXPORT_SYMBOL_GPL(nf_skb_duplicated);
+
int nf_register_afinfo(const struct nf_afinfo *afinfo)
{
mutex_lock(&afinfo_mutex);
@@ -52,9 +55,6 @@ void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
}
EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
-struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly;
-EXPORT_SYMBOL(nf_hooks);
-
#ifdef HAVE_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
@@ -62,63 +62,168 @@ EXPORT_SYMBOL(nf_hooks_needed);
static DEFINE_MUTEX(nf_hook_mutex);
-int nf_register_hook(struct nf_hook_ops *reg)
+static struct list_head *nf_find_hook_list(struct net *net,
+ const struct nf_hook_ops *reg)
{
- struct list_head *nf_hook_list;
- struct nf_hook_ops *elem;
+ struct list_head *hook_list = NULL;
- mutex_lock(&nf_hook_mutex);
- switch (reg->pf) {
- case NFPROTO_NETDEV:
+ if (reg->pf != NFPROTO_NETDEV)
+ hook_list = &net->nf.hooks[reg->pf][reg->hooknum];
+ else if (reg->hooknum == NF_NETDEV_INGRESS) {
#ifdef CONFIG_NETFILTER_INGRESS
- if (reg->hooknum == NF_NETDEV_INGRESS) {
- BUG_ON(reg->dev == NULL);
- nf_hook_list = &reg->dev->nf_hooks_ingress;
- net_inc_ingress_queue();
- break;
- }
+ if (reg->dev && dev_net(reg->dev) == net)
+ hook_list = &reg->dev->nf_hooks_ingress;
#endif
- /* Fall through. */
- default:
- nf_hook_list = &nf_hooks[reg->pf][reg->hooknum];
- break;
}
+ return hook_list;
+}
+
+struct nf_hook_entry {
+ const struct nf_hook_ops *orig_ops;
+ struct nf_hook_ops ops;
+};
+
+int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
+{
+ struct list_head *hook_list;
+ struct nf_hook_entry *entry;
+ struct nf_hook_ops *elem;
- list_for_each_entry(elem, nf_hook_list, list) {
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->orig_ops = reg;
+ entry->ops = *reg;
+
+ hook_list = nf_find_hook_list(net, reg);
+ if (!hook_list) {
+ kfree(entry);
+ return -ENOENT;
+ }
+
+ mutex_lock(&nf_hook_mutex);
+ list_for_each_entry(elem, hook_list, list) {
if (reg->priority < elem->priority)
break;
}
- list_add_rcu(&reg->list, elem->list.prev);
+ list_add_rcu(&entry->ops.list, elem->list.prev);
mutex_unlock(&nf_hook_mutex);
+#ifdef CONFIG_NETFILTER_INGRESS
+ if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
+ net_inc_ingress_queue();
+#endif
#ifdef HAVE_JUMP_LABEL
static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
return 0;
}
-EXPORT_SYMBOL(nf_register_hook);
+EXPORT_SYMBOL(nf_register_net_hook);
-void nf_unregister_hook(struct nf_hook_ops *reg)
+void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
+ struct list_head *hook_list;
+ struct nf_hook_entry *entry;
+ struct nf_hook_ops *elem;
+
+ hook_list = nf_find_hook_list(net, reg);
+ if (!hook_list)
+ return;
+
mutex_lock(&nf_hook_mutex);
- list_del_rcu(&reg->list);
- mutex_unlock(&nf_hook_mutex);
- switch (reg->pf) {
- case NFPROTO_NETDEV:
-#ifdef CONFIG_NETFILTER_INGRESS
- if (reg->hooknum == NF_NETDEV_INGRESS) {
- net_dec_ingress_queue();
+ list_for_each_entry(elem, hook_list, list) {
+ entry = container_of(elem, struct nf_hook_entry, ops);
+ if (entry->orig_ops == reg) {
+ list_del_rcu(&entry->ops.list);
break;
}
- break;
-#endif
- default:
- break;
}
+ mutex_unlock(&nf_hook_mutex);
+ if (&elem->list == hook_list) {
+ WARN(1, "nf_unregister_net_hook: hook not found!\n");
+ return;
+ }
+#ifdef CONFIG_NETFILTER_INGRESS
+ if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
+ net_dec_ingress_queue();
+#endif
#ifdef HAVE_JUMP_LABEL
static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
synchronize_net();
- nf_queue_nf_hook_drop(reg);
+ nf_queue_nf_hook_drop(net, &entry->ops);
+ /* other CPUs might still be processing nfqueue verdicts that used reg */
+ synchronize_net();
+ kfree(entry);
+}
+EXPORT_SYMBOL(nf_unregister_net_hook);
+
+int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+ unsigned int n)
+{
+ unsigned int i;
+ int err = 0;
+
+ for (i = 0; i < n; i++) {
+ err = nf_register_net_hook(net, &reg[i]);
+ if (err)
+ goto err;
+ }
+ return err;
+
+err:
+ if (i > 0)
+ nf_unregister_net_hooks(net, reg, i);
+ return err;
+}
+EXPORT_SYMBOL(nf_register_net_hooks);
+
+void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+ unsigned int n)
+{
+ while (n-- > 0)
+ nf_unregister_net_hook(net, &reg[n]);
+}
+EXPORT_SYMBOL(nf_unregister_net_hooks);
+
+static LIST_HEAD(nf_hook_list);
+
+int nf_register_hook(struct nf_hook_ops *reg)
+{
+ struct net *net, *last;
+ int ret;
+
+ rtnl_lock();
+ for_each_net(net) {
+ ret = nf_register_net_hook(net, reg);
+ if (ret && ret != -ENOENT)
+ goto rollback;
+ }
+ list_add_tail(&reg->list, &nf_hook_list);
+ rtnl_unlock();
+
+ return 0;
+rollback:
+ last = net;
+ for_each_net(net) {
+ if (net == last)
+ break;
+ nf_unregister_net_hook(net, reg);
+ }
+ rtnl_unlock();
+ return ret;
+}
+EXPORT_SYMBOL(nf_register_hook);
+
+void nf_unregister_hook(struct nf_hook_ops *reg)
+{
+ struct net *net;
+
+ rtnl_lock();
+ list_del(&reg->list);
+ for_each_net(net)
+ nf_unregister_net_hook(net, reg);
+ rtnl_unlock();
}
EXPORT_SYMBOL(nf_unregister_hook);
@@ -285,9 +390,12 @@ EXPORT_SYMBOL(nf_conntrack_destroy);
struct nfq_ct_hook __rcu *nfq_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfq_ct_hook);
-struct nfq_ct_nat_hook __rcu *nfq_ct_nat_hook __read_mostly;
-EXPORT_SYMBOL_GPL(nfq_ct_nat_hook);
-
+/* Built-in default zone used e.g. by modules. */
+const struct nf_conntrack_zone nf_ct_zone_dflt = {
+ .id = NF_CT_DEFAULT_ZONE_ID,
+ .dir = NF_CT_DEFAULT_ZONE_DIR,
+};
+EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
#endif /* CONFIG_NF_CONNTRACK */
#ifdef CONFIG_NF_NAT_NEEDED
@@ -295,8 +403,46 @@ void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
EXPORT_SYMBOL(nf_nat_decode_session_hook);
#endif
+static int nf_register_hook_list(struct net *net)
+{
+ struct nf_hook_ops *elem;
+ int ret;
+
+ rtnl_lock();
+ list_for_each_entry(elem, &nf_hook_list, list) {
+ ret = nf_register_net_hook(net, elem);
+ if (ret && ret != -ENOENT)
+ goto out_undo;
+ }
+ rtnl_unlock();
+ return 0;
+
+out_undo:
+ list_for_each_entry_continue_reverse(elem, &nf_hook_list, list)
+ nf_unregister_net_hook(net, elem);
+ rtnl_unlock();
+ return ret;
+}
+
+static void nf_unregister_hook_list(struct net *net)
+{
+ struct nf_hook_ops *elem;
+
+ rtnl_lock();
+ list_for_each_entry(elem, &nf_hook_list, list)
+ nf_unregister_net_hook(net, elem);
+ rtnl_unlock();
+}
+
static int __net_init netfilter_net_init(struct net *net)
{
+ int i, h, ret;
+
+ for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) {
+ for (h = 0; h < NF_MAX_HOOKS; h++)
+ INIT_LIST_HEAD(&net->nf.hooks[i][h]);
+ }
+
#ifdef CONFIG_PROC_FS
net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
net->proc_net);
@@ -307,11 +453,16 @@ static int __net_init netfilter_net_init(struct net *net)
return -ENOMEM;
}
#endif
- return 0;
+ ret = nf_register_hook_list(net);
+ if (ret)
+ remove_proc_entry("netfilter", net->proc_net);
+
+ return ret;
}
static void __net_exit netfilter_net_exit(struct net *net)
{
+ nf_unregister_hook_list(net);
remove_proc_entry("netfilter", net->proc_net);
}
@@ -322,12 +473,7 @@ static struct pernet_operations netfilter_net_ops = {
int __init netfilter_init(void)
{
- int i, h, ret;
-
- for (i = 0; i < ARRAY_SIZE(nf_hooks); i++) {
- for (h = 0; h < NF_MAX_HOOKS; h++)
- INIT_LIST_HEAD(&nf_hooks[i][h]);
- }
+ int ret;
ret = register_pernet_subsys(&netfilter_net_ops);
if (ret < 0)
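After this conversion every namespace owns its hook lists in net->nf.hooks, and
modules are expected to move to the per-net API. A sketch of the intended usage
from a hypothetical module, assuming the 4.3-era nf_hookfn signature (all names
illustrative):

    static unsigned int my_hook(const struct nf_hook_ops *ops,
                                struct sk_buff *skb,
                                const struct nf_hook_state *state)
    {
        return NF_ACCEPT;   /* observe only, never drop */
    }

    static struct nf_hook_ops my_ops[] = {
        {
            .hook     = my_hook,
            .pf       = NFPROTO_IPV4,
            .hooknum  = NF_INET_LOCAL_IN,
            .priority = NF_IP_PRI_FILTER,
        },
    };

    static int __net_init my_net_init(struct net *net)
    {
        return nf_register_net_hooks(net, my_ops, ARRAY_SIZE(my_ops));
    }

    static void __net_exit my_net_exit(struct net *net)
    {
        nf_unregister_net_hooks(net, my_ops, ARRAY_SIZE(my_ops));
    }

    static struct pernet_operations my_net_ops = {
        .init = my_net_init,
        .exit = my_net_exit,
    };

Unconverted callers of nf_register_hook() keep working: the hook is recorded on
nf_hook_list and replayed for every current and future namespace by
nf_register_hook_list() above.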
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index a1fe5377a..5a30ce6e8 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -297,7 +297,7 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
ip_set_timeout_expired(ext_timeout(n, set))))
n = NULL;
- e = kzalloc(set->dsize, GFP_KERNEL);
+ e = kzalloc(set->dsize, GFP_ATOMIC);
if (!e)
return -ENOMEM;
e->id = d->id;
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index 3b6929dec..b32fb0dbe 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -162,6 +162,17 @@ config IP_VS_FO
If you want to compile it in kernel, say Y. To compile it as a
module, choose M here. If unsure, say N.
+config IP_VS_OVF
+ tristate "weighted overflow scheduling"
+ ---help---
+ The weighted overflow scheduling algorithm directs network
+ connections to the server with the highest weight that is
+ currently available and overflows to the next when active
+ connections exceed the node's weight.
+
+ If you want to compile it in kernel, say Y. To compile it as a
+ module, choose M here. If unsure, say N.
+
config IP_VS_LBLC
tristate "locality-based least-connection scheduling"
---help---
diff --git a/net/netfilter/ipvs/Makefile b/net/netfilter/ipvs/Makefile
index 38b2723b2..67f3f4389 100644
--- a/net/netfilter/ipvs/Makefile
+++ b/net/netfilter/ipvs/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_IP_VS_WRR) += ip_vs_wrr.o
obj-$(CONFIG_IP_VS_LC) += ip_vs_lc.o
obj-$(CONFIG_IP_VS_WLC) += ip_vs_wlc.o
obj-$(CONFIG_IP_VS_FO) += ip_vs_fo.o
+obj-$(CONFIG_IP_VS_OVF) += ip_vs_ovf.o
obj-$(CONFIG_IP_VS_LBLC) += ip_vs_lblc.o
obj-$(CONFIG_IP_VS_LBLCR) += ip_vs_lblcr.o
obj-$(CONFIG_IP_VS_DH) += ip_vs_dh.o
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 24c554201..1a23e91d5 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2335,13 +2335,23 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
cmd == IP_VS_SO_SET_STOPDAEMON) {
struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
- mutex_lock(&ipvs->sync_mutex);
- if (cmd == IP_VS_SO_SET_STARTDAEMON)
- ret = start_sync_thread(net, dm->state, dm->mcast_ifn,
- dm->syncid);
- else
+ if (cmd == IP_VS_SO_SET_STARTDAEMON) {
+ struct ipvs_sync_daemon_cfg cfg;
+
+ memset(&cfg, 0, sizeof(cfg));
+ strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
+ sizeof(cfg.mcast_ifn));
+ cfg.syncid = dm->syncid;
+ rtnl_lock();
+ mutex_lock(&ipvs->sync_mutex);
+ ret = start_sync_thread(net, &cfg, dm->state);
+ mutex_unlock(&ipvs->sync_mutex);
+ rtnl_unlock();
+ } else {
+ mutex_lock(&ipvs->sync_mutex);
ret = stop_sync_thread(net, dm->state);
- mutex_unlock(&ipvs->sync_mutex);
+ mutex_unlock(&ipvs->sync_mutex);
+ }
goto out_dec;
}
@@ -2645,15 +2655,15 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
mutex_lock(&ipvs->sync_mutex);
if (ipvs->sync_state & IP_VS_STATE_MASTER) {
d[0].state = IP_VS_STATE_MASTER;
- strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn,
+ strlcpy(d[0].mcast_ifn, ipvs->mcfg.mcast_ifn,
sizeof(d[0].mcast_ifn));
- d[0].syncid = ipvs->master_syncid;
+ d[0].syncid = ipvs->mcfg.syncid;
}
if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
d[1].state = IP_VS_STATE_BACKUP;
- strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn,
+ strlcpy(d[1].mcast_ifn, ipvs->bcfg.mcast_ifn,
sizeof(d[1].mcast_ifn));
- d[1].syncid = ipvs->backup_syncid;
+ d[1].syncid = ipvs->bcfg.syncid;
}
if (copy_to_user(user, &d, sizeof(d)) != 0)
ret = -EFAULT;
@@ -2808,6 +2818,11 @@ static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = {
[IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING,
.len = IP_VS_IFNAME_MAXLEN },
[IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 },
+ [IPVS_DAEMON_ATTR_SYNC_MAXLEN] = { .type = NLA_U16 },
+ [IPVS_DAEMON_ATTR_MCAST_GROUP] = { .type = NLA_U32 },
+ [IPVS_DAEMON_ATTR_MCAST_GROUP6] = { .len = sizeof(struct in6_addr) },
+ [IPVS_DAEMON_ATTR_MCAST_PORT] = { .type = NLA_U16 },
+ [IPVS_DAEMON_ATTR_MCAST_TTL] = { .type = NLA_U8 },
};
/* Policy used for attributes in nested attribute IPVS_CMD_ATTR_SERVICE */
@@ -3266,7 +3281,7 @@ static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest,
}
static int ip_vs_genl_fill_daemon(struct sk_buff *skb, __u32 state,
- const char *mcast_ifn, __u32 syncid)
+ struct ipvs_sync_daemon_cfg *c)
{
struct nlattr *nl_daemon;
@@ -3275,9 +3290,23 @@ static int ip_vs_genl_fill_daemon(struct sk_buff *skb, __u32 state,
return -EMSGSIZE;
if (nla_put_u32(skb, IPVS_DAEMON_ATTR_STATE, state) ||
- nla_put_string(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn) ||
- nla_put_u32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid))
+ nla_put_string(skb, IPVS_DAEMON_ATTR_MCAST_IFN, c->mcast_ifn) ||
+ nla_put_u32(skb, IPVS_DAEMON_ATTR_SYNC_ID, c->syncid) ||
+ nla_put_u16(skb, IPVS_DAEMON_ATTR_SYNC_MAXLEN, c->sync_maxlen) ||
+ nla_put_u16(skb, IPVS_DAEMON_ATTR_MCAST_PORT, c->mcast_port) ||
+ nla_put_u8(skb, IPVS_DAEMON_ATTR_MCAST_TTL, c->mcast_ttl))
goto nla_put_failure;
+#ifdef CONFIG_IP_VS_IPV6
+ if (c->mcast_af == AF_INET6) {
+ if (nla_put_in6_addr(skb, IPVS_DAEMON_ATTR_MCAST_GROUP6,
+ &c->mcast_group.in6))
+ goto nla_put_failure;
+ } else
+#endif
+ if (c->mcast_af == AF_INET &&
+ nla_put_in_addr(skb, IPVS_DAEMON_ATTR_MCAST_GROUP,
+ c->mcast_group.ip))
+ goto nla_put_failure;
nla_nest_end(skb, nl_daemon);
return 0;
@@ -3288,7 +3317,7 @@ nla_put_failure:
}
static int ip_vs_genl_dump_daemon(struct sk_buff *skb, __u32 state,
- const char *mcast_ifn, __u32 syncid,
+ struct ipvs_sync_daemon_cfg *c,
struct netlink_callback *cb)
{
void *hdr;
@@ -3298,7 +3327,7 @@ static int ip_vs_genl_dump_daemon(struct sk_buff *skb, __u32 state,
if (!hdr)
return -EMSGSIZE;
- if (ip_vs_genl_fill_daemon(skb, state, mcast_ifn, syncid))
+ if (ip_vs_genl_fill_daemon(skb, state, c))
goto nla_put_failure;
genlmsg_end(skb, hdr);
@@ -3318,8 +3347,7 @@ static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
mutex_lock(&ipvs->sync_mutex);
if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER,
- ipvs->master_mcast_ifn,
- ipvs->master_syncid, cb) < 0)
+ &ipvs->mcfg, cb) < 0)
goto nla_put_failure;
cb->args[0] = 1;
@@ -3327,8 +3355,7 @@ static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
if ((ipvs->sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) {
if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_BACKUP,
- ipvs->backup_mcast_ifn,
- ipvs->backup_syncid, cb) < 0)
+ &ipvs->bcfg, cb) < 0)
goto nla_put_failure;
cb->args[1] = 1;
@@ -3342,30 +3369,83 @@ nla_put_failure:
static int ip_vs_genl_new_daemon(struct net *net, struct nlattr **attrs)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ipvs_sync_daemon_cfg c;
+ struct nlattr *a;
+ int ret;
+
+ memset(&c, 0, sizeof(c));
if (!(attrs[IPVS_DAEMON_ATTR_STATE] &&
attrs[IPVS_DAEMON_ATTR_MCAST_IFN] &&
attrs[IPVS_DAEMON_ATTR_SYNC_ID]))
return -EINVAL;
+ strlcpy(c.mcast_ifn, nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]),
+ sizeof(c.mcast_ifn));
+ c.syncid = nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID]);
+
+ a = attrs[IPVS_DAEMON_ATTR_SYNC_MAXLEN];
+ if (a)
+ c.sync_maxlen = nla_get_u16(a);
+
+ a = attrs[IPVS_DAEMON_ATTR_MCAST_GROUP];
+ if (a) {
+ c.mcast_af = AF_INET;
+ c.mcast_group.ip = nla_get_in_addr(a);
+ if (!ipv4_is_multicast(c.mcast_group.ip))
+ return -EINVAL;
+ } else {
+ a = attrs[IPVS_DAEMON_ATTR_MCAST_GROUP6];
+ if (a) {
+#ifdef CONFIG_IP_VS_IPV6
+ int addr_type;
+
+ c.mcast_af = AF_INET6;
+ c.mcast_group.in6 = nla_get_in6_addr(a);
+ addr_type = ipv6_addr_type(&c.mcast_group.in6);
+ if (!(addr_type & IPV6_ADDR_MULTICAST))
+ return -EINVAL;
+#else
+ return -EAFNOSUPPORT;
+#endif
+ }
+ }
+
+ a = attrs[IPVS_DAEMON_ATTR_MCAST_PORT];
+ if (a)
+ c.mcast_port = nla_get_u16(a);
+
+ a = attrs[IPVS_DAEMON_ATTR_MCAST_TTL];
+ if (a)
+ c.mcast_ttl = nla_get_u8(a);
/* The synchronization protocol is incompatible with mixed family
* services
*/
- if (net_ipvs(net)->mixed_address_family_dests > 0)
+ if (ipvs->mixed_address_family_dests > 0)
return -EINVAL;
- return start_sync_thread(net,
- nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]),
- nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]),
- nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID]));
+ rtnl_lock();
+ mutex_lock(&ipvs->sync_mutex);
+ ret = start_sync_thread(net, &c,
+ nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
+ mutex_unlock(&ipvs->sync_mutex);
+ rtnl_unlock();
+ return ret;
}
static int ip_vs_genl_del_daemon(struct net *net, struct nlattr **attrs)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ int ret;
+
if (!attrs[IPVS_DAEMON_ATTR_STATE])
return -EINVAL;
- return stop_sync_thread(net,
- nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
+ mutex_lock(&ipvs->sync_mutex);
+ ret = stop_sync_thread(net,
+ nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
+ mutex_unlock(&ipvs->sync_mutex);
+ return ret;
}
static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs)
@@ -3389,7 +3469,7 @@ static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs)
static int ip_vs_genl_set_daemon(struct sk_buff *skb, struct genl_info *info)
{
- int ret = 0, cmd;
+ int ret = -EINVAL, cmd;
struct net *net;
struct netns_ipvs *ipvs;
@@ -3400,22 +3480,19 @@ static int ip_vs_genl_set_daemon(struct sk_buff *skb, struct genl_info *info)
if (cmd == IPVS_CMD_NEW_DAEMON || cmd == IPVS_CMD_DEL_DAEMON) {
struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1];
- mutex_lock(&ipvs->sync_mutex);
if (!info->attrs[IPVS_CMD_ATTR_DAEMON] ||
nla_parse_nested(daemon_attrs, IPVS_DAEMON_ATTR_MAX,
info->attrs[IPVS_CMD_ATTR_DAEMON],
- ip_vs_daemon_policy)) {
- ret = -EINVAL;
+ ip_vs_daemon_policy))
goto out;
- }
if (cmd == IPVS_CMD_NEW_DAEMON)
ret = ip_vs_genl_new_daemon(net, daemon_attrs);
else
ret = ip_vs_genl_del_daemon(net, daemon_attrs);
-out:
- mutex_unlock(&ipvs->sync_mutex);
}
+
+out:
return ret;
}
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c
index 5882bbfd1..136184572 100644
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@ -274,7 +274,7 @@ void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
" for conn " FMT_CONN "\n",
__func__, ARG_TUPLE(&tuple), ARG_CONN(cp));
- h = nf_conntrack_find_get(ip_vs_conn_net(cp), NF_CT_DEFAULT_ZONE,
+ h = nf_conntrack_find_get(ip_vs_conn_net(cp), &nf_ct_zone_dflt,
&tuple);
if (h) {
ct = nf_ct_tuplehash_to_ctrack(h);
diff --git a/net/netfilter/ipvs/ip_vs_ovf.c b/net/netfilter/ipvs/ip_vs_ovf.c
new file mode 100644
index 000000000..f7d62c3b7
--- /dev/null
+++ b/net/netfilter/ipvs/ip_vs_ovf.c
@@ -0,0 +1,86 @@
+/*
+ * IPVS: Overflow-Connection Scheduling module
+ *
+ * Authors: Raducu Deaconu <rhadoo_io@yahoo.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * The scheduler implements "overflow" load balancing according to the
+ * number of active connections: it keeps all connections on the node with
+ * the highest weight and overflows to the next node when the number of
+ * connections exceeds that node's weight.
+ * Note that this scheduler might not be suitable for UDP because it only
+ * counts active connections.
+ *
+ */
+
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <net/ip_vs.h>
+
+/* OVF Connection scheduling */
+static struct ip_vs_dest *
+ip_vs_ovf_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
+ struct ip_vs_iphdr *iph)
+{
+ struct ip_vs_dest *dest, *h = NULL;
+ int hw = 0, w;
+
+ IP_VS_DBG(6, "ip_vs_ovf_schedule(): Scheduling...\n");
+ /* select the node with highest weight, go to next in line if active
+ * connections exceed weight
+ */
+ list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
+ w = atomic_read(&dest->weight);
+ if ((dest->flags & IP_VS_DEST_F_OVERLOAD) ||
+ atomic_read(&dest->activeconns) > w ||
+ w == 0)
+ continue;
+ if (!h || w > hw) {
+ h = dest;
+ hw = w;
+ }
+ }
+
+ if (h) {
+ IP_VS_DBG_BUF(6, "OVF: server %s:%u active %d w %d\n",
+ IP_VS_DBG_ADDR(h->af, &h->addr),
+ ntohs(h->port),
+ atomic_read(&h->activeconns),
+ atomic_read(&h->weight));
+ return h;
+ }
+
+ ip_vs_scheduler_err(svc, "no destination available");
+ return NULL;
+}
+
+static struct ip_vs_scheduler ip_vs_ovf_scheduler = {
+ .name = "ovf",
+ .refcnt = ATOMIC_INIT(0),
+ .module = THIS_MODULE,
+ .n_list = LIST_HEAD_INIT(ip_vs_ovf_scheduler.n_list),
+ .schedule = ip_vs_ovf_schedule,
+};
+
+static int __init ip_vs_ovf_init(void)
+{
+ return register_ip_vs_scheduler(&ip_vs_ovf_scheduler);
+}
+
+static void __exit ip_vs_ovf_cleanup(void)
+{
+ unregister_ip_vs_scheduler(&ip_vs_ovf_scheduler);
+ synchronize_rcu();
+}
+
+module_init(ip_vs_ovf_init);
+module_exit(ip_vs_ovf_cleanup);
+MODULE_LICENSE("GPL");
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index 7e8141647..a2ff7d746 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -137,7 +137,7 @@ struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name)
void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
{
- if (scheduler && scheduler->module)
+ if (scheduler)
module_put(scheduler->module);
}
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index d99ad93eb..43f140950 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -262,6 +262,11 @@ struct ip_vs_sync_mesg {
/* ip_vs_sync_conn entries start here */
};
+union ipvs_sockaddr {
+ struct sockaddr_in in;
+ struct sockaddr_in6 in6;
+};
+
struct ip_vs_sync_buff {
struct list_head list;
unsigned long firstuse;
@@ -320,26 +325,28 @@ sb_dequeue(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms)
* Create a new sync buffer for Version 1 proto.
*/
static inline struct ip_vs_sync_buff *
-ip_vs_sync_buff_create(struct netns_ipvs *ipvs)
+ip_vs_sync_buff_create(struct netns_ipvs *ipvs, unsigned int len)
{
struct ip_vs_sync_buff *sb;
if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
return NULL;
- sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC);
+ len = max_t(unsigned int, len + sizeof(struct ip_vs_sync_mesg),
+ ipvs->mcfg.sync_maxlen);
+ sb->mesg = kmalloc(len, GFP_ATOMIC);
if (!sb->mesg) {
kfree(sb);
return NULL;
}
sb->mesg->reserved = 0; /* old nr_conns i.e. must be zero now */
sb->mesg->version = SYNC_PROTO_VER;
- sb->mesg->syncid = ipvs->master_syncid;
+ sb->mesg->syncid = ipvs->mcfg.syncid;
sb->mesg->size = htons(sizeof(struct ip_vs_sync_mesg));
sb->mesg->nr_conns = 0;
sb->mesg->spare = 0;
sb->head = (unsigned char *)sb->mesg + sizeof(struct ip_vs_sync_mesg);
- sb->end = (unsigned char *)sb->mesg + ipvs->send_mesg_maxlen;
+ sb->end = (unsigned char *)sb->mesg + len;
sb->firstuse = jiffies;
return sb;
@@ -402,7 +409,7 @@ select_master_thread_id(struct netns_ipvs *ipvs, struct ip_vs_conn *cp)
* Create a new sync buffer for Version 0 proto.
*/
static inline struct ip_vs_sync_buff *
-ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs)
+ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs, unsigned int len)
{
struct ip_vs_sync_buff *sb;
struct ip_vs_sync_mesg_v0 *mesg;
@@ -410,17 +417,19 @@ ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs)
if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
return NULL;
- sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC);
+ len = max_t(unsigned int, len + sizeof(struct ip_vs_sync_mesg_v0),
+ ipvs->mcfg.sync_maxlen);
+ sb->mesg = kmalloc(len, GFP_ATOMIC);
if (!sb->mesg) {
kfree(sb);
return NULL;
}
mesg = (struct ip_vs_sync_mesg_v0 *)sb->mesg;
mesg->nr_conns = 0;
- mesg->syncid = ipvs->master_syncid;
+ mesg->syncid = ipvs->mcfg.syncid;
mesg->size = htons(sizeof(struct ip_vs_sync_mesg_v0));
sb->head = (unsigned char *)mesg + sizeof(struct ip_vs_sync_mesg_v0);
- sb->end = (unsigned char *)mesg + ipvs->send_mesg_maxlen;
+ sb->end = (unsigned char *)mesg + len;
sb->firstuse = jiffies;
return sb;
}
@@ -533,7 +542,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
struct ip_vs_sync_buff *buff;
struct ipvs_master_sync_state *ms;
int id;
- int len;
+ unsigned int len;
if (unlikely(cp->af != AF_INET))
return;
@@ -553,17 +562,19 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
id = select_master_thread_id(ipvs, cp);
ms = &ipvs->ms[id];
buff = ms->sync_buff;
+ len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE :
+ SIMPLE_CONN_SIZE;
if (buff) {
m = (struct ip_vs_sync_mesg_v0 *) buff->mesg;
/* Send buffer if it is for v1 */
- if (!m->nr_conns) {
+ if (buff->head + len > buff->end || !m->nr_conns) {
sb_queue_tail(ipvs, ms);
ms->sync_buff = NULL;
buff = NULL;
}
}
if (!buff) {
- buff = ip_vs_sync_buff_create_v0(ipvs);
+ buff = ip_vs_sync_buff_create_v0(ipvs, len);
if (!buff) {
spin_unlock_bh(&ipvs->sync_buff_lock);
pr_err("ip_vs_sync_buff_create failed.\n");
@@ -572,8 +583,6 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
ms->sync_buff = buff;
}
- len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE :
- SIMPLE_CONN_SIZE;
m = (struct ip_vs_sync_mesg_v0 *) buff->mesg;
s = (struct ip_vs_sync_conn_v0 *) buff->head;
@@ -597,12 +606,6 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
m->nr_conns++;
m->size = htons(ntohs(m->size) + len);
buff->head += len;
-
- /* check if there is a space for next one */
- if (buff->head + FULL_CONN_SIZE > buff->end) {
- sb_queue_tail(ipvs, ms);
- ms->sync_buff = NULL;
- }
spin_unlock_bh(&ipvs->sync_buff_lock);
/* synchronize its controller if it has */
@@ -694,7 +697,7 @@ sloop:
}
if (!buff) {
- buff = ip_vs_sync_buff_create(ipvs);
+ buff = ip_vs_sync_buff_create(ipvs, len);
if (!buff) {
spin_unlock_bh(&ipvs->sync_buff_lock);
pr_err("ip_vs_sync_buff_create failed.\n");
@@ -1219,7 +1222,7 @@ static void ip_vs_process_message(struct net *net, __u8 *buffer,
return;
}
/* SyncID sanity check */
- if (ipvs->backup_syncid != 0 && m2->syncid != ipvs->backup_syncid) {
+ if (ipvs->bcfg.syncid != 0 && m2->syncid != ipvs->bcfg.syncid) {
IP_VS_DBG(7, "BACKUP, Ignoring syncid = %d\n", m2->syncid);
return;
}
@@ -1303,6 +1306,14 @@ static void set_mcast_loop(struct sock *sk, u_char loop)
/* setsockopt(sock, SOL_IP, IP_MULTICAST_LOOP, &loop, sizeof(loop)); */
lock_sock(sk);
inet->mc_loop = loop ? 1 : 0;
+#ifdef CONFIG_IP_VS_IPV6
+ if (sk->sk_family == AF_INET6) {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ /* IPV6_MULTICAST_LOOP */
+ np->mc_loop = loop ? 1 : 0;
+ }
+#endif
release_sock(sk);
}
@@ -1316,6 +1327,33 @@ static void set_mcast_ttl(struct sock *sk, u_char ttl)
/* setsockopt(sock, SOL_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl)); */
lock_sock(sk);
inet->mc_ttl = ttl;
+#ifdef CONFIG_IP_VS_IPV6
+ if (sk->sk_family == AF_INET6) {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ /* IPV6_MULTICAST_HOPS */
+ np->mcast_hops = ttl;
+ }
+#endif
+ release_sock(sk);
+}
+
+/* Control fragmentation of messages */
+static void set_mcast_pmtudisc(struct sock *sk, int val)
+{
+ struct inet_sock *inet = inet_sk(sk);
+
+ /* setsockopt(sock, SOL_IP, IP_MTU_DISCOVER, &val, sizeof(val)); */
+ lock_sock(sk);
+ inet->pmtudisc = val;
+#ifdef CONFIG_IP_VS_IPV6
+ if (sk->sk_family == AF_INET6) {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ /* IPV6_MTU_DISCOVER */
+ np->pmtudisc = val;
+ }
+#endif
release_sock(sk);
}
@@ -1338,44 +1376,15 @@ static int set_mcast_if(struct sock *sk, char *ifname)
lock_sock(sk);
inet->mc_index = dev->ifindex;
/* inet->mc_addr = 0; */
- release_sock(sk);
-
- return 0;
-}
-
+#ifdef CONFIG_IP_VS_IPV6
+ if (sk->sk_family == AF_INET6) {
+ struct ipv6_pinfo *np = inet6_sk(sk);
-/*
- * Set the maximum length of sync message according to the
- * specified interface's MTU.
- */
-static int set_sync_mesg_maxlen(struct net *net, int sync_state)
-{
- struct netns_ipvs *ipvs = net_ipvs(net);
- struct net_device *dev;
- int num;
-
- if (sync_state == IP_VS_STATE_MASTER) {
- dev = __dev_get_by_name(net, ipvs->master_mcast_ifn);
- if (!dev)
- return -ENODEV;
-
- num = (dev->mtu - sizeof(struct iphdr) -
- sizeof(struct udphdr) -
- SYNC_MESG_HEADER_LEN - 20) / SIMPLE_CONN_SIZE;
- ipvs->send_mesg_maxlen = SYNC_MESG_HEADER_LEN +
- SIMPLE_CONN_SIZE * min(num, MAX_CONNS_PER_SYNCBUFF);
- IP_VS_DBG(7, "setting the maximum length of sync sending "
- "message %d.\n", ipvs->send_mesg_maxlen);
- } else if (sync_state == IP_VS_STATE_BACKUP) {
- dev = __dev_get_by_name(net, ipvs->backup_mcast_ifn);
- if (!dev)
- return -ENODEV;
-
- ipvs->recv_mesg_maxlen = dev->mtu -
- sizeof(struct iphdr) - sizeof(struct udphdr);
- IP_VS_DBG(7, "setting the maximum length of sync receiving "
- "message %d.\n", ipvs->recv_mesg_maxlen);
+ /* IPV6_MULTICAST_IF */
+ np->mcast_oif = dev->ifindex;
}
+#endif
+ release_sock(sk);
return 0;
}
@@ -1405,15 +1414,34 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
mreq.imr_ifindex = dev->ifindex;
- rtnl_lock();
lock_sock(sk);
ret = ip_mc_join_group(sk, &mreq);
release_sock(sk);
- rtnl_unlock();
return ret;
}
+#ifdef CONFIG_IP_VS_IPV6
+static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
+ char *ifname)
+{
+ struct net *net = sock_net(sk);
+ struct net_device *dev;
+ int ret;
+
+ dev = __dev_get_by_name(net, ifname);
+ if (!dev)
+ return -ENODEV;
+ if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
+ return -EINVAL;
+
+ lock_sock(sk);
+ ret = ipv6_sock_mc_join(sk, dev->ifindex, addr);
+ release_sock(sk);
+
+ return ret;
+}
+#endif
static int bind_mcastif_addr(struct socket *sock, char *ifname)
{
@@ -1442,6 +1470,26 @@ static int bind_mcastif_addr(struct socket *sock, char *ifname)
return sock->ops->bind(sock, (struct sockaddr*)&sin, sizeof(sin));
}
+static void get_mcast_sockaddr(union ipvs_sockaddr *sa, int *salen,
+ struct ipvs_sync_daemon_cfg *c, int id)
+{
+ if (AF_INET6 == c->mcast_af) {
+ sa->in6 = (struct sockaddr_in6) {
+ .sin6_family = AF_INET6,
+ .sin6_port = htons(c->mcast_port + id),
+ };
+ sa->in6.sin6_addr = c->mcast_group.in6;
+ *salen = sizeof(sa->in6);
+ } else {
+ sa->in = (struct sockaddr_in) {
+ .sin_family = AF_INET,
+ .sin_port = htons(c->mcast_port + id),
+ };
+ sa->in.sin_addr = c->mcast_group.in;
+ *salen = sizeof(sa->in);
+ }
+}
+
/*
* Set up sending multicast socket over UDP
*/
@@ -1449,40 +1497,43 @@ static struct socket *make_send_sock(struct net *net, int id)
{
struct netns_ipvs *ipvs = net_ipvs(net);
/* multicast addr */
- struct sockaddr_in mcast_addr = {
- .sin_family = AF_INET,
- .sin_port = cpu_to_be16(IP_VS_SYNC_PORT + id),
- .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP),
- };
+ union ipvs_sockaddr mcast_addr;
struct socket *sock;
- int result;
+ int result, salen;
/* First create a socket */
- result = sock_create_kern(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+ result = sock_create_kern(net, ipvs->mcfg.mcast_af, SOCK_DGRAM,
+ IPPROTO_UDP, &sock);
if (result < 0) {
pr_err("Error during creation of socket; terminating\n");
return ERR_PTR(result);
}
- result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn);
+ result = set_mcast_if(sock->sk, ipvs->mcfg.mcast_ifn);
if (result < 0) {
pr_err("Error setting outbound mcast interface\n");
goto error;
}
set_mcast_loop(sock->sk, 0);
- set_mcast_ttl(sock->sk, 1);
+ set_mcast_ttl(sock->sk, ipvs->mcfg.mcast_ttl);
+ /* Allow fragmentation if MTU changes */
+ set_mcast_pmtudisc(sock->sk, IP_PMTUDISC_DONT);
result = sysctl_sync_sock_size(ipvs);
if (result > 0)
set_sock_size(sock->sk, 1, result);
- result = bind_mcastif_addr(sock, ipvs->master_mcast_ifn);
+ if (AF_INET == ipvs->mcfg.mcast_af)
+ result = bind_mcastif_addr(sock, ipvs->mcfg.mcast_ifn);
+ else
+ result = 0;
if (result < 0) {
pr_err("Error binding address of the mcast interface\n");
goto error;
}
+ get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->mcfg, id);
result = sock->ops->connect(sock, (struct sockaddr *) &mcast_addr,
- sizeof(struct sockaddr), 0);
+ salen, 0);
if (result < 0) {
pr_err("Error connecting to the multicast addr\n");
goto error;
@@ -1503,16 +1554,13 @@ static struct socket *make_receive_sock(struct net *net, int id)
{
struct netns_ipvs *ipvs = net_ipvs(net);
/* multicast addr */
- struct sockaddr_in mcast_addr = {
- .sin_family = AF_INET,
- .sin_port = cpu_to_be16(IP_VS_SYNC_PORT + id),
- .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP),
- };
+ union ipvs_sockaddr mcast_addr;
struct socket *sock;
- int result;
+ int result, salen;
/* First create a socket */
- result = sock_create_kern(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+ result = sock_create_kern(net, ipvs->bcfg.mcast_af, SOCK_DGRAM,
+ IPPROTO_UDP, &sock);
if (result < 0) {
pr_err("Error during creation of socket; terminating\n");
return ERR_PTR(result);
@@ -1523,17 +1571,22 @@ static struct socket *make_receive_sock(struct net *net, int id)
if (result > 0)
set_sock_size(sock->sk, 0, result);
- result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr,
- sizeof(struct sockaddr));
+ get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
+ result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
if (result < 0) {
pr_err("Error binding to the multicast addr\n");
goto error;
}
/* join the multicast group */
- result = join_mcast_group(sock->sk,
- (struct in_addr *) &mcast_addr.sin_addr,
- ipvs->backup_mcast_ifn);
+#ifdef CONFIG_IP_VS_IPV6
+ if (ipvs->bcfg.mcast_af == AF_INET6)
+ result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr,
+ ipvs->bcfg.mcast_ifn);
+ else
+#endif
+ result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr,
+ ipvs->bcfg.mcast_ifn);
if (result < 0) {
pr_err("Error joining to the multicast group\n");
goto error;
@@ -1641,7 +1694,7 @@ static int sync_thread_master(void *data)
pr_info("sync thread started: state = MASTER, mcast_ifn = %s, "
"syncid = %d, id = %d\n",
- ipvs->master_mcast_ifn, ipvs->master_syncid, tinfo->id);
+ ipvs->mcfg.mcast_ifn, ipvs->mcfg.syncid, tinfo->id);
for (;;) {
sb = next_sync_buff(ipvs, ms);
@@ -1695,7 +1748,7 @@ static int sync_thread_backup(void *data)
pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, "
"syncid = %d, id = %d\n",
- ipvs->backup_mcast_ifn, ipvs->backup_syncid, tinfo->id);
+ ipvs->bcfg.mcast_ifn, ipvs->bcfg.syncid, tinfo->id);
while (!kthread_should_stop()) {
wait_event_interruptible(*sk_sleep(tinfo->sock->sk),
@@ -1705,7 +1758,7 @@ static int sync_thread_backup(void *data)
/* do we have data now? */
while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) {
len = ip_vs_receive(tinfo->sock, tinfo->buf,
- ipvs->recv_mesg_maxlen);
+ ipvs->bcfg.sync_maxlen);
if (len <= 0) {
if (len != -EAGAIN)
pr_err("receiving message error\n");
@@ -1725,16 +1778,19 @@ static int sync_thread_backup(void *data)
}
-int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
+int start_sync_thread(struct net *net, struct ipvs_sync_daemon_cfg *c,
+ int state)
{
struct ip_vs_sync_thread_data *tinfo;
struct task_struct **array = NULL, *task;
struct socket *sock;
struct netns_ipvs *ipvs = net_ipvs(net);
+ struct net_device *dev;
char *name;
int (*threadfn)(void *data);
- int id, count;
+ int id, count, hlen;
int result = -ENOMEM;
+ u16 mtu, min_mtu;
IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
@@ -1746,22 +1802,46 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
} else
count = ipvs->threads_mask + 1;
+ if (c->mcast_af == AF_UNSPEC) {
+ c->mcast_af = AF_INET;
+ c->mcast_group.ip = cpu_to_be32(IP_VS_SYNC_GROUP);
+ }
+ if (!c->mcast_port)
+ c->mcast_port = IP_VS_SYNC_PORT;
+ if (!c->mcast_ttl)
+ c->mcast_ttl = 1;
+
+ dev = __dev_get_by_name(net, c->mcast_ifn);
+ if (!dev) {
+ pr_err("Unknown mcast interface: %s\n", c->mcast_ifn);
+ return -ENODEV;
+ }
+ hlen = (AF_INET6 == c->mcast_af) ?
+ sizeof(struct ipv6hdr) + sizeof(struct udphdr) :
+ sizeof(struct iphdr) + sizeof(struct udphdr);
+ mtu = (state == IP_VS_STATE_BACKUP) ?
+ clamp(dev->mtu, 1500U, 65535U) : 1500U;
+ min_mtu = (state == IP_VS_STATE_BACKUP) ? 1024 : 1;
+
+ if (c->sync_maxlen)
+ c->sync_maxlen = clamp_t(unsigned int,
+ c->sync_maxlen, min_mtu,
+ 65535 - hlen);
+ else
+ c->sync_maxlen = mtu - hlen;
+
if (state == IP_VS_STATE_MASTER) {
if (ipvs->ms)
return -EEXIST;
- strlcpy(ipvs->master_mcast_ifn, mcast_ifn,
- sizeof(ipvs->master_mcast_ifn));
- ipvs->master_syncid = syncid;
+ ipvs->mcfg = *c;
name = "ipvs-m:%d:%d";
threadfn = sync_thread_master;
} else if (state == IP_VS_STATE_BACKUP) {
if (ipvs->backup_threads)
return -EEXIST;
- strlcpy(ipvs->backup_mcast_ifn, mcast_ifn,
- sizeof(ipvs->backup_mcast_ifn));
- ipvs->backup_syncid = syncid;
+ ipvs->bcfg = *c;
name = "ipvs-b:%d:%d";
threadfn = sync_thread_backup;
} else {
@@ -1789,7 +1869,6 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
if (!array)
goto out;
}
- set_sync_mesg_maxlen(net, state);
tinfo = NULL;
for (id = 0; id < count; id++) {
@@ -1807,7 +1886,7 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
tinfo->net = net;
tinfo->sock = sock;
if (state == IP_VS_STATE_BACKUP) {
- tinfo->buf = kmalloc(ipvs->recv_mesg_maxlen,
+ tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
GFP_KERNEL);
if (!tinfo->buf)
goto outtinfo;
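The sync_maxlen default computed in start_sync_thread() above is simply the
interface MTU minus the IP and UDP headers; for a master on an IPv4 path the
arithmetic works out as follows (a trivial check, assuming a standard
1500-byte MTU):

    #include <stdio.h>

    int main(void)
    {
        /* IPv4 header (20) + UDP header (8) = 28 bytes of overhead;
         * a backup additionally clamps the MTU into [1500, 65535]. */
        unsigned int mtu = 1500, hlen = 20 + 8;

        printf("default sync_maxlen = %u\n", mtu - hlen);   /* 1472 */
        return 0;
    }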
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 0625a42df..c09d6c719 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -126,7 +126,7 @@ EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
unsigned int nf_conntrack_hash_rnd __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);
-static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
+static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple)
{
unsigned int n;
@@ -135,7 +135,7 @@ static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
* three bytes manually.
*/
n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
- return jhash2((u32 *)tuple, n, zone ^ nf_conntrack_hash_rnd ^
+ return jhash2((u32 *)tuple, n, nf_conntrack_hash_rnd ^
(((__force __u16)tuple->dst.u.all << 16) |
tuple->dst.protonum));
}
@@ -151,15 +151,15 @@ static u32 hash_bucket(u32 hash, const struct net *net)
}
static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
- u16 zone, unsigned int size)
+ unsigned int size)
{
- return __hash_bucket(hash_conntrack_raw(tuple, zone), size);
+ return __hash_bucket(hash_conntrack_raw(tuple), size);
}
-static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
+static inline u_int32_t hash_conntrack(const struct net *net,
const struct nf_conntrack_tuple *tuple)
{
- return __hash_conntrack(tuple, zone, net->ct.htable_size);
+ return __hash_conntrack(tuple, net->ct.htable_size);
}
bool
@@ -288,7 +288,9 @@ static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
}
/* Released via destroy_conntrack() */
-struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
+ const struct nf_conntrack_zone *zone,
+ gfp_t flags)
{
struct nf_conn *tmpl;
@@ -299,24 +301,15 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
tmpl->status = IPS_TEMPLATE;
write_pnet(&tmpl->ct_net, net);
-#ifdef CONFIG_NF_CONNTRACK_ZONES
- if (zone) {
- struct nf_conntrack_zone *nf_ct_zone;
+ if (nf_ct_zone_add(tmpl, flags, zone) < 0)
+ goto out_free;
- nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, flags);
- if (!nf_ct_zone)
- goto out_free;
- nf_ct_zone->id = zone;
- }
-#endif
atomic_set(&tmpl->ct_general.use, 0);
return tmpl;
-#ifdef CONFIG_NF_CONNTRACK_ZONES
out_free:
kfree(tmpl);
return NULL;
-#endif
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
@@ -374,7 +367,6 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
unsigned int hash, reply_hash;
- u16 zone = nf_ct_zone(ct);
unsigned int sequence;
nf_ct_helper_destroy(ct);
@@ -382,9 +374,9 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct)
local_bh_disable();
do {
sequence = read_seqcount_begin(&net->ct.generation);
- hash = hash_conntrack(net, zone,
+ hash = hash_conntrack(net,
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
- reply_hash = hash_conntrack(net, zone,
+ reply_hash = hash_conntrack(net,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
@@ -432,8 +424,8 @@ static void death_by_timeout(unsigned long ul_conntrack)
static inline bool
nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
- const struct nf_conntrack_tuple *tuple,
- u16 zone)
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone)
{
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
@@ -441,8 +433,8 @@ nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
* so we need to check that the conntrack is confirmed
*/
return nf_ct_tuple_equal(tuple, &h->tuple) &&
- nf_ct_zone(ct) == zone &&
- nf_ct_is_confirmed(ct);
+ nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
+ nf_ct_is_confirmed(ct);
}
/*
@@ -451,7 +443,7 @@ nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
* and recheck nf_ct_tuple_equal(tuple, &h->tuple)
*/
static struct nf_conntrack_tuple_hash *
-____nf_conntrack_find(struct net *net, u16 zone,
+____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple, u32 hash)
{
struct nf_conntrack_tuple_hash *h;
@@ -487,7 +479,7 @@ begin:
/* Find a connection corresponding to a tuple. */
static struct nf_conntrack_tuple_hash *
-__nf_conntrack_find_get(struct net *net, u16 zone,
+__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple, u32 hash)
{
struct nf_conntrack_tuple_hash *h;
@@ -514,11 +506,11 @@ begin:
}
struct nf_conntrack_tuple_hash *
-nf_conntrack_find_get(struct net *net, u16 zone,
+nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple)
{
return __nf_conntrack_find_get(net, zone, tuple,
- hash_conntrack_raw(tuple, zone));
+ hash_conntrack_raw(tuple));
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
@@ -537,11 +529,11 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
int
nf_conntrack_hash_check_insert(struct nf_conn *ct)
{
+ const struct nf_conntrack_zone *zone;
struct net *net = nf_ct_net(ct);
unsigned int hash, reply_hash;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
- u16 zone;
unsigned int sequence;
zone = nf_ct_zone(ct);
@@ -549,9 +541,9 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
local_bh_disable();
do {
sequence = read_seqcount_begin(&net->ct.generation);
- hash = hash_conntrack(net, zone,
+ hash = hash_conntrack(net,
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
- reply_hash = hash_conntrack(net, zone,
+ reply_hash = hash_conntrack(net,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
@@ -559,12 +551,14 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
&h->tuple) &&
- zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+ nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+ NF_CT_DIRECTION(h)))
goto out;
hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
&h->tuple) &&
- zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+ nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+ NF_CT_DIRECTION(h)))
goto out;
add_timer(&ct->timeout);
@@ -589,6 +583,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
+ const struct nf_conntrack_zone *zone;
unsigned int hash, reply_hash;
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
@@ -597,7 +592,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
struct hlist_nulls_node *n;
enum ip_conntrack_info ctinfo;
struct net *net;
- u16 zone;
unsigned int sequence;
ct = nf_ct_get(skb, &ctinfo);
@@ -618,7 +612,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
/* reuse the hash saved before */
hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
hash = hash_bucket(hash, net);
- reply_hash = hash_conntrack(net, zone,
+ reply_hash = hash_conntrack(net,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
@@ -650,12 +644,14 @@ __nf_conntrack_confirm(struct sk_buff *skb)
hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
&h->tuple) &&
- zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+ nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+ NF_CT_DIRECTION(h)))
goto out;
hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
&h->tuple) &&
- zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+ nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+ NF_CT_DIRECTION(h)))
goto out;
/* Timer relative to confirmation time, not original
@@ -708,11 +704,14 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
const struct nf_conn *ignored_conntrack)
{
struct net *net = nf_ct_net(ignored_conntrack);
+ const struct nf_conntrack_zone *zone;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
struct nf_conn *ct;
- u16 zone = nf_ct_zone(ignored_conntrack);
- unsigned int hash = hash_conntrack(net, zone, tuple);
+ unsigned int hash;
+
+ zone = nf_ct_zone(ignored_conntrack);
+ hash = hash_conntrack(net, tuple);
/* Disable BHs the entire time since we need to disable them at
* least once for the stats anyway.
@@ -722,7 +721,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
ct = nf_ct_tuplehash_to_ctrack(h);
if (ct != ignored_conntrack &&
nf_ct_tuple_equal(tuple, &h->tuple) &&
- nf_ct_zone(ct) == zone) {
+ nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h))) {
NF_CT_STAT_INC(net, found);
rcu_read_unlock_bh();
return 1;
@@ -811,7 +810,8 @@ void init_nf_conntrack_hash_rnd(void)
}
static struct nf_conn *
-__nf_conntrack_alloc(struct net *net, u16 zone,
+__nf_conntrack_alloc(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *orig,
const struct nf_conntrack_tuple *repl,
gfp_t gfp, u32 hash)
@@ -821,7 +821,7 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
if (unlikely(!nf_conntrack_hash_rnd)) {
init_nf_conntrack_hash_rnd();
/* recompute the hash as nf_conntrack_hash_rnd is initialized */
- hash = hash_conntrack_raw(orig, zone);
+ hash = hash_conntrack_raw(orig);
}
/* We don't want any race condition at early drop stage */
@@ -841,10 +841,9 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
* SLAB_DESTROY_BY_RCU.
*/
ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
- if (ct == NULL) {
- atomic_dec(&net->ct.count);
- return ERR_PTR(-ENOMEM);
- }
+ if (ct == NULL)
+ goto out;
+
spin_lock_init(&ct->lock);
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
@@ -858,31 +857,24 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
memset(&ct->__nfct_init_offset[0], 0,
offsetof(struct nf_conn, proto) -
offsetof(struct nf_conn, __nfct_init_offset[0]));
-#ifdef CONFIG_NF_CONNTRACK_ZONES
- if (zone) {
- struct nf_conntrack_zone *nf_ct_zone;
- nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
- if (!nf_ct_zone)
- goto out_free;
- nf_ct_zone->id = zone;
- }
-#endif
+ if (zone && nf_ct_zone_add(ct, GFP_ATOMIC, zone) < 0)
+ goto out_free;
+
/* Because we use RCU lookups, we set ct_general.use to zero before
* this is inserted in any list.
*/
atomic_set(&ct->ct_general.use, 0);
return ct;
-
-#ifdef CONFIG_NF_CONNTRACK_ZONES
out_free:
- atomic_dec(&net->ct.count);
kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
+out:
+ atomic_dec(&net->ct.count);
return ERR_PTR(-ENOMEM);
-#endif
}
-struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
+struct nf_conn *nf_conntrack_alloc(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *orig,
const struct nf_conntrack_tuple *repl,
gfp_t gfp)
@@ -924,8 +916,9 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
struct nf_conntrack_tuple repl_tuple;
struct nf_conntrack_ecache *ecache;
struct nf_conntrack_expect *exp = NULL;
- u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
+ const struct nf_conntrack_zone *zone;
struct nf_conn_timeout *timeout_ext;
+ struct nf_conntrack_zone tmp;
unsigned int *timeouts;
if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
@@ -933,6 +926,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
return NULL;
}
+ zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
hash);
if (IS_ERR(ct))
@@ -1027,10 +1021,11 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
int *set_reply,
enum ip_conntrack_info *ctinfo)
{
+ const struct nf_conntrack_zone *zone;
struct nf_conntrack_tuple tuple;
struct nf_conntrack_tuple_hash *h;
+ struct nf_conntrack_zone tmp;
struct nf_conn *ct;
- u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
u32 hash;
if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
@@ -1041,7 +1036,8 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
}
/* look for tuple match */
- hash = hash_conntrack_raw(&tuple, zone);
+ zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
+ hash = hash_conntrack_raw(&tuple);
h = __nf_conntrack_find_get(net, zone, &tuple, hash);
if (!h) {
h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
@@ -1597,8 +1593,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
struct nf_conntrack_tuple_hash, hnnode);
ct = nf_ct_tuplehash_to_ctrack(h);
hlist_nulls_del_rcu(&h->hnnode);
- bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
- hashsize);
+ bucket = __hash_conntrack(&h->tuple, hashsize);
hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
}
}
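Dropping the zone from hash_conntrack_raw() is deliberate: a zone may now apply
to a single direction, so two entries from different zones can land in the same
bucket and must be distinguished at compare time, which is what the
nf_ct_zone_equal() calls above do. A simplified userspace model of that
directional comparison (ids, masks and names are illustrative; the real helpers
live in nf_conntrack_zones.h):

    #include <stdio.h>

    enum dir { DIR_ORIG = 0, DIR_REPLY = 1 };

    struct zone {
        int id;
        int dir_mask;   /* directions this zone id applies to */
    };

    /* The id only counts in a direction the zone covers; otherwise the
     * default zone id (0) is used for the comparison. */
    static int zone_id_for(const struct zone *z, enum dir d)
    {
        return (z->dir_mask & (1 << d)) ? z->id : 0;
    }

    static int zones_equal(const struct zone *a, const struct zone *b,
                           enum dir d)
    {
        return zone_id_for(a, d) == zone_id_for(b, d);
    }

    int main(void)
    {
        struct zone orig_only = { .id = 7, .dir_mask = 1 << DIR_ORIG };
        struct zone both      = { .id = 7, .dir_mask = 3 };

        printf("%d %d\n",
               zones_equal(&orig_only, &both, DIR_ORIG),
               zones_equal(&orig_only, &both, DIR_REPLY)); /* 1 0 */
        return 0;
    }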
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index b45a4223c..acf5c7b3f 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -88,7 +88,8 @@ static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple
}
struct nf_conntrack_expect *
-__nf_ct_expect_find(struct net *net, u16 zone,
+__nf_ct_expect_find(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_expect *i;
@@ -100,7 +101,7 @@ __nf_ct_expect_find(struct net *net, u16 zone,
h = nf_ct_expect_dst_hash(tuple);
hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) {
if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
- nf_ct_zone(i->master) == zone)
+ nf_ct_zone_equal_any(i->master, zone))
return i;
}
return NULL;
@@ -109,7 +110,8 @@ EXPORT_SYMBOL_GPL(__nf_ct_expect_find);
/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
-nf_ct_expect_find_get(struct net *net, u16 zone,
+nf_ct_expect_find_get(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_expect *i;
@@ -127,7 +129,8 @@ EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
/* If an expectation for this connection is found, it gets deleted from
* the global list, then returned. */
struct nf_conntrack_expect *
-nf_ct_find_expectation(struct net *net, u16 zone,
+nf_ct_find_expectation(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_expect *i, *exp = NULL;
@@ -140,7 +143,7 @@ nf_ct_find_expectation(struct net *net, u16 zone,
hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) {
if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
- nf_ct_zone(i->master) == zone) {
+ nf_ct_zone_equal_any(i->master, zone)) {
exp = i;
break;
}
@@ -220,16 +223,16 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
}
return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
- nf_ct_zone(a->master) == nf_ct_zone(b->master);
+ nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}
static inline int expect_matches(const struct nf_conntrack_expect *a,
const struct nf_conntrack_expect *b)
{
return a->master == b->master && a->class == b->class &&
- nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
- nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
- nf_ct_zone(a->master) == nf_ct_zone(b->master);
+ nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
+ nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
+ nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}
/* Generally a bad idea to call this: could have matched already. */
diff --git a/net/netfilter/nf_conntrack_labels.c b/net/netfilter/nf_conntrack_labels.c
index bb53f120e..3ce5c314e 100644
--- a/net/netfilter/nf_conntrack_labels.c
+++ b/net/netfilter/nf_conntrack_labels.c
@@ -14,6 +14,8 @@
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_labels.h>
+static spinlock_t nf_connlabels_lock;
+
static unsigned int label_bits(const struct nf_conn_labels *l)
{
unsigned int longs = l->words;
@@ -48,7 +50,6 @@ int nf_connlabel_set(struct nf_conn *ct, u16 bit)
}
EXPORT_SYMBOL_GPL(nf_connlabel_set);
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
static void replace_u32(u32 *address, u32 mask, u32 new)
{
u32 old, tmp;
@@ -89,7 +90,35 @@ int nf_connlabels_replace(struct nf_conn *ct,
return 0;
}
EXPORT_SYMBOL_GPL(nf_connlabels_replace);
-#endif
+
+int nf_connlabels_get(struct net *net, unsigned int n_bits)
+{
+ size_t words;
+
+ if (n_bits > (NF_CT_LABELS_MAX_SIZE * BITS_PER_BYTE))
+ return -ERANGE;
+
+ words = BITS_TO_LONGS(n_bits);
+
+ spin_lock(&nf_connlabels_lock);
+ net->ct.labels_used++;
+ if (words > net->ct.label_words)
+ net->ct.label_words = words;
+ spin_unlock(&nf_connlabels_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nf_connlabels_get);
+
+void nf_connlabels_put(struct net *net)
+{
+ spin_lock(&nf_connlabels_lock);
+ net->ct.labels_used--;
+ if (net->ct.labels_used == 0)
+ net->ct.label_words = 0;
+ spin_unlock(&nf_connlabels_lock);
+}
+EXPORT_SYMBOL_GPL(nf_connlabels_put);
static struct nf_ct_ext_type labels_extend __read_mostly = {
.len = sizeof(struct nf_conn_labels),
@@ -99,6 +128,7 @@ static struct nf_ct_ext_type labels_extend __read_mostly = {
int nf_conntrack_labels_init(void)
{
+ spin_lock_init(&nf_connlabels_lock);
return nf_ct_extend_register(&labels_extend);
}
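The get/put pair added above lets each consumer declare how many label bits it
needs per namespace instead of sizing the extension by hand. A hypothetical
consumer (names illustrative), wired through pernet init/exit:

    static int __net_init my_labels_net_init(struct net *net)
    {
        /* Reserve room for 128 connlabel bits in this namespace;
         * requests past NF_CT_LABELS_MAX_SIZE * 8 bits fail with -ERANGE. */
        return nf_connlabels_get(net, 128);
    }

    static void __net_exit my_labels_net_exit(struct net *net)
    {
        nf_connlabels_put(net);
    }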
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 6b8b0abbf..94a66541e 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -128,6 +128,20 @@ ctnetlink_dump_tuples(struct sk_buff *skb,
}
static inline int
+ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype,
+ const struct nf_conntrack_zone *zone, int dir)
+{
+ if (zone->id == NF_CT_DEFAULT_ZONE_ID || zone->dir != dir)
+ return 0;
+ if (nla_put_be16(skb, attrtype, htons(zone->id)))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static inline int
ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
{
if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
@@ -458,6 +472,7 @@ static int
ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
struct nf_conn *ct)
{
+ const struct nf_conntrack_zone *zone;
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
struct nlattr *nest_parms;
@@ -473,11 +488,16 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = 0;
+ zone = nf_ct_zone(ct);
+
nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
if (!nest_parms)
goto nla_put_failure;
if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
goto nla_put_failure;
+ if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+ NF_CT_ZONE_DIR_ORIG) < 0)
+ goto nla_put_failure;
nla_nest_end(skb, nest_parms);
nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
@@ -485,10 +505,13 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
goto nla_put_failure;
if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
goto nla_put_failure;
+ if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+ NF_CT_ZONE_DIR_REPL) < 0)
+ goto nla_put_failure;
nla_nest_end(skb, nest_parms);
- if (nf_ct_zone(ct) &&
- nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
+ if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
+ NF_CT_DEFAULT_ZONE_DIR) < 0)
goto nla_put_failure;
if (ctnetlink_dump_status(skb, ct) < 0 ||
@@ -598,7 +621,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
+ nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
#endif
#ifdef CONFIG_NF_CONNTRACK_ZONES
- + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
+ + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
#endif
+ ctnetlink_proto_size(ct)
+ ctnetlink_label_size(ct)
@@ -609,6 +632,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
static int
ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
{
+ const struct nf_conntrack_zone *zone;
struct net *net;
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
@@ -655,11 +679,16 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
nfmsg->res_id = 0;
rcu_read_lock();
+ zone = nf_ct_zone(ct);
+
nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
if (!nest_parms)
goto nla_put_failure;
if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
goto nla_put_failure;
+ if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+ NF_CT_ZONE_DIR_ORIG) < 0)
+ goto nla_put_failure;
nla_nest_end(skb, nest_parms);
nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
@@ -667,10 +696,13 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
goto nla_put_failure;
if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
goto nla_put_failure;
+ if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+ NF_CT_ZONE_DIR_REPL) < 0)
+ goto nla_put_failure;
nla_nest_end(skb, nest_parms);
- if (nf_ct_zone(ct) &&
- nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
+ if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
+ NF_CT_DEFAULT_ZONE_DIR) < 0)
goto nla_put_failure;
if (ctnetlink_dump_id(skb, ct) < 0)
@@ -920,15 +952,54 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
return ret;
}
+static int
+ctnetlink_parse_zone(const struct nlattr *attr,
+ struct nf_conntrack_zone *zone)
+{
+ nf_ct_zone_init(zone, NF_CT_DEFAULT_ZONE_ID,
+ NF_CT_DEFAULT_ZONE_DIR, 0);
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+ if (attr)
+ zone->id = ntohs(nla_get_be16(attr));
+#else
+ if (attr)
+ return -EOPNOTSUPP;
+#endif
+ return 0;
+}
+
+static int
+ctnetlink_parse_tuple_zone(struct nlattr *attr, enum ctattr_type type,
+ struct nf_conntrack_zone *zone)
+{
+ int ret;
+
+ if (zone->id != NF_CT_DEFAULT_ZONE_ID)
+ return -EINVAL;
+
+ ret = ctnetlink_parse_zone(attr, zone);
+ if (ret < 0)
+ return ret;
+
+ if (type == CTA_TUPLE_REPLY)
+ zone->dir = NF_CT_ZONE_DIR_REPL;
+ else
+ zone->dir = NF_CT_ZONE_DIR_ORIG;
+
+ return 0;
+}
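A toy model, again with stand-in types, of the ordering these two parsers enforce: CTA_ZONE (parsed first) sets the id; a per-tuple CTA_TUPLE_ZONE is only legal while the id is still the default, and it additionally pins the zone to one direction.

struct tuple_zone { unsigned short id, dir; };

static int set_tuple_zone(struct tuple_zone *z, unsigned short id,
			  int is_reply, unsigned short dflt)
{
	if (z->id != dflt)
		return -1;		/* kernel: -EINVAL */
	z->id  = id;
	z->dir = is_reply ? 2 : 1;	/* stand-ins for REPL/ORIG */
	return 0;
}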
+
static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
[CTA_TUPLE_IP] = { .type = NLA_NESTED },
[CTA_TUPLE_PROTO] = { .type = NLA_NESTED },
+ [CTA_TUPLE_ZONE] = { .type = NLA_U16 },
};
static int
ctnetlink_parse_tuple(const struct nlattr * const cda[],
struct nf_conntrack_tuple *tuple,
- enum ctattr_type type, u_int8_t l3num)
+ enum ctattr_type type, u_int8_t l3num,
+ struct nf_conntrack_zone *zone)
{
struct nlattr *tb[CTA_TUPLE_MAX+1];
int err;
@@ -955,6 +1026,16 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[],
if (err < 0)
return err;
+ if (tb[CTA_TUPLE_ZONE]) {
+ if (!zone)
+ return -EINVAL;
+
+ err = ctnetlink_parse_tuple_zone(tb[CTA_TUPLE_ZONE],
+ type, zone);
+ if (err < 0)
+ return err;
+ }
+
/* orig and expect tuples get DIR_ORIGINAL */
if (type == CTA_TUPLE_REPLY)
tuple->dst.dir = IP_CT_DIR_REPLY;
@@ -964,21 +1045,6 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[],
return 0;
}
-static int
-ctnetlink_parse_zone(const struct nlattr *attr, u16 *zone)
-{
- if (attr)
-#ifdef CONFIG_NF_CONNTRACK_ZONES
- *zone = ntohs(nla_get_be16(attr));
-#else
- return -EOPNOTSUPP;
-#endif
- else
- *zone = 0;
-
- return 0;
-}
-
static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
[CTA_HELP_NAME] = { .type = NLA_NUL_STRING,
.len = NF_CT_HELPER_NAME_LEN - 1 },
@@ -1058,7 +1124,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
struct nf_conn *ct;
struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u_int8_t u3 = nfmsg->nfgen_family;
- u16 zone;
+ struct nf_conntrack_zone zone;
int err;
err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
@@ -1066,9 +1132,11 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
return err;
if (cda[CTA_TUPLE_ORIG])
- err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
+ u3, &zone);
else if (cda[CTA_TUPLE_REPLY])
- err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
+ u3, &zone);
else {
return ctnetlink_flush_conntrack(net, cda,
NETLINK_CB(skb).portid,
@@ -1078,7 +1146,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
if (err < 0)
return err;
- h = nf_conntrack_find_get(net, zone, &tuple);
+ h = nf_conntrack_find_get(net, &zone, &tuple);
if (!h)
return -ENOENT;
@@ -1112,7 +1180,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
struct sk_buff *skb2 = NULL;
struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u_int8_t u3 = nfmsg->nfgen_family;
- u16 zone;
+ struct nf_conntrack_zone zone;
int err;
if (nlh->nlmsg_flags & NLM_F_DUMP) {
@@ -1138,16 +1206,18 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
return err;
if (cda[CTA_TUPLE_ORIG])
- err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
+ u3, &zone);
else if (cda[CTA_TUPLE_REPLY])
- err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
+ u3, &zone);
else
return -EINVAL;
if (err < 0)
return err;
- h = nf_conntrack_find_get(net, zone, &tuple);
+ h = nf_conntrack_find_get(net, &zone, &tuple);
if (!h)
return -ENOENT;
@@ -1645,7 +1715,8 @@ ctnetlink_change_conntrack(struct nf_conn *ct,
}
static struct nf_conn *
-ctnetlink_create_conntrack(struct net *net, u16 zone,
+ctnetlink_create_conntrack(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nlattr * const cda[],
struct nf_conntrack_tuple *otuple,
struct nf_conntrack_tuple *rtuple,
@@ -1761,7 +1832,8 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
struct nf_conntrack_tuple_hash *master_h;
struct nf_conn *master_ct;
- err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, u3);
+ err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER,
+ u3, NULL);
if (err < 0)
goto err2;
@@ -1804,7 +1876,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
struct nfgenmsg *nfmsg = nlmsg_data(nlh);
struct nf_conn *ct;
u_int8_t u3 = nfmsg->nfgen_family;
- u16 zone;
+ struct nf_conntrack_zone zone;
int err;
err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
@@ -1812,21 +1884,23 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
return err;
if (cda[CTA_TUPLE_ORIG]) {
- err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3);
+ err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG,
+ u3, &zone);
if (err < 0)
return err;
}
if (cda[CTA_TUPLE_REPLY]) {
- err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY, u3);
+ err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY,
+ u3, &zone);
if (err < 0)
return err;
}
if (cda[CTA_TUPLE_ORIG])
- h = nf_conntrack_find_get(net, zone, &otuple);
+ h = nf_conntrack_find_get(net, &zone, &otuple);
else if (cda[CTA_TUPLE_REPLY])
- h = nf_conntrack_find_get(net, zone, &rtuple);
+ h = nf_conntrack_find_get(net, &zone, &rtuple);
if (h == NULL) {
err = -ENOENT;
@@ -1836,7 +1910,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
return -EINVAL;
- ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
+ ct = ctnetlink_create_conntrack(net, &zone, cda, &otuple,
&rtuple, u3);
if (IS_ERR(ct))
return PTR_ERR(ct);
@@ -2082,7 +2156,7 @@ ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
+ nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
#endif
#ifdef CONFIG_NF_CONNTRACK_ZONES
- + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
+ + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
#endif
+ ctnetlink_proto_size(ct)
;
@@ -2091,14 +2165,20 @@ ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
static int
ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct)
{
+ const struct nf_conntrack_zone *zone;
struct nlattr *nest_parms;
rcu_read_lock();
+ zone = nf_ct_zone(ct);
+
nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
if (!nest_parms)
goto nla_put_failure;
if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
goto nla_put_failure;
+ if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+ NF_CT_ZONE_DIR_ORIG) < 0)
+ goto nla_put_failure;
nla_nest_end(skb, nest_parms);
nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
@@ -2106,12 +2186,14 @@ ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct)
goto nla_put_failure;
if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
goto nla_put_failure;
+ if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+ NF_CT_ZONE_DIR_REPL) < 0)
+ goto nla_put_failure;
nla_nest_end(skb, nest_parms);
- if (nf_ct_zone(ct)) {
- if (nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
- goto nla_put_failure;
- }
+ if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
+ NF_CT_DEFAULT_ZONE_DIR) < 0)
+ goto nla_put_failure;
if (ctnetlink_dump_id(skb, ct) < 0)
goto nla_put_failure;
@@ -2218,12 +2300,12 @@ static int ctnetlink_nfqueue_exp_parse(const struct nlattr * const *cda,
int err;
err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE,
- nf_ct_l3num(ct));
+ nf_ct_l3num(ct), NULL);
if (err < 0)
return err;
return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK,
- nf_ct_l3num(ct));
+ nf_ct_l3num(ct), NULL);
}
static int
@@ -2612,23 +2694,22 @@ static int ctnetlink_dump_exp_ct(struct sock *ctnl, struct sk_buff *skb,
struct nf_conntrack_tuple tuple;
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
- u16 zone = 0;
+ struct nf_conntrack_zone zone;
struct netlink_dump_control c = {
.dump = ctnetlink_exp_ct_dump_table,
.done = ctnetlink_exp_done,
};
- err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
+ u3, NULL);
if (err < 0)
return err;
- if (cda[CTA_EXPECT_ZONE]) {
- err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
- if (err < 0)
- return err;
- }
+ err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
+ if (err < 0)
+ return err;
- h = nf_conntrack_find_get(net, zone, &tuple);
+ h = nf_conntrack_find_get(net, &zone, &tuple);
if (!h)
return -ENOENT;
@@ -2652,7 +2733,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
struct sk_buff *skb2;
struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u_int8_t u3 = nfmsg->nfgen_family;
- u16 zone;
+ struct nf_conntrack_zone zone;
int err;
if (nlh->nlmsg_flags & NLM_F_DUMP) {
@@ -2672,16 +2753,18 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
return err;
if (cda[CTA_EXPECT_TUPLE])
- err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
+ u3, NULL);
else if (cda[CTA_EXPECT_MASTER])
- err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
+ u3, NULL);
else
return -EINVAL;
if (err < 0)
return err;
- exp = nf_ct_expect_find_get(net, zone, &tuple);
+ exp = nf_ct_expect_find_get(net, &zone, &tuple);
if (!exp)
return -ENOENT;
@@ -2732,8 +2815,8 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
struct nfgenmsg *nfmsg = nlmsg_data(nlh);
struct hlist_node *next;
u_int8_t u3 = nfmsg->nfgen_family;
+ struct nf_conntrack_zone zone;
unsigned int i;
- u16 zone;
int err;
if (cda[CTA_EXPECT_TUPLE]) {
@@ -2742,12 +2825,13 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
if (err < 0)
return err;
- err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
+ u3, NULL);
if (err < 0)
return err;
/* bump usage count to 2 */
- exp = nf_ct_expect_find_get(net, zone, &tuple);
+ exp = nf_ct_expect_find_get(net, &zone, &tuple);
if (!exp)
return -ENOENT;
@@ -2849,7 +2933,8 @@ ctnetlink_parse_expect_nat(const struct nlattr *attr,
return -EINVAL;
err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
- &nat_tuple, CTA_EXPECT_NAT_TUPLE, u3);
+ &nat_tuple, CTA_EXPECT_NAT_TUPLE,
+ u3, NULL);
if (err < 0)
return err;
@@ -2937,7 +3022,8 @@ err_out:
}
static int
-ctnetlink_create_expect(struct net *net, u16 zone,
+ctnetlink_create_expect(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nlattr * const cda[],
u_int8_t u3, u32 portid, int report)
{
@@ -2949,13 +3035,16 @@ ctnetlink_create_expect(struct net *net, u16 zone,
int err;
/* caller guarantees that those three CTA_EXPECT_* exist */
- err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
+ u3, NULL);
if (err < 0)
return err;
- err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
+ err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK,
+ u3, NULL);
if (err < 0)
return err;
- err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
+ err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER,
+ u3, NULL);
if (err < 0)
return err;
@@ -3011,7 +3100,7 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
struct nf_conntrack_expect *exp;
struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u_int8_t u3 = nfmsg->nfgen_family;
- u16 zone;
+ struct nf_conntrack_zone zone;
int err;
if (!cda[CTA_EXPECT_TUPLE]
@@ -3023,19 +3112,18 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
if (err < 0)
return err;
- err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
+ u3, NULL);
if (err < 0)
return err;
spin_lock_bh(&nf_conntrack_expect_lock);
- exp = __nf_ct_expect_find(net, zone, &tuple);
-
+ exp = __nf_ct_expect_find(net, &zone, &tuple);
if (!exp) {
spin_unlock_bh(&nf_conntrack_expect_lock);
err = -ENOENT;
if (nlh->nlmsg_flags & NLM_F_CREATE) {
- err = ctnetlink_create_expect(net, zone, cda,
- u3,
+ err = ctnetlink_create_expect(net, &zone, cda, u3,
NETLINK_CB(skb).portid,
nlmsg_report(nlh));
}
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 825c3e3f8..5588c7ae1 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -143,13 +143,14 @@ static int destroy_sibling_or_exp(struct net *net, struct nf_conn *ct,
const struct nf_conntrack_tuple *t)
{
const struct nf_conntrack_tuple_hash *h;
+ const struct nf_conntrack_zone *zone;
struct nf_conntrack_expect *exp;
struct nf_conn *sibling;
- u16 zone = nf_ct_zone(ct);
pr_debug("trying to timeout ct or exp for tuple ");
nf_ct_dump_tuple(t);
+ zone = nf_ct_zone(ct);
h = nf_conntrack_find_get(net, zone, t);
if (h) {
sibling = nf_ct_tuplehash_to_ctrack(h);
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index b45da90fa..67197731e 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -42,6 +42,8 @@ static const char *const sctp_conntrack_names[] = {
"SHUTDOWN_SENT",
"SHUTDOWN_RECD",
"SHUTDOWN_ACK_SENT",
+ "HEARTBEAT_SENT",
+ "HEARTBEAT_ACKED",
};
#define SECS * HZ
@@ -57,6 +59,8 @@ static unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] __read_mostly = {
[SCTP_CONNTRACK_SHUTDOWN_SENT] = 300 SECS / 1000,
[SCTP_CONNTRACK_SHUTDOWN_RECD] = 300 SECS / 1000,
[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS,
+ [SCTP_CONNTRACK_HEARTBEAT_SENT] = 30 SECS,
+ [SCTP_CONNTRACK_HEARTBEAT_ACKED] = 210 SECS,
};
#define sNO SCTP_CONNTRACK_NONE
@@ -67,6 +71,8 @@ static unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] __read_mostly = {
#define sSS SCTP_CONNTRACK_SHUTDOWN_SENT
#define sSR SCTP_CONNTRACK_SHUTDOWN_RECD
#define sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT
+#define sHS SCTP_CONNTRACK_HEARTBEAT_SENT
+#define sHA SCTP_CONNTRACK_HEARTBEAT_ACKED
#define sIV SCTP_CONNTRACK_MAX
/*
@@ -88,6 +94,10 @@ SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite
to that of the SHUTDOWN chunk.
CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of
the SHUTDOWN chunk. Connection is closed.
+HEARTBEAT_SENT - We have seen a HEARTBEAT in a new flow.
+HEARTBEAT_ACKED - We have seen a HEARTBEAT-ACK in the direction opposite to
+ that of the HEARTBEAT chunk. Secondary connection is
+ established.
*/
/* TODO
@@ -97,36 +107,40 @@ CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of
- Check the error type in the reply dir before transitioning from
cookie echoed to closed.
- Sec 5.2.4 of RFC 2960
- - Multi Homing support.
+ - Full Multi Homing support.
*/
/* SCTP conntrack state transitions */
-static const u8 sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = {
+static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
{
/* ORIGINAL */
-/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA */
-/* init */ {sCW, sCW, sCW, sCE, sES, sSS, sSR, sSA},
-/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},
-/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
-/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA},
-/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA},
-/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't have Stale cookie*/
-/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA},/* 5.2.4 - Big TODO */
-/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't come in orig dir */
-/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL}
+/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
+/* init */ {sCW, sCW, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA},
+/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},
+/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
+/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL, sSS},
+/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA, sHA},
+/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* Can't have Stale cookie*/
+/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* 5.2.4 - Big TODO */
+/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* Can't come in orig dir */
+/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL, sHA},
+/* heartbeat */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA},
+/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA}
},
{
/* REPLY */
-/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA */
-/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* INIT in sCL Big TODO */
-/* init_ack */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},
-/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
-/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA},
-/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA},
-/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA},
-/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't come in reply dir */
-/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA},
-/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL}
+/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
+/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* INIT in sCL Big TODO */
+/* init_ack */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},
+/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV, sCL},
+/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV, sSR},
+/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV, sHA},
+/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV, sHA},
+/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* Can't come in reply dir */
+/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV, sHA},
+/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV, sHA},
+/* heartbeat */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA},
+/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHA, sHA}
}
};
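The tables are consulted as in the sketch below, which mirrors the lookup in sctp_new_state() further down: the packet direction selects the outer table, the chunk type is first mapped to a row index, and the current state picks the column.

static unsigned char sctp_next_state(int dir, int chunk_row, int cur_state)
{
	return sctp_conntracks[dir][chunk_row][cur_state];
}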
@@ -278,9 +292,16 @@ static int sctp_new_state(enum ip_conntrack_dir dir,
pr_debug("SCTP_CID_SHUTDOWN_COMPLETE\n");
i = 8;
break;
+ case SCTP_CID_HEARTBEAT:
+ pr_debug("SCTP_CID_HEARTBEAT");
+ i = 9;
+ break;
+ case SCTP_CID_HEARTBEAT_ACK:
+ pr_debug("SCTP_CID_HEARTBEAT_ACK");
+ i = 10;
+ break;
default:
- /* Other chunks like DATA, SACK, HEARTBEAT and
- its ACK do not cause a change in state */
+ /* Other chunks like DATA or SACK do not change the state */
pr_debug("Unknown chunk type, Will stay in %s\n",
sctp_conntrack_names[cur_state]);
return cur_state;
@@ -329,6 +350,8 @@ static int sctp_packet(struct nf_conn *ct,
!test_bit(SCTP_CID_COOKIE_ECHO, map) &&
!test_bit(SCTP_CID_ABORT, map) &&
!test_bit(SCTP_CID_SHUTDOWN_ACK, map) &&
+ !test_bit(SCTP_CID_HEARTBEAT, map) &&
+ !test_bit(SCTP_CID_HEARTBEAT_ACK, map) &&
sh->vtag != ct->proto.sctp.vtag[dir]) {
pr_debug("Verification tag check failed\n");
goto out;
@@ -357,6 +380,16 @@ static int sctp_packet(struct nf_conn *ct,
/* Sec 8.5.1 (D) */
if (sh->vtag != ct->proto.sctp.vtag[dir])
goto out_unlock;
+ } else if (sch->type == SCTP_CID_HEARTBEAT ||
+ sch->type == SCTP_CID_HEARTBEAT_ACK) {
+ if (ct->proto.sctp.vtag[dir] == 0) {
+ pr_debug("Setting vtag %x for dir %d\n",
+ sh->vtag, dir);
+ ct->proto.sctp.vtag[dir] = sh->vtag;
+ } else if (sh->vtag != ct->proto.sctp.vtag[dir]) {
+ pr_debug("Verification tag check failed\n");
+ goto out_unlock;
+ }
}
old_state = ct->proto.sctp.state;
@@ -466,6 +499,10 @@ static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
/* Sec 8.5.1 (A) */
return false;
}
+ } else if (sch->type == SCTP_CID_HEARTBEAT) {
+ pr_debug("Setting vtag %x for secondary conntrack\n",
+ sh->vtag);
+ ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag;
}
/* If it is a shutdown ack OOTB packet, we expect a return
shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */
@@ -610,6 +647,8 @@ sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = {
[CTA_TIMEOUT_SCTP_SHUTDOWN_SENT] = { .type = NLA_U32 },
[CTA_TIMEOUT_SCTP_SHUTDOWN_RECD] = { .type = NLA_U32 },
[CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_SCTP_HEARTBEAT_SENT] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
@@ -658,6 +697,18 @@ static struct ctl_table sctp_sysctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
+ {
+ .procname = "nf_conntrack_sctp_timeout_heartbeat_sent",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ {
+ .procname = "nf_conntrack_sctp_timeout_heartbeat_acked",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
{ }
};
@@ -730,6 +781,8 @@ static int sctp_kmemdup_sysctl_table(struct nf_proto_net *pn,
pn->ctl_table[4].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT];
pn->ctl_table[5].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD];
pn->ctl_table[6].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT];
+ pn->ctl_table[7].data = &sn->timeouts[SCTP_CONNTRACK_HEARTBEAT_SENT];
+ pn->ctl_table[8].data = &sn->timeouts[SCTP_CONNTRACK_HEARTBEAT_ACKED];
#endif
return 0;
}
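Taken together, the heartbeat hunks implement a learn-then-verify rule for the verification tag. A stand-alone userspace sketch of that rule, with assumed types:

#include <stdbool.h>
#include <stdint.h>

struct sctp_vtag_state { uint32_t vtag[2]; };	/* [orig, reply] */

static bool heartbeat_vtag_ok(struct sctp_vtag_state *s, int dir,
			      uint32_t vtag)
{
	if (s->vtag[dir] == 0) {
		s->vtag[dir] = vtag;	/* learn on first sight */
		return true;
	}
	return s->vtag[dir] == vtag;	/* verify afterwards */
}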
diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c
index ce3e840c8..dff0f0cc5 100644
--- a/net/netfilter/nf_conntrack_seqadj.c
+++ b/net/netfilter/nf_conntrack_seqadj.c
@@ -103,9 +103,9 @@ static void nf_ct_sack_block_adjust(struct sk_buff *skb,
ntohl(sack->end_seq), ntohl(new_end_seq));
inet_proto_csum_replace4(&tcph->check, skb,
- sack->start_seq, new_start_seq, 0);
+ sack->start_seq, new_start_seq, false);
inet_proto_csum_replace4(&tcph->check, skb,
- sack->end_seq, new_end_seq, 0);
+ sack->end_seq, new_end_seq, false);
sack->start_seq = new_start_seq;
sack->end_seq = new_end_seq;
sackoff += sizeof(*sack);
@@ -193,8 +193,9 @@ int nf_ct_seq_adjust(struct sk_buff *skb,
newseq = htonl(ntohl(tcph->seq) + seqoff);
newack = htonl(ntohl(tcph->ack_seq) - ackoff);
- inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
- inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);
+ inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, false);
+ inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack,
+ false);
pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index fc823fa5d..1fb3cacc0 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -140,6 +140,35 @@ static inline void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
}
#endif
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+static void ct_show_zone(struct seq_file *s, const struct nf_conn *ct,
+ int dir)
+{
+ const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
+
+ if (zone->dir != dir)
+ return;
+ switch (zone->dir) {
+ case NF_CT_DEFAULT_ZONE_DIR:
+ seq_printf(s, "zone=%u ", zone->id);
+ break;
+ case NF_CT_ZONE_DIR_ORIG:
+ seq_printf(s, "zone-orig=%u ", zone->id);
+ break;
+ case NF_CT_ZONE_DIR_REPL:
+ seq_printf(s, "zone-reply=%u ", zone->id);
+ break;
+ default:
+ break;
+ }
+}
+#else
+static inline void ct_show_zone(struct seq_file *s, const struct nf_conn *ct,
+ int dir)
+{
+}
+#endif
+
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
static void ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
{
@@ -202,6 +231,8 @@ static int ct_seq_show(struct seq_file *s, void *v)
print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
l3proto, l4proto);
+ ct_show_zone(s, ct, NF_CT_ZONE_DIR_ORIG);
+
if (seq_has_overflowed(s))
goto release;
@@ -214,6 +245,8 @@ static int ct_seq_show(struct seq_file *s, void *v)
print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
l3proto, l4proto);
+ ct_show_zone(s, ct, NF_CT_ZONE_DIR_REPL);
+
if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
goto release;
@@ -228,11 +261,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
#endif
ct_show_secctx(s, ct);
-
-#ifdef CONFIG_NF_CONNTRACK_ZONES
- seq_printf(s, "zone=%u ", nf_ct_zone(ct));
-#endif
-
+ ct_show_zone(s, ct, NF_CT_DEFAULT_ZONE_DIR);
ct_show_delta_time(s, ct);
seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use));
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index 399210693..065522564 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -19,7 +19,7 @@ unsigned int nf_iterate(struct list_head *head, struct sk_buff *skb,
/* nf_queue.c */
int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem,
struct nf_hook_state *state, unsigned int queuenum);
-void nf_queue_nf_hook_drop(struct nf_hook_ops *ops);
+void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops);
int __init netfilter_queue_init(void);
/* nf_log.c */
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 4e0b47831..5113dfd39 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -118,14 +118,13 @@ EXPORT_SYMBOL(nf_xfrm_me_harder);
/* We keep an extra hash for each conntrack, for fast searching. */
static inline unsigned int
-hash_by_src(const struct net *net, u16 zone,
- const struct nf_conntrack_tuple *tuple)
+hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple)
{
unsigned int hash;
/* Original src, to ensure we map it consistently if poss. */
hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
- tuple->dst.protonum ^ zone ^ nf_conntrack_hash_rnd);
+ tuple->dst.protonum ^ nf_conntrack_hash_rnd);
return reciprocal_scale(hash, net->ct.nat_htable_size);
}
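For reference, reciprocal_scale() maps the 32-bit hash onto [0, buckets) with a multiply-and-shift instead of a modulo, roughly:

static unsigned int scale_hash(unsigned int hash32, unsigned int buckets)
{
	/* keep the high 32 bits of the 64-bit product */
	return (unsigned int)(((unsigned long long)hash32 * buckets) >> 32);
}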
@@ -185,20 +184,22 @@ same_src(const struct nf_conn *ct,
/* Only called for SRC manip */
static int
-find_appropriate_src(struct net *net, u16 zone,
+find_appropriate_src(struct net *net,
+ const struct nf_conntrack_zone *zone,
const struct nf_nat_l3proto *l3proto,
const struct nf_nat_l4proto *l4proto,
const struct nf_conntrack_tuple *tuple,
struct nf_conntrack_tuple *result,
const struct nf_nat_range *range)
{
- unsigned int h = hash_by_src(net, zone, tuple);
+ unsigned int h = hash_by_src(net, tuple);
const struct nf_conn_nat *nat;
const struct nf_conn *ct;
hlist_for_each_entry_rcu(nat, &net->ct.nat_bysource[h], bysource) {
ct = nat->ct;
- if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
+ if (same_src(ct, tuple) &&
+ nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
/* Copy source part from reply tuple. */
nf_ct_invert_tuplepr(result,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
@@ -218,7 +219,8 @@ find_appropriate_src(struct net *net, u16 zone,
* the ip with the lowest src-ip/dst-ip/proto usage.
*/
static void
-find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
+find_best_ips_proto(const struct nf_conntrack_zone *zone,
+ struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
const struct nf_conn *ct,
enum nf_nat_manip_type maniptype)
@@ -258,7 +260,7 @@ find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
*/
j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
range->flags & NF_NAT_RANGE_PERSISTENT ?
- 0 : (__force u32)tuple->dst.u3.all[max] ^ zone);
+ 0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);
full_range = false;
for (i = 0; i <= max; i++) {
@@ -297,10 +299,12 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
struct nf_conn *ct,
enum nf_nat_manip_type maniptype)
{
+ const struct nf_conntrack_zone *zone;
const struct nf_nat_l3proto *l3proto;
const struct nf_nat_l4proto *l4proto;
struct net *net = nf_ct_net(ct);
- u16 zone = nf_ct_zone(ct);
+
+ zone = nf_ct_zone(ct);
rcu_read_lock();
l3proto = __nf_nat_l3proto_find(orig_tuple->src.l3num);
@@ -420,7 +424,7 @@ nf_nat_setup_info(struct nf_conn *ct,
if (maniptype == NF_NAT_MANIP_SRC) {
unsigned int srchash;
- srchash = hash_by_src(net, nf_ct_zone(ct),
+ srchash = hash_by_src(net,
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
spin_lock_bh(&nf_nat_lock);
/* nf_conntrack_alter_reply might re-allocate extension area */
diff --git a/net/netfilter/nf_nat_proto_dccp.c b/net/netfilter/nf_nat_proto_dccp.c
index b8067b53f..15c47b246 100644
--- a/net/netfilter/nf_nat_proto_dccp.c
+++ b/net/netfilter/nf_nat_proto_dccp.c
@@ -69,7 +69,7 @@ dccp_manip_pkt(struct sk_buff *skb,
l3proto->csum_update(skb, iphdroff, &hdr->dccph_checksum,
tuple, maniptype);
inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport,
- 0);
+ false);
return true;
}
diff --git a/net/netfilter/nf_nat_proto_tcp.c b/net/netfilter/nf_nat_proto_tcp.c
index 37f5505f4..4f8820fc5 100644
--- a/net/netfilter/nf_nat_proto_tcp.c
+++ b/net/netfilter/nf_nat_proto_tcp.c
@@ -70,7 +70,7 @@ tcp_manip_pkt(struct sk_buff *skb,
return true;
l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
- inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, 0);
+ inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, false);
return true;
}
diff --git a/net/netfilter/nf_nat_proto_udp.c b/net/netfilter/nf_nat_proto_udp.c
index b0ede2f0d..b1e627227 100644
--- a/net/netfilter/nf_nat_proto_udp.c
+++ b/net/netfilter/nf_nat_proto_udp.c
@@ -57,7 +57,7 @@ udp_manip_pkt(struct sk_buff *skb,
l3proto->csum_update(skb, iphdroff, &hdr->check,
tuple, maniptype);
inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport,
- 0);
+ false);
if (!hdr->check)
hdr->check = CSUM_MANGLED_0;
}
diff --git a/net/netfilter/nf_nat_proto_udplite.c b/net/netfilter/nf_nat_proto_udplite.c
index 368f14e01..58340c97b 100644
--- a/net/netfilter/nf_nat_proto_udplite.c
+++ b/net/netfilter/nf_nat_proto_udplite.c
@@ -56,7 +56,7 @@ udplite_manip_pkt(struct sk_buff *skb,
}
l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
- inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, 0);
+ inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, false);
if (!hdr->check)
hdr->check = CSUM_MANGLED_0;
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 8a8b2abc3..96777f9a9 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -105,21 +105,15 @@ bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
-void nf_queue_nf_hook_drop(struct nf_hook_ops *ops)
+void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops)
{
const struct nf_queue_handler *qh;
- struct net *net;
- rtnl_lock();
rcu_read_lock();
qh = rcu_dereference(queue_handler);
- if (qh) {
- for_each_net(net) {
- qh->nf_hook_drop(net, ops);
- }
- }
+ if (qh)
+ qh->nf_hook_drop(net, ops);
rcu_read_unlock();
- rtnl_unlock();
}
/*
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index d6ee8f8b1..c8a4a48bc 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -17,10 +17,12 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_tcpudp.h>
#include <linux/netfilter/xt_SYNPROXY.h>
+
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
+#include <net/netfilter/nf_conntrack_zones.h>
int synproxy_net_id;
EXPORT_SYMBOL_GPL(synproxy_net_id);
@@ -186,7 +188,7 @@ unsigned int synproxy_tstamp_adjust(struct sk_buff *skb,
const struct nf_conn_synproxy *synproxy)
{
unsigned int optoff, optend;
- u32 *ptr, old;
+ __be32 *ptr, old;
if (synproxy->tsoff == 0)
return 1;
@@ -214,18 +216,18 @@ unsigned int synproxy_tstamp_adjust(struct sk_buff *skb,
if (op[0] == TCPOPT_TIMESTAMP &&
op[1] == TCPOLEN_TIMESTAMP) {
if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) {
- ptr = (u32 *)&op[2];
+ ptr = (__be32 *)&op[2];
old = *ptr;
*ptr = htonl(ntohl(*ptr) -
synproxy->tsoff);
} else {
- ptr = (u32 *)&op[6];
+ ptr = (__be32 *)&op[6];
old = *ptr;
*ptr = htonl(ntohl(*ptr) +
synproxy->tsoff);
}
inet_proto_csum_replace4(&th->check, skb,
- old, *ptr, 0);
+ old, *ptr, false);
return 1;
}
optoff += op[1];
@@ -352,7 +354,7 @@ static int __net_init synproxy_net_init(struct net *net)
struct nf_conn *ct;
int err = -ENOMEM;
- ct = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL);
+ ct = nf_ct_tmpl_alloc(net, &nf_ct_zone_dflt, GFP_KERNEL);
if (!ct)
goto err1;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index cfe636808..4a41eb92b 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -130,20 +130,24 @@ static void nft_trans_destroy(struct nft_trans *trans)
int nft_register_basechain(struct nft_base_chain *basechain,
unsigned int hook_nops)
{
+ struct net *net = read_pnet(&basechain->pnet);
+
if (basechain->flags & NFT_BASECHAIN_DISABLED)
return 0;
- return nf_register_hooks(basechain->ops, hook_nops);
+ return nf_register_net_hooks(net, basechain->ops, hook_nops);
}
EXPORT_SYMBOL_GPL(nft_register_basechain);
void nft_unregister_basechain(struct nft_base_chain *basechain,
unsigned int hook_nops)
{
+ struct net *net = read_pnet(&basechain->pnet);
+
if (basechain->flags & NFT_BASECHAIN_DISABLED)
return;
- nf_unregister_hooks(basechain->ops, hook_nops);
+ nf_unregister_net_hooks(net, basechain->ops, hook_nops);
}
EXPORT_SYMBOL_GPL(nft_unregister_basechain);
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index f77bad46a..05d0b0353 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -114,7 +114,6 @@ unsigned int
nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
{
const struct nft_chain *chain = ops->priv, *basechain = chain;
- const struct net *chain_net = read_pnet(&nft_base_chain(basechain)->pnet);
const struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
const struct nft_rule *rule;
const struct nft_expr *expr, *last;
@@ -125,10 +124,6 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
int rulenum;
unsigned int gencursor = nft_genmask_cur(net);
- /* Ignore chains that are not for the current network namespace */
- if (!net_eq(net, chain_net))
- return NF_ACCEPT;
-
do_chain:
rulenum = 0;
rule = list_entry(&chain->rules, struct nft_rule, list);
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index c18af2f63..fefbf5f0b 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -27,8 +27,6 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("nfacct: Extended Netfilter accounting infrastructure");
-static LIST_HEAD(nfnl_acct_list);
-
struct nf_acct {
atomic64_t pkts;
atomic64_t bytes;
@@ -53,6 +51,7 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
const struct nlmsghdr *nlh, const struct nlattr * const tb[])
{
struct nf_acct *nfacct, *matching = NULL;
+ struct net *net = sock_net(nfnl);
char *acct_name;
unsigned int size = 0;
u32 flags = 0;
@@ -64,7 +63,7 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
if (strlen(acct_name) == 0)
return -EINVAL;
- list_for_each_entry(nfacct, &nfnl_acct_list, head) {
+ list_for_each_entry(nfacct, &net->nfnl_acct_list, head) {
if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0)
continue;
@@ -124,7 +123,7 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS])));
}
atomic_set(&nfacct->refcnt, 1);
- list_add_tail_rcu(&nfacct->head, &nfnl_acct_list);
+ list_add_tail_rcu(&nfacct->head, &net->nfnl_acct_list);
return 0;
}
@@ -185,6 +184,7 @@ nla_put_failure:
static int
nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
+ struct net *net = sock_net(skb->sk);
struct nf_acct *cur, *last;
const struct nfacct_filter *filter = cb->data;
@@ -196,7 +196,7 @@ nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
cb->args[1] = 0;
rcu_read_lock();
- list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
+ list_for_each_entry_rcu(cur, &net->nfnl_acct_list, head) {
if (last) {
if (cur != last)
continue;
@@ -257,6 +257,7 @@ static int
nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
const struct nlmsghdr *nlh, const struct nlattr * const tb[])
{
+ struct net *net = sock_net(nfnl);
int ret = -ENOENT;
struct nf_acct *cur;
char *acct_name;
@@ -283,7 +284,7 @@ nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
return -EINVAL;
acct_name = nla_data(tb[NFACCT_NAME]);
- list_for_each_entry(cur, &nfnl_acct_list, head) {
+ list_for_each_entry(cur, &net->nfnl_acct_list, head) {
struct sk_buff *skb2;
if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
@@ -336,19 +337,20 @@ static int
nfnl_acct_del(struct sock *nfnl, struct sk_buff *skb,
const struct nlmsghdr *nlh, const struct nlattr * const tb[])
{
+ struct net *net = sock_net(nfnl);
char *acct_name;
struct nf_acct *cur;
int ret = -ENOENT;
if (!tb[NFACCT_NAME]) {
- list_for_each_entry(cur, &nfnl_acct_list, head)
+ list_for_each_entry(cur, &net->nfnl_acct_list, head)
nfnl_acct_try_del(cur);
return 0;
}
acct_name = nla_data(tb[NFACCT_NAME]);
- list_for_each_entry(cur, &nfnl_acct_list, head) {
+ list_for_each_entry(cur, &net->nfnl_acct_list, head) {
if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
continue;
@@ -394,12 +396,12 @@ static const struct nfnetlink_subsystem nfnl_acct_subsys = {
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ACCT);
-struct nf_acct *nfnl_acct_find_get(const char *acct_name)
+struct nf_acct *nfnl_acct_find_get(struct net *net, const char *acct_name)
{
struct nf_acct *cur, *acct = NULL;
rcu_read_lock();
- list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
+ list_for_each_entry_rcu(cur, &net->nfnl_acct_list, head) {
if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
continue;
@@ -422,7 +424,9 @@ EXPORT_SYMBOL_GPL(nfnl_acct_find_get);
void nfnl_acct_put(struct nf_acct *acct)
{
- atomic_dec(&acct->refcnt);
+ if (atomic_dec_and_test(&acct->refcnt))
+ kfree_rcu(acct, rcu_head);
+
module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(nfnl_acct_put);
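The put path now pairs the final decrement with the free. A simplified userspace analogue of the pattern, using C11 atomics as a stand-in for atomic_t and a plain free() where the kernel defers via kfree_rcu() so lockless RCU list walkers never touch freed memory:

#include <stdatomic.h>
#include <stdlib.h>

struct acct_model {
	atomic_int refcnt;
	/* name, counters, ... */
};

static void acct_put_model(struct acct_model *a)
{
	/* fetch_sub returns the old value; 1 means we held the last ref */
	if (atomic_fetch_sub(&a->refcnt, 1) == 1)
		free(a);		/* kernel: kfree_rcu(acct, rcu_head) */
}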
@@ -478,34 +482,59 @@ int nfnl_acct_overquota(const struct sk_buff *skb, struct nf_acct *nfacct)
}
EXPORT_SYMBOL_GPL(nfnl_acct_overquota);
+static int __net_init nfnl_acct_net_init(struct net *net)
+{
+ INIT_LIST_HEAD(&net->nfnl_acct_list);
+
+ return 0;
+}
+
+static void __net_exit nfnl_acct_net_exit(struct net *net)
+{
+ struct nf_acct *cur, *tmp;
+
+ list_for_each_entry_safe(cur, tmp, &net->nfnl_acct_list, head) {
+ list_del_rcu(&cur->head);
+
+ if (atomic_dec_and_test(&cur->refcnt))
+ kfree_rcu(cur, rcu_head);
+ }
+}
+
+static struct pernet_operations nfnl_acct_ops = {
+ .init = nfnl_acct_net_init,
+ .exit = nfnl_acct_net_exit,
+};
+
static int __init nfnl_acct_init(void)
{
int ret;
+ ret = register_pernet_subsys(&nfnl_acct_ops);
+ if (ret < 0) {
+ pr_err("nfnl_acct_init: failed to register pernet ops\n");
+ goto err_out;
+ }
+
pr_info("nfnl_acct: registering with nfnetlink.\n");
ret = nfnetlink_subsys_register(&nfnl_acct_subsys);
if (ret < 0) {
pr_err("nfnl_acct_init: cannot register with nfnetlink.\n");
- goto err_out;
+ goto cleanup_pernet;
}
return 0;
+
+cleanup_pernet:
+ unregister_pernet_subsys(&nfnl_acct_ops);
err_out:
return ret;
}
static void __exit nfnl_acct_exit(void)
{
- struct nf_acct *cur, *tmp;
-
pr_info("nfnl_acct: unregistering from nfnetlink.\n");
nfnetlink_subsys_unregister(&nfnl_acct_subsys);
-
- list_for_each_entry_safe(cur, tmp, &nfnl_acct_list, head) {
- list_del_rcu(&cur->head);
- /* We are sure that our objects have no clients at this point,
- * it's safe to release them all without checking refcnt. */
- kfree_rcu(cur, rcu_head);
- }
+ unregister_pernet_subsys(&nfnl_acct_ops);
}
module_init(nfnl_acct_init);
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 685cc6a17..a5cd6d90b 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -301,7 +301,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
__be32 **packet_id_ptr)
{
size_t size;
- size_t data_len = 0, cap_len = 0;
+ size_t data_len = 0, cap_len = 0, rem_len = 0;
unsigned int hlen = 0;
struct sk_buff *skb;
struct nlattr *nla;
@@ -360,6 +360,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
hlen = min_t(unsigned int, hlen, data_len);
size += sizeof(struct nlattr) + hlen;
cap_len = entskb->len;
+ rem_len = data_len - hlen;
break;
}
@@ -377,7 +378,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
size += nla_total_size(seclen);
}
- skb = nfnetlink_alloc_skb(net, size, queue->peer_portid,
+ skb = __netlink_alloc_skb(net->nfnl, size, rem_len, queue->peer_portid,
GFP_ATOMIC);
if (!skb) {
skb_tx_error(entskb);
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
index 175912392..1067fb4c1 100644
--- a/net/netfilter/nft_counter.c
+++ b/net/netfilter/nft_counter.c
@@ -18,39 +18,59 @@
#include <net/netfilter/nf_tables.h>
struct nft_counter {
- seqlock_t lock;
u64 bytes;
u64 packets;
};
+struct nft_counter_percpu {
+ struct nft_counter counter;
+ struct u64_stats_sync syncp;
+};
+
+struct nft_counter_percpu_priv {
+ struct nft_counter_percpu __percpu *counter;
+};
+
static void nft_counter_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
- struct nft_counter *priv = nft_expr_priv(expr);
-
- write_seqlock_bh(&priv->lock);
- priv->bytes += pkt->skb->len;
- priv->packets++;
- write_sequnlock_bh(&priv->lock);
+ struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
+ struct nft_counter_percpu *this_cpu;
+
+ local_bh_disable();
+ this_cpu = this_cpu_ptr(priv->counter);
+ u64_stats_update_begin(&this_cpu->syncp);
+ this_cpu->counter.bytes += pkt->skb->len;
+ this_cpu->counter.packets++;
+ u64_stats_update_end(&this_cpu->syncp);
+ local_bh_enable();
}
static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
- struct nft_counter *priv = nft_expr_priv(expr);
+ struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
+ struct nft_counter_percpu *cpu_stats;
+ struct nft_counter total;
+ u64 bytes, packets;
unsigned int seq;
- u64 bytes;
- u64 packets;
-
- do {
- seq = read_seqbegin(&priv->lock);
- bytes = priv->bytes;
- packets = priv->packets;
- } while (read_seqretry(&priv->lock, seq));
-
- if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(bytes)))
- goto nla_put_failure;
- if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(packets)))
+ int cpu;
+
+ memset(&total, 0, sizeof(total));
+ for_each_possible_cpu(cpu) {
+ cpu_stats = per_cpu_ptr(priv->counter, cpu);
+ do {
+ seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+ bytes = cpu_stats->counter.bytes;
+ packets = cpu_stats->counter.packets;
+ } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
+
+ total.packets += packets;
+ total.bytes += bytes;
+ }
+
+ if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)) ||
+ nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets)))
goto nla_put_failure;
return 0;
@@ -67,23 +87,44 @@ static int nft_counter_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
- struct nft_counter *priv = nft_expr_priv(expr);
+ struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
+ struct nft_counter_percpu __percpu *cpu_stats;
+ struct nft_counter_percpu *this_cpu;
+
+ cpu_stats = netdev_alloc_pcpu_stats(struct nft_counter_percpu);
+ if (cpu_stats == NULL)
+ return -ENOMEM;
+
+ preempt_disable();
+ this_cpu = this_cpu_ptr(cpu_stats);
+ if (tb[NFTA_COUNTER_PACKETS]) {
+ this_cpu->counter.packets =
+ be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
+ }
+ if (tb[NFTA_COUNTER_BYTES]) {
+ this_cpu->counter.bytes =
+ be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
+ }
+ preempt_enable();
+ priv->counter = cpu_stats;
+ return 0;
+}
- if (tb[NFTA_COUNTER_PACKETS])
- priv->packets = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
- if (tb[NFTA_COUNTER_BYTES])
- priv->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
+static void nft_counter_destroy(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
- seqlock_init(&priv->lock);
- return 0;
+ free_percpu(priv->counter);
}
static struct nft_expr_type nft_counter_type;
static const struct nft_expr_ops nft_counter_ops = {
.type = &nft_counter_type,
- .size = NFT_EXPR_SIZE(sizeof(struct nft_counter)),
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_counter_percpu_priv)),
.eval = nft_counter_eval,
.init = nft_counter_init,
+ .destroy = nft_counter_destroy,
.dump = nft_counter_dump,
};
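The percpu conversion trades exact snapshots for uncontended updates. A minimal userspace model of the sum-on-read side, with a fixed CPU count and without the u64_stats sequence counter that protects 32-bit readers from torn 64-bit loads:

#include <stdint.h>

#define NCPUS 4				/* stand-in for nr_cpu_ids */

struct pcpu_counter { uint64_t bytes, packets; };
static struct pcpu_counter counters[NCPUS];

/* Each CPU bumps only counters[cpu]; a reader sums all slots. */
static struct pcpu_counter counter_total(void)
{
	struct pcpu_counter t = { 0, 0 };
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++) {
		t.bytes   += counters[cpu].bytes;
		t.packets += counters[cpu].packets;
	}
	return t;
}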
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
index 435c1ccd6..5d67938f8 100644
--- a/net/netfilter/nft_limit.c
+++ b/net/netfilter/nft_limit.c
@@ -20,63 +20,79 @@
static DEFINE_SPINLOCK(limit_lock);
struct nft_limit {
+ u64 last;
u64 tokens;
+ u64 tokens_max;
u64 rate;
- u64 unit;
- unsigned long stamp;
+ u64 nsecs;
+ u32 burst;
};
-static void nft_limit_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost)
{
- struct nft_limit *priv = nft_expr_priv(expr);
+ u64 now, tokens;
+ s64 delta;
spin_lock_bh(&limit_lock);
- if (time_after_eq(jiffies, priv->stamp)) {
- priv->tokens = priv->rate;
- priv->stamp = jiffies + priv->unit * HZ;
- }
-
- if (priv->tokens >= 1) {
- priv->tokens--;
+ now = ktime_get_ns();
+ tokens = limit->tokens + now - limit->last;
+ if (tokens > limit->tokens_max)
+ tokens = limit->tokens_max;
+
+ limit->last = now;
+ delta = tokens - cost;
+ if (delta >= 0) {
+ limit->tokens = delta;
spin_unlock_bh(&limit_lock);
- return;
+ return false;
}
+ limit->tokens = tokens;
spin_unlock_bh(&limit_lock);
-
- regs->verdict.code = NFT_BREAK;
+ return true;
}
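The rewritten limiter is a token bucket denominated in nanoseconds: the budget refills with elapsed time, is capped at tokens_max, and each packet spends a precomputed cost (nsecs/rate in packet mode, scaled by skb->len in byte mode, as the later hunks show). A stand-alone sketch under those assumptions, with clock_gettime() standing in for ktime_get_ns():

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

struct bucket { uint64_t last, tokens, tokens_max; };

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

/* Returns true when the packet exceeds the rate (kernel: NFT_BREAK). */
static bool over_limit(struct bucket *b, uint64_t cost)
{
	uint64_t now = now_ns();
	uint64_t tokens = b->tokens + (now - b->last);
	int64_t delta;

	if (tokens > b->tokens_max)
		tokens = b->tokens_max;		/* cap the accumulated burst */
	b->last = now;
	delta = (int64_t)(tokens - cost);
	if (delta >= 0) {
		b->tokens = (uint64_t)delta;	/* spend and pass */
		return false;
	}
	b->tokens = tokens;			/* keep the insufficient budget */
	return true;
}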
-static const struct nla_policy nft_limit_policy[NFTA_LIMIT_MAX + 1] = {
- [NFTA_LIMIT_RATE] = { .type = NLA_U64 },
- [NFTA_LIMIT_UNIT] = { .type = NLA_U64 },
-};
-
-static int nft_limit_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
+static int nft_limit_init(struct nft_limit *limit,
const struct nlattr * const tb[])
{
- struct nft_limit *priv = nft_expr_priv(expr);
+ u64 unit;
if (tb[NFTA_LIMIT_RATE] == NULL ||
tb[NFTA_LIMIT_UNIT] == NULL)
return -EINVAL;
- priv->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
- priv->unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
- priv->stamp = jiffies + priv->unit * HZ;
- priv->tokens = priv->rate;
+ limit->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
+ unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
+ limit->nsecs = unit * NSEC_PER_SEC;
+ if (limit->rate == 0 || limit->nsecs < unit)
+ return -EOVERFLOW;
+ limit->tokens = limit->tokens_max = limit->nsecs;
+
+ if (tb[NFTA_LIMIT_BURST]) {
+ u64 rate;
+
+ limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));
+
+ rate = limit->rate + limit->burst;
+ if (rate < limit->rate)
+ return -EOVERFLOW;
+
+ limit->rate = rate;
+ }
+ limit->last = ktime_get_ns();
+
return 0;
}
-static int nft_limit_dump(struct sk_buff *skb, const struct nft_expr *expr)
+static int nft_limit_dump(struct sk_buff *skb, const struct nft_limit *limit,
+ enum nft_limit_type type)
{
- const struct nft_limit *priv = nft_expr_priv(expr);
+ u64 secs = div_u64(limit->nsecs, NSEC_PER_SEC);
+ u64 rate = limit->rate - limit->burst;
- if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(priv->rate)))
- goto nla_put_failure;
- if (nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(priv->unit)))
+ if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(rate)) ||
+ nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(secs)) ||
+ nla_put_be32(skb, NFTA_LIMIT_BURST, htonl(limit->burst)) ||
+ nla_put_be32(skb, NFTA_LIMIT_TYPE, htonl(type)))
goto nla_put_failure;
return 0;
@@ -84,18 +100,114 @@ nla_put_failure:
return -1;
}
+struct nft_limit_pkts {
+ struct nft_limit limit;
+ u64 cost;
+};
+
+static void nft_limit_pkts_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_limit_pkts *priv = nft_expr_priv(expr);
+
+ if (nft_limit_eval(&priv->limit, priv->cost))
+ regs->verdict.code = NFT_BREAK;
+}
+
+static const struct nla_policy nft_limit_policy[NFTA_LIMIT_MAX + 1] = {
+ [NFTA_LIMIT_RATE] = { .type = NLA_U64 },
+ [NFTA_LIMIT_UNIT] = { .type = NLA_U64 },
+ [NFTA_LIMIT_BURST] = { .type = NLA_U32 },
+ [NFTA_LIMIT_TYPE] = { .type = NLA_U32 },
+};
+
+static int nft_limit_pkts_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_limit_pkts *priv = nft_expr_priv(expr);
+ int err;
+
+ err = nft_limit_init(&priv->limit, tb);
+ if (err < 0)
+ return err;
+
+ priv->cost = div_u64(priv->limit.nsecs, priv->limit.rate);
+ return 0;
+}
+
+static int nft_limit_pkts_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_limit_pkts *priv = nft_expr_priv(expr);
+
+ return nft_limit_dump(skb, &priv->limit, NFT_LIMIT_PKTS);
+}
+
static struct nft_expr_type nft_limit_type;
-static const struct nft_expr_ops nft_limit_ops = {
+static const struct nft_expr_ops nft_limit_pkts_ops = {
+ .type = &nft_limit_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_limit_pkts)),
+ .eval = nft_limit_pkts_eval,
+ .init = nft_limit_pkts_init,
+ .dump = nft_limit_pkts_dump,
+};
+
+static void nft_limit_pkt_bytes_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_limit *priv = nft_expr_priv(expr);
+ u64 cost = div_u64(priv->nsecs * pkt->skb->len, priv->rate);
+
+ if (nft_limit_eval(priv, cost))
+ regs->verdict.code = NFT_BREAK;
+}
+
+static int nft_limit_pkt_bytes_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_limit *priv = nft_expr_priv(expr);
+
+ return nft_limit_init(priv, tb);
+}
+
+static int nft_limit_pkt_bytes_dump(struct sk_buff *skb,
+ const struct nft_expr *expr)
+{
+ const struct nft_limit *priv = nft_expr_priv(expr);
+
+ return nft_limit_dump(skb, priv, NFT_LIMIT_PKT_BYTES);
+}
+
+static const struct nft_expr_ops nft_limit_pkt_bytes_ops = {
.type = &nft_limit_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_limit)),
- .eval = nft_limit_eval,
- .init = nft_limit_init,
- .dump = nft_limit_dump,
+ .eval = nft_limit_pkt_bytes_eval,
+ .init = nft_limit_pkt_bytes_init,
+ .dump = nft_limit_pkt_bytes_dump,
};
+static const struct nft_expr_ops *
+nft_limit_select_ops(const struct nft_ctx *ctx,
+ const struct nlattr * const tb[])
+{
+ if (tb[NFTA_LIMIT_TYPE] == NULL)
+ return &nft_limit_pkts_ops;
+
+ switch (ntohl(nla_get_be32(tb[NFTA_LIMIT_TYPE]))) {
+ case NFT_LIMIT_PKTS:
+ return &nft_limit_pkts_ops;
+ case NFT_LIMIT_PKT_BYTES:
+ return &nft_limit_pkt_bytes_ops;
+ }
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static struct nft_expr_type nft_limit_type __read_mostly = {
.name = "limit",
- .ops = &nft_limit_ops,
+ .select_ops = nft_limit_select_ops,
.policy = nft_limit_policy,
.maxattr = NFTA_LIMIT_MAX,
.flags = NFT_EXPR_STATEFUL,
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 52561e1c3..cb2f13ebb 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -166,11 +166,13 @@ void nft_meta_get_eval(const struct nft_expr *expr,
goto err;
*dest = out->group;
break;
+#ifdef CONFIG_CGROUP_NET_CLASSID
case NFT_META_CGROUP:
if (skb->sk == NULL || !sk_fullsock(skb->sk))
goto err;
*dest = skb->sk->sk_classid;
break;
+#endif
default:
WARN_ON(1);
goto err;
@@ -246,7 +248,9 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
case NFT_META_CPU:
case NFT_META_IIFGROUP:
case NFT_META_OIFGROUP:
+#ifdef CONFIG_CGROUP_NET_CLASSID
case NFT_META_CGROUP:
+#endif
len = sizeof(u32);
break;
case NFT_META_IIFNAME:
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 94fb3b27a..09b4b07eb 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -9,6 +9,7 @@
*/
#include <linux/kernel.h>
+#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
@@ -17,6 +18,53 @@
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
+/* add the vlan header to the user buffer if the tag was stripped by offloads */
+static bool
+nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
+{
+ int mac_off = skb_mac_header(skb) - skb->data;
+ u8 vlan_len, *vlanh, *dst_u8 = (u8 *) d;
+ struct vlan_ethhdr veth;
+
+ vlanh = (u8 *) &veth;
+ if (offset < ETH_HLEN) {
+ u8 ethlen = min_t(u8, len, ETH_HLEN - offset);
+
+ if (skb_copy_bits(skb, mac_off, &veth, ETH_HLEN))
+ return false;
+
+ veth.h_vlan_proto = skb->vlan_proto;
+
+ memcpy(dst_u8, vlanh + offset, ethlen);
+
+ len -= ethlen;
+ if (len == 0)
+ return true;
+
+ dst_u8 += ethlen;
+ offset = ETH_HLEN;
+ } else if (offset >= VLAN_ETH_HLEN) {
+ offset -= VLAN_HLEN;
+ goto skip;
+ }
+
+ veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
+ veth.h_vlan_encapsulated_proto = skb->protocol;
+
+ vlanh += offset;
+
+ vlan_len = min_t(u8, len, VLAN_ETH_HLEN - offset);
+ memcpy(dst_u8, vlanh, vlan_len);
+
+ len -= vlan_len;
+ if (!len)
+ return true;
+
+ dst_u8 += vlan_len;
+ skip:
+ return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
+}
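What this helper reconstructs is the 18-byte tagged Ethernet header that hardware acceleration moved into skb metadata; its layout, matching struct vlan_ethhdr (VLAN_ETH_HLEN = 18, VLAN_HLEN = 4), is:

#include <stdint.h>

struct vlan_ethhdr_model {
	uint8_t  h_dest[6];
	uint8_t  h_source[6];
	uint16_t h_vlan_proto;			/* TPID, e.g. 0x8100 */
	uint16_t h_vlan_TCI;			/* PCP | DEI | VID */
	uint16_t h_vlan_encapsulated_proto;	/* inner ethertype */
};

Bytes requested past offset 18 are copied straight from the packet, shifted back by the 4 tag bytes.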
+
static void nft_payload_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
@@ -26,10 +74,18 @@ static void nft_payload_eval(const struct nft_expr *expr,
u32 *dest = &regs->data[priv->dreg];
int offset;
+ dest[priv->len / NFT_REG32_SIZE] = 0;
switch (priv->base) {
case NFT_PAYLOAD_LL_HEADER:
if (!skb_mac_header_was_set(skb))
goto err;
+
+ if (skb_vlan_tag_present(skb)) {
+ if (!nft_payload_copy_vlan(dest, skb,
+ priv->offset, priv->len))
+ goto err;
+ return;
+ }
offset = skb_mac_header(skb) - skb->data;
break;
case NFT_PAYLOAD_NETWORK_HEADER:
@@ -43,7 +99,6 @@ static void nft_payload_eval(const struct nft_expr *expr,
}
offset += priv->offset;
- dest[priv->len / NFT_REG32_SIZE] = 0;
if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
goto err;
return;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index d324fe712..9b42b5ea6 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -67,9 +67,6 @@ static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
[NFPROTO_IPV6] = "ip6",
};
-/* Allow this many total (re)entries. */
-static const unsigned int xt_jumpstack_multiplier = 2;
-
/* Registration hooks for targets. */
int xt_register_target(struct xt_target *target)
{
@@ -688,8 +685,6 @@ void xt_free_table_info(struct xt_table_info *info)
kvfree(info->jumpstack);
}
- free_percpu(info->stackptr);
-
kvfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);
@@ -732,15 +727,14 @@ EXPORT_SYMBOL_GPL(xt_compat_unlock);
DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
+struct static_key xt_tee_enabled __read_mostly;
+EXPORT_SYMBOL_GPL(xt_tee_enabled);
+
static int xt_jumpstack_alloc(struct xt_table_info *i)
{
unsigned int size;
int cpu;
- i->stackptr = alloc_percpu(unsigned int);
- if (i->stackptr == NULL)
- return -ENOMEM;
-
size = sizeof(void **) * nr_cpu_ids;
if (size > PAGE_SIZE)
i->jumpstack = vzalloc(size);
@@ -749,8 +743,21 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
if (i->jumpstack == NULL)
return -ENOMEM;
- i->stacksize *= xt_jumpstack_multiplier;
- size = sizeof(void *) * i->stacksize;
+ /* ruleset without jumps -- no stack needed */
+ if (i->stacksize == 0)
+ return 0;
+
+ /* Jumpstack needs to be able to record two full callchains, one
+ * from the first rule set traversal, plus one table reentrancy
+ * via -j TEE without clobbering the callchain that brought us to
+ * the TEE target.
+ *
+ * This is done by allocating two jumpstacks per cpu; on reentry,
+ * the upper half of the stack is used.
+ *
+ * See the jumpstack setup in ipt_do_table() for more details.
+ */
+ size = sizeof(void *) * i->stacksize * 2u;
for_each_possible_cpu(cpu) {
if (size > PAGE_SIZE)
i->jumpstack[cpu] = vmalloc_node(size,
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index f3377ce1f..faf32d888 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -181,9 +181,23 @@ out:
#endif
}
+static u16 xt_ct_flags_to_dir(const struct xt_ct_target_info_v1 *info)
+{
+ switch (info->flags & (XT_CT_ZONE_DIR_ORIG |
+ XT_CT_ZONE_DIR_REPL)) {
+ case XT_CT_ZONE_DIR_ORIG:
+ return NF_CT_ZONE_DIR_ORIG;
+ case XT_CT_ZONE_DIR_REPL:
+ return NF_CT_ZONE_DIR_REPL;
+ default:
+ return NF_CT_DEFAULT_ZONE_DIR;
+ }
+}
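Restated as a truth table with stand-in constants (the real XT_CT_ZONE_DIR_* flags and NF_CT_*_DIR values live in the kernel headers): exactly one direction flag selects that direction; none, or both, fall back to the default both-directions zone.

enum { MODEL_DIR_ORIG, MODEL_DIR_REPL, MODEL_DIR_DEFAULT };
#define MODEL_FLAG_ORIG (1u << 0)
#define MODEL_FLAG_REPL (1u << 1)

static int model_flags_to_dir(unsigned int flags)
{
	switch (flags & (MODEL_FLAG_ORIG | MODEL_FLAG_REPL)) {
	case MODEL_FLAG_ORIG: return MODEL_DIR_ORIG;
	case MODEL_FLAG_REPL: return MODEL_DIR_REPL;
	default:	      return MODEL_DIR_DEFAULT;
	}
}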
+
static int xt_ct_tg_check(const struct xt_tgchk_param *par,
struct xt_ct_target_info_v1 *info)
{
+ struct nf_conntrack_zone zone;
struct nf_conn *ct;
int ret = -EOPNOTSUPP;
@@ -193,7 +207,9 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
}
#ifndef CONFIG_NF_CONNTRACK_ZONES
- if (info->zone)
+ if (info->zone || info->flags & (XT_CT_ZONE_DIR_ORIG |
+ XT_CT_ZONE_DIR_REPL |
+ XT_CT_ZONE_MARK))
goto err1;
#endif
@@ -201,7 +217,13 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
if (ret < 0)
goto err1;
- ct = nf_ct_tmpl_alloc(par->net, info->zone, GFP_KERNEL);
+ memset(&zone, 0, sizeof(zone));
+ zone.id = info->zone;
+ zone.dir = xt_ct_flags_to_dir(info);
+ if (info->flags & XT_CT_ZONE_MARK)
+ zone.flags |= NF_CT_FLAG_MARK;
+
+ ct = nf_ct_tmpl_alloc(par->net, &zone, GFP_KERNEL);
if (!ct) {
ret = -ENOMEM;
goto err2;
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 8c3190e2f..8c02501a5 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -144,7 +144,7 @@ tcpmss_mangle_packet(struct sk_buff *skb,
inet_proto_csum_replace2(&tcph->check, skb,
htons(oldmss), htons(newmss),
- 0);
+ false);
return 0;
}
}
@@ -185,18 +185,18 @@ tcpmss_mangle_packet(struct sk_buff *skb,
memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
inet_proto_csum_replace2(&tcph->check, skb,
- htons(len), htons(len + TCPOLEN_MSS), 1);
+ htons(len), htons(len + TCPOLEN_MSS), true);
opt[0] = TCPOPT_MSS;
opt[1] = TCPOLEN_MSS;
opt[2] = (newmss & 0xff00) >> 8;
opt[3] = newmss & 0x00ff;
- inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), 0);
+ inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), false);
oldval = ((__be16 *)tcph)[6];
tcph->doff += TCPOLEN_MSS/4;
inet_proto_csum_replace2(&tcph->check, skb,
- oldval, ((__be16 *)tcph)[6], 0);
+ oldval, ((__be16 *)tcph)[6], false);
return TCPOLEN_MSS;
}
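
The 0/1-to-false/true churn in this file, in xt_TCPOPTSTRIP.c below, and later in the openvswitch hunks follows an API cleanup that turned the final pseudohdr argument of the inet_proto_csum_replace*() helpers into a bool. Their shape after the change, reproduced from memory as a sketch:

    /* include/net/checksum.h, 4.3 era -- quoted from memory, treat as a
     * sketch.  pseudohdr states whether the replaced field is also part
     * of the pseudo-header, in which case CHECKSUM_COMPLETE skbs need
     * their checksum adjusted as well.
     */
    void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
                                  __be32 from, __be32 to, bool pseudohdr);
    void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
                                  __be16 from, __be16 to, bool pseudohdr);
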
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
index 625fa1d63..eb92bffff 100644
--- a/net/netfilter/xt_TCPOPTSTRIP.c
+++ b/net/netfilter/xt_TCPOPTSTRIP.c
@@ -80,7 +80,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
n <<= 8;
}
inet_proto_csum_replace2(&tcph->check, skb, htons(o),
- htons(n), 0);
+ htons(n), false);
}
memset(opt + i, TCPOPT_NOP, optl);
}
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index a747eb475..fd980aa77 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -10,26 +10,15 @@
* modify it under the terms of the GNU General Public License
* version 2 or later, as published by the Free Software Foundation.
*/
-#include <linux/ip.h>
#include <linux/module.h>
-#include <linux/percpu.h>
-#include <linux/route.h>
#include <linux/skbuff.h>
-#include <linux/notifier.h>
-#include <net/checksum.h>
-#include <net/icmp.h>
-#include <net/ip.h>
-#include <net/ipv6.h>
-#include <net/ip6_route.h>
-#include <net/route.h>
+#include <linux/route.h>
#include <linux/netfilter/x_tables.h>
+#include <net/route.h>
+#include <net/netfilter/ipv4/nf_dup_ipv4.h>
+#include <net/netfilter/ipv6/nf_dup_ipv6.h>
#include <linux/netfilter/xt_TEE.h>
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
-# define WITH_CONNTRACK 1
-# include <net/netfilter/nf_conntrack.h>
-#endif
-
struct xt_tee_priv {
struct notifier_block notifier;
struct xt_tee_tginfo *tginfo;
@@ -37,163 +26,25 @@ struct xt_tee_priv {
};
static const union nf_inet_addr tee_zero_address;
-static DEFINE_PER_CPU(bool, tee_active);
-
-static struct net *pick_net(struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_NS
- const struct dst_entry *dst;
-
- if (skb->dev != NULL)
- return dev_net(skb->dev);
- dst = skb_dst(skb);
- if (dst != NULL && dst->dev != NULL)
- return dev_net(dst->dev);
-#endif
- return &init_net;
-}
-
-static bool
-tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
-{
- const struct iphdr *iph = ip_hdr(skb);
- struct net *net = pick_net(skb);
- struct rtable *rt;
- struct flowi4 fl4;
-
- memset(&fl4, 0, sizeof(fl4));
- if (info->priv) {
- if (info->priv->oif == -1)
- return false;
- fl4.flowi4_oif = info->priv->oif;
- }
- fl4.daddr = info->gw.ip;
- fl4.flowi4_tos = RT_TOS(iph->tos);
- fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
- fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
- rt = ip_route_output_key(net, &fl4);
- if (IS_ERR(rt))
- return false;
-
- skb_dst_drop(skb);
- skb_dst_set(skb, &rt->dst);
- skb->dev = rt->dst.dev;
- skb->protocol = htons(ETH_P_IP);
- return true;
-}
static unsigned int
tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_tee_tginfo *info = par->targinfo;
- struct iphdr *iph;
- if (__this_cpu_read(tee_active))
- return XT_CONTINUE;
- /*
- * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
- * the original skb, which should continue on its way as if nothing has
- * happened. The copy should be independently delivered to the TEE
- * --gateway.
- */
- skb = pskb_copy(skb, GFP_ATOMIC);
- if (skb == NULL)
- return XT_CONTINUE;
-
-#ifdef WITH_CONNTRACK
- /* Avoid counting cloned packets towards the original connection. */
- nf_conntrack_put(skb->nfct);
- skb->nfct = &nf_ct_untracked_get()->ct_general;
- skb->nfctinfo = IP_CT_NEW;
- nf_conntrack_get(skb->nfct);
-#endif
- /*
- * If we are in PREROUTING/INPUT, the checksum must be recalculated
- * since the length could have changed as a result of defragmentation.
- *
- * We also decrease the TTL to mitigate potential TEE loops
- * between two hosts.
- *
- * Set %IP_DF so that the original source is notified of a potentially
- * decreased MTU on the clone route. IPv6 does this too.
- */
- iph = ip_hdr(skb);
- iph->frag_off |= htons(IP_DF);
- if (par->hooknum == NF_INET_PRE_ROUTING ||
- par->hooknum == NF_INET_LOCAL_IN)
- --iph->ttl;
- ip_send_check(iph);
+ nf_dup_ipv4(skb, par->hooknum, &info->gw.in, info->priv->oif);
- if (tee_tg_route4(skb, info)) {
- __this_cpu_write(tee_active, true);
- ip_local_out(skb);
- __this_cpu_write(tee_active, false);
- } else {
- kfree_skb(skb);
- }
return XT_CONTINUE;
}
-#if IS_ENABLED(CONFIG_IPV6)
-static bool
-tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
-{
- const struct ipv6hdr *iph = ipv6_hdr(skb);
- struct net *net = pick_net(skb);
- struct dst_entry *dst;
- struct flowi6 fl6;
-
- memset(&fl6, 0, sizeof(fl6));
- if (info->priv) {
- if (info->priv->oif == -1)
- return false;
- fl6.flowi6_oif = info->priv->oif;
- }
- fl6.daddr = info->gw.in6;
- fl6.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
- (iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
- fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
- dst = ip6_route_output(net, NULL, &fl6);
- if (dst->error) {
- dst_release(dst);
- return false;
- }
- skb_dst_drop(skb);
- skb_dst_set(skb, dst);
- skb->dev = dst->dev;
- skb->protocol = htons(ETH_P_IPV6);
- return true;
-}
-
+#if IS_ENABLED(CONFIG_NF_DUP_IPV6)
static unsigned int
tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_tee_tginfo *info = par->targinfo;
- if (__this_cpu_read(tee_active))
- return XT_CONTINUE;
- skb = pskb_copy(skb, GFP_ATOMIC);
- if (skb == NULL)
- return XT_CONTINUE;
+ nf_dup_ipv6(skb, par->hooknum, &info->gw.in6, info->priv->oif);
-#ifdef WITH_CONNTRACK
- nf_conntrack_put(skb->nfct);
- skb->nfct = &nf_ct_untracked_get()->ct_general;
- skb->nfctinfo = IP_CT_NEW;
- nf_conntrack_get(skb->nfct);
-#endif
- if (par->hooknum == NF_INET_PRE_ROUTING ||
- par->hooknum == NF_INET_LOCAL_IN) {
- struct ipv6hdr *iph = ipv6_hdr(skb);
- --iph->hop_limit;
- }
- if (tee_tg_route6(skb, info)) {
- __this_cpu_write(tee_active, true);
- ip6_local_out(skb);
- __this_cpu_write(tee_active, false);
- } else {
- kfree_skb(skb);
- }
return XT_CONTINUE;
}
#endif
@@ -252,6 +103,7 @@ static int tee_tg_check(const struct xt_tgchk_param *par)
} else
info->priv = NULL;
+ static_key_slow_inc(&xt_tee_enabled);
return 0;
}
@@ -263,6 +115,7 @@ static void tee_tg_destroy(const struct xt_tgdtor_param *par)
unregister_netdevice_notifier(&info->priv->notifier);
kfree(info->priv);
}
+ static_key_slow_dec(&xt_tee_enabled);
}
static struct xt_target tee_tg_reg[] __read_mostly = {
@@ -276,7 +129,7 @@ static struct xt_target tee_tg_reg[] __read_mostly = {
.destroy = tee_tg_destroy,
.me = THIS_MODULE,
},
-#if IS_ENABLED(CONFIG_IPV6)
+#if IS_ENABLED(CONFIG_NF_DUP_IPV6)
{
.name = "TEE",
.revision = 1,
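
The net effect of this rewrite is that the routing, conntrack-untracking, TTL and checksum handling TEE used to open-code moves into shared duplication helpers; the per-cpu tee_active flag likewise becomes the nf_skb_duplicated flag consulted by the jumpstack logic earlier in this patch. The helper entry points, with signatures inferred from the call sites above:

    /* Entry points of the new nf_dup modules (the clone/route/send and
     * loop-avoidance logic now lives behind these):
     */
    void nf_dup_ipv4(struct sk_buff *skb, unsigned int hooknum,
                     const struct in_addr *gw, int oif);
    void nf_dup_ipv6(struct sk_buff *skb, unsigned int hooknum,
                     const struct in6_addr *gw, int oif);
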
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index cca96cec1..d0c96c5ae 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -272,8 +272,7 @@ tproxy_handle_time_wait4(struct sk_buff *skb, __be32 laddr, __be16 lport,
hp->source, lport ? lport : hp->dest,
skb->dev, NFT_LOOKUP_LISTENER);
if (sk2) {
- inet_twsk_deschedule(inet_twsk(sk));
- inet_twsk_put(inet_twsk(sk));
+ inet_twsk_deschedule_put(inet_twsk(sk));
sk = sk2;
}
}
@@ -437,8 +436,7 @@ tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
tgi->lport ? tgi->lport : hp->dest,
skb->dev, NFT_LOOKUP_LISTENER);
if (sk2) {
- inet_twsk_deschedule(inet_twsk(sk));
- inet_twsk_put(inet_twsk(sk));
+ inet_twsk_deschedule_put(inet_twsk(sk));
sk = sk2;
}
}
diff --git a/net/netfilter/xt_connlabel.c b/net/netfilter/xt_connlabel.c
index 9f8719df2..bb9cbeb18 100644
--- a/net/netfilter/xt_connlabel.c
+++ b/net/netfilter/xt_connlabel.c
@@ -42,10 +42,6 @@ static int connlabel_mt_check(const struct xt_mtchk_param *par)
XT_CONNLABEL_OP_SET;
struct xt_connlabel_mtinfo *info = par->matchinfo;
int ret;
- size_t words;
-
- if (info->bit > XT_CONNLABEL_MAXBIT)
- return -ERANGE;
if (info->options & ~options) {
pr_err("Unknown options in mask %x\n", info->options);
@@ -59,19 +55,15 @@ static int connlabel_mt_check(const struct xt_mtchk_param *par)
return ret;
}
- par->net->ct.labels_used++;
- words = BITS_TO_LONGS(info->bit+1);
- if (words > par->net->ct.label_words)
- par->net->ct.label_words = words;
-
+ ret = nf_connlabels_get(par->net, info->bit + 1);
+ if (ret < 0)
+ nf_ct_l3proto_module_put(par->family);
return ret;
}
static void connlabel_mt_destroy(const struct xt_mtdtor_param *par)
{
- par->net->ct.labels_used--;
- if (par->net->ct.labels_used == 0)
- par->net->ct.label_words = 0;
+ nf_connlabels_put(par->net);
nf_ct_l3proto_module_put(par->family);
}
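
Label bookkeeping moves from open-coded per-netns counters to a balanced nf_connlabels_get()/nf_connlabels_put() pair. A sketch of the pattern for a hypothetical match module wanting label storage (the helpers are real, the module is not):

    /* Reserve enough label bits at check time, release them on destroy. */
    static int demo_mt_check(const struct xt_mtchk_param *par)
    {
        const struct xt_connlabel_mtinfo *info = par->matchinfo;

        /* reserve bits 0..info->bit inclusive for this netns */
        return nf_connlabels_get(par->net, info->bit + 1);
    }

    static void demo_mt_destroy(const struct xt_mtdtor_param *par)
    {
        nf_connlabels_put(par->net);
    }
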
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 29ba6218a..075d89d94 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -134,7 +134,7 @@ static bool add_hlist(struct hlist_head *head,
static unsigned int check_hlist(struct net *net,
struct hlist_head *head,
const struct nf_conntrack_tuple *tuple,
- u16 zone,
+ const struct nf_conntrack_zone *zone,
bool *addit)
{
const struct nf_conntrack_tuple_hash *found;
@@ -201,7 +201,7 @@ static unsigned int
count_tree(struct net *net, struct rb_root *root,
const struct nf_conntrack_tuple *tuple,
const union nf_inet_addr *addr, const union nf_inet_addr *mask,
- u8 family, u16 zone)
+ u8 family, const struct nf_conntrack_zone *zone)
{
struct xt_connlimit_rb *gc_nodes[CONNLIMIT_GC_MAX_NODES];
struct rb_node **rbnode, *parent;
@@ -290,7 +290,8 @@ static int count_them(struct net *net,
const struct nf_conntrack_tuple *tuple,
const union nf_inet_addr *addr,
const union nf_inet_addr *mask,
- u_int8_t family, u16 zone)
+ u_int8_t family,
+ const struct nf_conntrack_zone *zone)
{
struct rb_root *root;
int count;
@@ -321,10 +322,10 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
union nf_inet_addr addr;
struct nf_conntrack_tuple tuple;
const struct nf_conntrack_tuple *tuple_ptr = &tuple;
+ const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
enum ip_conntrack_info ctinfo;
const struct nf_conn *ct;
unsigned int connections;
- u16 zone = NF_CT_DEFAULT_ZONE;
ct = nf_ct_get(skb, &ctinfo);
if (ct != NULL) {
diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
index 8c646ed9c..3048a7e3a 100644
--- a/net/netfilter/xt_nfacct.c
+++ b/net/netfilter/xt_nfacct.c
@@ -37,7 +37,7 @@ nfacct_mt_checkentry(const struct xt_mtchk_param *par)
struct xt_nfacct_match_info *info = par->matchinfo;
struct nf_acct *nfacct;
- nfacct = nfnl_acct_find_get(info->name);
+ nfacct = nfnl_acct_find_get(par->net, info->name);
if (nfacct == NULL) {
pr_info("xt_nfacct: accounting object with name `%s' "
"does not exists\n", info->name);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a133d16eb..fafe33bdb 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -84,6 +84,7 @@ struct listeners {
#define NETLINK_F_BROADCAST_SEND_ERROR 0x4
#define NETLINK_F_RECV_NO_ENOBUFS 0x8
#define NETLINK_F_LISTEN_ALL_NSID 0x10
+#define NETLINK_F_CAP_ACK 0x20
static inline int netlink_is_kernel(struct sock *sk)
{
@@ -610,16 +611,6 @@ netlink_current_frame(const struct netlink_ring *ring,
return netlink_lookup_frame(ring, ring->head, status);
}
-static struct nl_mmap_hdr *
-netlink_previous_frame(const struct netlink_ring *ring,
- enum nl_mmap_status status)
-{
- unsigned int prev;
-
- prev = ring->head ? ring->head - 1 : ring->frame_max;
- return netlink_lookup_frame(ring, prev, status);
-}
-
static void netlink_increment_head(struct netlink_ring *ring)
{
ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
@@ -627,11 +618,11 @@ static void netlink_increment_head(struct netlink_ring *ring)
static void netlink_forward_ring(struct netlink_ring *ring)
{
- unsigned int head = ring->head, pos = head;
+ unsigned int head = ring->head;
const struct nl_mmap_hdr *hdr;
do {
- hdr = __netlink_lookup_frame(ring, pos);
+ hdr = __netlink_lookup_frame(ring, ring->head);
if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
break;
if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
@@ -640,6 +631,21 @@ static void netlink_forward_ring(struct netlink_ring *ring)
} while (ring->head != head);
}
+static bool netlink_has_valid_frame(struct netlink_ring *ring)
+{
+ unsigned int head = ring->head, pos = head;
+ const struct nl_mmap_hdr *hdr;
+
+ do {
+ hdr = __netlink_lookup_frame(ring, pos);
+ if (hdr->nm_status == NL_MMAP_STATUS_VALID)
+ return true;
+ pos = pos != 0 ? pos - 1 : ring->frame_max;
+ } while (pos != head);
+
+ return false;
+}
+
static bool netlink_dump_space(struct netlink_sock *nlk)
{
struct netlink_ring *ring = &nlk->rx_ring;
@@ -685,13 +691,19 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
mask = datagram_poll(file, sock, wait);
- spin_lock_bh(&sk->sk_receive_queue.lock);
- if (nlk->rx_ring.pg_vec) {
- netlink_forward_ring(&nlk->rx_ring);
- if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
- mask |= POLLIN | POLLRDNORM;
+ /* We could already have received frames in the normal receive
+ * queue, that will show up as NL_MMAP_STATUS_COPY in the ring,
+ * so if mask contains pollin/etc already, there's no point
+ * walking the ring.
+ */
+ if ((mask & (POLLIN | POLLRDNORM)) != (POLLIN | POLLRDNORM)) {
+ spin_lock_bh(&sk->sk_receive_queue.lock);
+ if (nlk->rx_ring.pg_vec) {
+ if (netlink_has_valid_frame(&nlk->rx_ring))
+ mask |= POLLIN | POLLRDNORM;
+ }
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
}
- spin_unlock_bh(&sk->sk_receive_queue.lock);
spin_lock_bh(&sk->sk_write_queue.lock);
if (nlk->tx_ring.pg_vec) {
@@ -1864,15 +1876,16 @@ retry:
}
EXPORT_SYMBOL(netlink_unicast);
-struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
- u32 dst_portid, gfp_t gfp_mask)
+struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size,
+ unsigned int ldiff, u32 dst_portid,
+ gfp_t gfp_mask)
{
#ifdef CONFIG_NETLINK_MMAP
+ unsigned int maxlen, linear_size;
struct sock *sk = NULL;
struct sk_buff *skb;
struct netlink_ring *ring;
struct nl_mmap_hdr *hdr;
- unsigned int maxlen;
sk = netlink_getsockbyportid(ssk, dst_portid);
if (IS_ERR(sk))
@@ -1883,7 +1896,11 @@ struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
if (ring->pg_vec == NULL)
goto out_put;
- if (ring->frame_size - NL_MMAP_HDRLEN < size)
+ /* We need to account the full linear size needed as a ring
+ * slot cannot have non-linear parts.
+ */
+ linear_size = size + ldiff;
+ if (ring->frame_size - NL_MMAP_HDRLEN < linear_size)
goto out_put;
skb = alloc_skb_head(gfp_mask);
@@ -1897,13 +1914,14 @@ struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
/* check again under lock */
maxlen = ring->frame_size - NL_MMAP_HDRLEN;
- if (maxlen < size)
+ if (maxlen < linear_size)
goto out_free;
netlink_forward_ring(ring);
hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
if (hdr == NULL)
goto err2;
+
netlink_ring_setup_skb(skb, sk, ring, hdr);
netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
atomic_inc(&ring->pending);
@@ -1929,7 +1947,7 @@ out:
#endif
return alloc_skb(size, gfp_mask);
}
-EXPORT_SYMBOL_GPL(netlink_alloc_skb);
+EXPORT_SYMBOL_GPL(__netlink_alloc_skb);
int netlink_has_listeners(struct sock *sk, unsigned int group)
{
@@ -2290,6 +2308,13 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID;
err = 0;
break;
+ case NETLINK_CAP_ACK:
+ if (val)
+ nlk->flags |= NETLINK_F_CAP_ACK;
+ else
+ nlk->flags &= ~NETLINK_F_CAP_ACK;
+ err = 0;
+ break;
default:
err = -ENOPROTOOPT;
}
@@ -2346,7 +2371,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
int pos, idx, shift;
err = 0;
- netlink_table_grab();
+ netlink_lock_table();
for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
if (len - pos < sizeof(u32))
break;
@@ -2361,9 +2386,19 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
}
if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
err = -EFAULT;
- netlink_table_ungrab();
+ netlink_unlock_table();
break;
}
+ case NETLINK_CAP_ACK:
+ if (len < sizeof(int))
+ return -EINVAL;
+ len = sizeof(int);
+ val = nlk->flags & NETLINK_F_CAP_ACK ? 1 : 0;
+ if (put_user(len, optlen) ||
+ put_user(val, optval))
+ return -EFAULT;
+ err = 0;
+ break;
default:
err = -ENOPROTOOPT;
}
@@ -2918,9 +2953,12 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
struct nlmsghdr *rep;
struct nlmsgerr *errmsg;
size_t payload = sizeof(*errmsg);
+ struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);
- /* error messages get the original request appened */
- if (err)
+ /* Error messages get the original request appended, unless the user
+ * requests to cap the error message.
+ */
+ if (!(nlk->flags & NETLINK_F_CAP_ACK) && err)
payload += nlmsg_len(nlh);
skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
@@ -2943,7 +2981,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
NLMSG_ERROR, payload, 0);
errmsg = nlmsg_data(rep);
errmsg->error = err;
- memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
+ memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh));
netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
}
EXPORT_SYMBOL(netlink_ack);
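
NETLINK_CAP_ACK is a plain on/off socket option: when set, the kernel stops echoing the original request back in the nlmsgerr payload, which matters to userspace that sends large requests but only cares about the error code. Minimal usage from userspace (a sketch; requires 4.3-era or newer uapi headers):

    #include <sys/socket.h>
    #include <linux/netlink.h>

    /* Cap ACK/error messages to the bare struct nlmsgerr instead of
     * having the kernel append a copy of the offending request.
     */
    static int netlink_cap_acks(int fd)
    {
        int one = 1;

        return setsockopt(fd, SOL_NETLINK, NETLINK_CAP_ACK,
                          &one, sizeof(one));
    }
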
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 95af2d24d..943889b87 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -351,6 +351,20 @@ int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, __u8 *payload)
}
EXPORT_SYMBOL(nci_prop_cmd);
+int nci_core_reset(struct nci_dev *ndev)
+{
+ return __nci_request(ndev, nci_reset_req, 0,
+ msecs_to_jiffies(NCI_RESET_TIMEOUT));
+}
+EXPORT_SYMBOL(nci_core_reset);
+
+int nci_core_init(struct nci_dev *ndev)
+{
+ return __nci_request(ndev, nci_init_req, 0,
+ msecs_to_jiffies(NCI_INIT_TIMEOUT));
+}
+EXPORT_SYMBOL(nci_core_init);
+
static int nci_open_device(struct nci_dev *ndev)
{
int rc = 0;
@@ -388,6 +402,10 @@ static int nci_open_device(struct nci_dev *ndev)
msecs_to_jiffies(NCI_INIT_TIMEOUT));
}
+ if (ndev->ops->post_setup) {
+ rc = ndev->ops->post_setup(ndev);
+ }
+
if (!rc) {
rc = __nci_request(ndev, nci_init_complete_req, 0,
msecs_to_jiffies(NCI_INIT_TIMEOUT));
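
nci_core_reset() and nci_core_init() are exported precisely so a driver's new post_setup() hook, invoked above between init and init-complete, can re-run the core handshake around vendor-specific configuration. A sketch of a hypothetical driver hook (demo_send_vendor_config() is made up):

    static int demo_post_setup(struct nci_dev *ndev)
    {
        int rc;

        rc = demo_send_vendor_config(ndev);
        if (rc)
            return rc;

        /* some firmware requires a reset+init cycle before the new
         * configuration takes effect
         */
        rc = nci_core_reset(ndev);
        if (rc)
            return rc;

        return nci_core_init(ndev);
    }
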
diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
index 609f92283..30b09f04c 100644
--- a/net/nfc/nci/hci.c
+++ b/net/nfc/nci/hci.c
@@ -101,6 +101,20 @@ struct nci_hcp_packet {
#define NCI_HCP_MSG_GET_CMD(header) (header & 0x3f)
#define NCI_HCP_MSG_GET_PIPE(header) (header & 0x7f)
+static int nci_hci_result_to_errno(u8 result)
+{
+ switch (result) {
+ case NCI_HCI_ANY_OK:
+ return 0;
+ case NCI_HCI_ANY_E_REG_PAR_UNKNOWN:
+ return -EOPNOTSUPP;
+ case NCI_HCI_ANY_E_TIMEOUT:
+ return -ETIME;
+ default:
+ return -1;
+ }
+}
+
/* HCI core */
static void nci_hci_reset_pipes(struct nci_hci_dev *hdev)
{
@@ -146,18 +160,18 @@ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe,
if (!conn_info)
return -EPROTO;
- skb = nci_skb_alloc(ndev, 2 + conn_info->max_pkt_payload_len +
+ i = 0;
+ skb = nci_skb_alloc(ndev, conn_info->max_pkt_payload_len +
NCI_DATA_HDR_SIZE, GFP_KERNEL);
if (!skb)
return -ENOMEM;
- skb_reserve(skb, 2 + NCI_DATA_HDR_SIZE);
+ skb_reserve(skb, NCI_DATA_HDR_SIZE + 2);
*skb_push(skb, 1) = data_type;
- i = 0;
- len = conn_info->max_pkt_payload_len;
-
do {
+ len = conn_info->max_pkt_payload_len;
+
/* If last packet add NCI_HFP_NO_CHAINING */
if (i + conn_info->max_pkt_payload_len -
(skb->len + 1) >= data_len) {
@@ -177,9 +191,15 @@ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe,
return r;
i += len;
+
if (i < data_len) {
- skb_trim(skb, 0);
- skb_pull(skb, len);
+ skb = nci_skb_alloc(ndev,
+ conn_info->max_pkt_payload_len +
+ NCI_DATA_HDR_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_reserve(skb, NCI_DATA_HDR_SIZE + 1);
}
} while (i < data_len);
@@ -212,7 +232,8 @@ int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate, u8 cmd,
const u8 *param, size_t param_len,
struct sk_buff **skb)
{
- struct nci_conn_info *conn_info;
+ struct nci_hcp_message *message;
+ struct nci_conn_info *conn_info;
struct nci_data data;
int r;
u8 pipe = ndev->hci_dev->gate2pipe[gate];
@@ -232,9 +253,15 @@ int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate, u8 cmd,
r = nci_request(ndev, nci_hci_send_data_req, (unsigned long)&data,
msecs_to_jiffies(NCI_DATA_TIMEOUT));
-
- if (r == NCI_STATUS_OK && skb)
- *skb = conn_info->rx_skb;
+ if (r == NCI_STATUS_OK) {
+ message = (struct nci_hcp_message *)conn_info->rx_skb->data;
+ r = nci_hci_result_to_errno(
+ NCI_HCP_MSG_GET_CMD(message->header));
+ skb_pull(conn_info->rx_skb, NCI_HCI_HCP_MESSAGE_HEADER_LEN);
+
+ if (!r && skb)
+ *skb = conn_info->rx_skb;
+ }
return r;
}
@@ -328,9 +355,6 @@ static void nci_hci_resp_received(struct nci_dev *ndev, u8 pipe,
struct nci_conn_info *conn_info;
u8 status = result;
- if (result != NCI_HCI_ANY_OK)
- goto exit;
-
conn_info = ndev->hci_dev->conn_info;
if (!conn_info) {
status = NCI_STATUS_REJECTED;
@@ -340,7 +364,7 @@ static void nci_hci_resp_received(struct nci_dev *ndev, u8 pipe,
conn_info->rx_skb = skb;
exit:
- nci_req_complete(ndev, status);
+ nci_req_complete(ndev, NCI_STATUS_OK);
}
/* Receive hcp message for pipe, with type and cmd.
@@ -378,7 +402,7 @@ static void nci_hci_msg_rx_work(struct work_struct *work)
u8 pipe, type, instruction;
while ((skb = skb_dequeue(&hdev->msg_rx_queue)) != NULL) {
- pipe = skb->data[0];
+ pipe = NCI_HCP_MSG_GET_PIPE(skb->data[0]);
skb_pull(skb, NCI_HCI_HCP_PACKET_HEADER_LEN);
message = (struct nci_hcp_message *)skb->data;
type = NCI_HCP_MSG_GET_TYPE(message->header);
@@ -395,7 +419,7 @@ void nci_hci_data_received_cb(void *context,
{
struct nci_dev *ndev = (struct nci_dev *)context;
struct nci_hcp_packet *packet;
- u8 pipe, type, instruction;
+ u8 pipe, type;
struct sk_buff *hcp_skb;
struct sk_buff *frag_skb;
int msg_len;
@@ -415,7 +439,7 @@ void nci_hci_data_received_cb(void *context,
/* it's the last fragment. Does it need re-aggregation? */
if (skb_queue_len(&ndev->hci_dev->rx_hcp_frags)) {
- pipe = packet->header & NCI_HCI_FRAGMENT;
+ pipe = NCI_HCP_MSG_GET_PIPE(packet->header);
skb_queue_tail(&ndev->hci_dev->rx_hcp_frags, skb);
msg_len = 0;
@@ -434,7 +458,7 @@ void nci_hci_data_received_cb(void *context,
*skb_put(hcp_skb, NCI_HCI_HCP_PACKET_HEADER_LEN) = pipe;
skb_queue_walk(&ndev->hci_dev->rx_hcp_frags, frag_skb) {
- msg_len = frag_skb->len - NCI_HCI_HCP_PACKET_HEADER_LEN;
+ msg_len = frag_skb->len - NCI_HCI_HCP_PACKET_HEADER_LEN;
memcpy(skb_put(hcp_skb, msg_len), frag_skb->data +
NCI_HCI_HCP_PACKET_HEADER_LEN, msg_len);
}
@@ -452,11 +476,10 @@ void nci_hci_data_received_cb(void *context,
packet = (struct nci_hcp_packet *)hcp_skb->data;
type = NCI_HCP_MSG_GET_TYPE(packet->message.header);
if (type == NCI_HCI_HCP_RESPONSE) {
- pipe = packet->header;
- instruction = NCI_HCP_MSG_GET_CMD(packet->message.header);
- skb_pull(hcp_skb, NCI_HCI_HCP_PACKET_HEADER_LEN +
- NCI_HCI_HCP_MESSAGE_HEADER_LEN);
- nci_hci_hcp_message_rx(ndev, pipe, type, instruction, hcp_skb);
+ pipe = NCI_HCP_MSG_GET_PIPE(packet->header);
+ skb_pull(hcp_skb, NCI_HCI_HCP_PACKET_HEADER_LEN);
+ nci_hci_hcp_message_rx(ndev, pipe, type,
+ NCI_STATUS_OK, hcp_skb);
} else {
skb_queue_tail(&ndev->hci_dev->msg_rx_queue, hcp_skb);
schedule_work(&ndev->hci_dev->msg_rx_work);
@@ -488,6 +511,7 @@ EXPORT_SYMBOL(nci_hci_open_pipe);
int nci_hci_set_param(struct nci_dev *ndev, u8 gate, u8 idx,
const u8 *param, size_t param_len)
{
+ struct nci_hcp_message *message;
struct nci_conn_info *conn_info;
struct nci_data data;
int r;
@@ -520,6 +544,12 @@ int nci_hci_set_param(struct nci_dev *ndev, u8 gate, u8 idx,
r = nci_request(ndev, nci_hci_send_data_req,
(unsigned long)&data,
msecs_to_jiffies(NCI_DATA_TIMEOUT));
+ if (r == NCI_STATUS_OK) {
+ message = (struct nci_hcp_message *)conn_info->rx_skb->data;
+ r = nci_hci_result_to_errno(
+ NCI_HCP_MSG_GET_CMD(message->header));
+ skb_pull(conn_info->rx_skb, NCI_HCI_HCP_MESSAGE_HEADER_LEN);
+ }
kfree(tmp);
return r;
@@ -529,6 +559,7 @@ EXPORT_SYMBOL(nci_hci_set_param);
int nci_hci_get_param(struct nci_dev *ndev, u8 gate, u8 idx,
struct sk_buff **skb)
{
+ struct nci_hcp_message *message;
struct nci_conn_info *conn_info;
struct nci_data data;
int r;
@@ -553,8 +584,15 @@ int nci_hci_get_param(struct nci_dev *ndev, u8 gate, u8 idx,
r = nci_request(ndev, nci_hci_send_data_req, (unsigned long)&data,
msecs_to_jiffies(NCI_DATA_TIMEOUT));
- if (r == NCI_STATUS_OK)
- *skb = conn_info->rx_skb;
+ if (r == NCI_STATUS_OK) {
+ message = (struct nci_hcp_message *)conn_info->rx_skb->data;
+ r = nci_hci_result_to_errno(
+ NCI_HCP_MSG_GET_CMD(message->header));
+ skb_pull(conn_info->rx_skb, NCI_HCI_HCP_MESSAGE_HEADER_LEN);
+
+ if (!r && skb)
+ *skb = conn_info->rx_skb;
+ }
return r;
}
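
The recurring pattern in this file: once nci_request() succeeds at the transport level, the HCP result byte is mapped to a normal errno via nci_hci_result_to_errno() and the message header is pulled off before the skb reaches the caller. The caller-side contract then looks like this sketch (the function is hypothetical, the helpers are from this file):

    static int demo_read_param(struct nci_dev *ndev, u8 gate, u8 idx)
    {
        struct sk_buff *skb;
        int r;

        r = nci_hci_get_param(ndev, gate, idx, &skb);
        if (r < 0)
            return r; /* transport error, or HCP result mapped to an errno */

        /* ... parse skb->data, now pure parameter payload ... */
        kfree_skb(skb);
        return 0;
    }
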
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 73d1ca7c5..853172c27 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -63,6 +63,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
[NFC_ATTR_FIRMWARE_NAME] = { .type = NLA_STRING,
.len = NFC_FIRMWARE_NAME_MAXSIZE },
[NFC_ATTR_SE_APDU] = { .type = NLA_BINARY },
+ [NFC_ATTR_VENDOR_DATA] = { .type = NLA_BINARY },
+
};
static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = {
@@ -1503,7 +1505,7 @@ static int nfc_genl_vendor_cmd(struct sk_buff *skb,
u32 dev_idx, vid, subcmd;
u8 *data;
size_t data_len;
- int i;
+ int i, err;
if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
!info->attrs[NFC_ATTR_VENDOR_ID] ||
@@ -1534,12 +1536,92 @@ static int nfc_genl_vendor_cmd(struct sk_buff *skb,
if (cmd->vendor_id != vid || cmd->subcmd != subcmd)
continue;
- return cmd->doit(dev, data, data_len);
+ dev->cur_cmd_info = info;
+ err = cmd->doit(dev, data, data_len);
+ dev->cur_cmd_info = NULL;
+ return err;
}
return -EOPNOTSUPP;
}
+/* message building helper */
+static inline void *nfc_hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
+ int flags, u8 cmd)
+{
+ /* since there is no private header just add the generic one */
+ return genlmsg_put(skb, portid, seq, &nfc_genl_family, flags, cmd);
+}
+
+static struct sk_buff *
+__nfc_alloc_vendor_cmd_skb(struct nfc_dev *dev, int approxlen,
+ u32 portid, u32 seq,
+ enum nfc_attrs attr,
+ u32 oui, u32 subcmd, gfp_t gfp)
+{
+ struct sk_buff *skb;
+ void *hdr;
+
+ skb = nlmsg_new(approxlen + 100, gfp);
+ if (!skb)
+ return NULL;
+
+ hdr = nfc_hdr_put(skb, portid, seq, 0, NFC_CMD_VENDOR);
+ if (!hdr) {
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ if (nla_put_u32(skb, NFC_ATTR_DEVICE_INDEX, dev->idx))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, NFC_ATTR_VENDOR_ID, oui))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, NFC_ATTR_VENDOR_SUBCMD, subcmd))
+ goto nla_put_failure;
+
+ ((void **)skb->cb)[0] = dev;
+ ((void **)skb->cb)[1] = hdr;
+
+ return skb;
+
+nla_put_failure:
+ kfree_skb(skb);
+ return NULL;
+}
+
+struct sk_buff *__nfc_alloc_vendor_cmd_reply_skb(struct nfc_dev *dev,
+ enum nfc_attrs attr,
+ u32 oui, u32 subcmd,
+ int approxlen)
+{
+ if (WARN_ON(!dev->cur_cmd_info))
+ return NULL;
+
+ return __nfc_alloc_vendor_cmd_skb(dev, approxlen,
+ dev->cur_cmd_info->snd_portid,
+ dev->cur_cmd_info->snd_seq, attr,
+ oui, subcmd, GFP_KERNEL);
+}
+EXPORT_SYMBOL(__nfc_alloc_vendor_cmd_reply_skb);
+
+int nfc_vendor_cmd_reply(struct sk_buff *skb)
+{
+ struct nfc_dev *dev = ((void **)skb->cb)[0];
+ void *hdr = ((void **)skb->cb)[1];
+
+ /* clear CB data for netlink core to own from now on */
+ memset(skb->cb, 0, sizeof(skb->cb));
+
+ if (WARN_ON(!dev->cur_cmd_info)) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ genlmsg_end(skb, hdr);
+ return genlmsg_reply(skb, dev->cur_cmd_info);
+}
+EXPORT_SYMBOL(nfc_vendor_cmd_reply);
+
static const struct genl_ops nfc_genl_ops[] = {
{
.cmd = NFC_CMD_GET_DEVICE,
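
With cur_cmd_info stashed around the doit() call, a vendor command handler can now build a reply bound to the requesting socket using the two new exports. A sketch of such a handler (DEMO_OUI and DEMO_SUBCMD are placeholders, not real identifiers):

    static int demo_vendor_doit(struct nfc_dev *dev, void *data, size_t data_len)
    {
        struct sk_buff *msg;

        msg = __nfc_alloc_vendor_cmd_reply_skb(dev, NFC_ATTR_VENDOR_DATA,
                                               DEMO_OUI, DEMO_SUBCMD,
                                               data_len);
        if (!msg)
            return -ENOMEM;

        if (nla_put(msg, NFC_ATTR_VENDOR_DATA, data_len, data)) {
            kfree_skb(msg);
            return -EMSGSIZE;
        }

        return nfc_vendor_cmd_reply(msg);
    }
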
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index 15840401a..d143aa9f6 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -5,6 +5,8 @@
config OPENVSWITCH
tristate "Open vSwitch"
depends on INET
+ depends on !NF_CONNTRACK || \
+ (NF_CONNTRACK && (!NF_DEFRAG_IPV6 || NF_DEFRAG_IPV6))
select LIBCRC32C
select MPLS
select NET_MPLS_GSO
@@ -34,7 +36,7 @@ config OPENVSWITCH
config OPENVSWITCH_GRE
tristate "Open vSwitch GRE tunneling support"
depends on OPENVSWITCH
- depends on NET_IPGRE_DEMUX
+ depends on NET_IPGRE
default OPENVSWITCH
---help---
If you say Y here, then the Open vSwitch will be able to create GRE
@@ -59,7 +61,7 @@ config OPENVSWITCH_VXLAN
config OPENVSWITCH_GENEVE
tristate "Open vSwitch Geneve tunneling support"
depends on OPENVSWITCH
- depends on GENEVE_CORE
+ depends on GENEVE
default OPENVSWITCH
---help---
If you say Y here, then the Open vSwitch will be able to create geneve vport.
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile
index 91b947841..60f809085 100644
--- a/net/openvswitch/Makefile
+++ b/net/openvswitch/Makefile
@@ -15,6 +15,10 @@ openvswitch-y := \
vport-internal_dev.o \
vport-netdev.o
+ifneq ($(CONFIG_NF_CONNTRACK),)
+openvswitch-y += conntrack.o
+endif
+
+obj-$(CONFIG_OPENVSWITCH_VXLAN)+= vport-vxlan.o
obj-$(CONFIG_OPENVSWITCH_GENEVE)+= vport-geneve.o
-obj-$(CONFIG_OPENVSWITCH_VXLAN) += vport-vxlan.o
obj-$(CONFIG_OPENVSWITCH_GRE) += vport-gre.o
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index ee34f474a..dba635d08 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -22,6 +22,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
+#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
@@ -29,8 +30,10 @@
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
+#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
+#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
@@ -38,6 +41,7 @@
#include "datapath.h"
#include "flow.h"
+#include "conntrack.h"
#include "vport.h"
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
@@ -52,6 +56,20 @@ struct deferred_action {
struct sw_flow_key pkt_key;
};
+#define MAX_L2_LEN (VLAN_ETH_HLEN + 3 * MPLS_HLEN)
+struct ovs_frag_data {
+ unsigned long dst;
+ struct vport *vport;
+ struct ovs_skb_cb cb;
+ __be16 inner_protocol;
+ __u16 vlan_tci;
+ __be16 vlan_proto;
+ unsigned int l2_len;
+ u8 l2_data[MAX_L2_LEN];
+};
+
+static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
+
#define DEFERRED_ACTION_FIFO_SIZE 10
struct action_fifo {
int head;
@@ -185,10 +203,6 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
return 0;
}
-/* 'KEY' must not have any bits set outside of the 'MASK' */
-#define MASKED(OLD, KEY, MASK) ((KEY) | ((OLD) & ~(MASK)))
-#define SET_MASKED(OLD, KEY, MASK) ((OLD) = MASKED(OLD, KEY, MASK))
-
static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
const __be32 *mpls_lse, const __be32 *mask)
{
@@ -201,7 +215,7 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
return err;
stack = (__be32 *)skb_mpls_header(skb);
- lse = MASKED(*stack, *mpls_lse, *mask);
+ lse = OVS_MASKED(*stack, *mpls_lse, *mask);
if (skb->ip_summed == CHECKSUM_COMPLETE) {
__be32 diff[] = { ~(*stack), lse };
@@ -244,9 +258,9 @@ static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
const u16 *src = (const u16 *)src_;
const u16 *mask = (const u16 *)mask_;
- SET_MASKED(dst[0], src[0], mask[0]);
- SET_MASKED(dst[1], src[1], mask[1]);
- SET_MASKED(dst[2], src[2], mask[2]);
+ OVS_SET_MASKED(dst[0], src[0], mask[0]);
+ OVS_SET_MASKED(dst[1], src[1], mask[1]);
+ OVS_SET_MASKED(dst[2], src[2], mask[2]);
}
static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
@@ -284,14 +298,14 @@ static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
if (nh->protocol == IPPROTO_TCP) {
if (likely(transport_len >= sizeof(struct tcphdr)))
inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
- addr, new_addr, 1);
+ addr, new_addr, true);
} else if (nh->protocol == IPPROTO_UDP) {
if (likely(transport_len >= sizeof(struct udphdr))) {
struct udphdr *uh = udp_hdr(skb);
if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
inet_proto_csum_replace4(&uh->check, skb,
- addr, new_addr, 1);
+ addr, new_addr, true);
if (!uh->check)
uh->check = CSUM_MANGLED_0;
}
@@ -316,14 +330,14 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
if (l4_proto == NEXTHDR_TCP) {
if (likely(transport_len >= sizeof(struct tcphdr)))
inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
- addr, new_addr, 1);
+ addr, new_addr, true);
} else if (l4_proto == NEXTHDR_UDP) {
if (likely(transport_len >= sizeof(struct udphdr))) {
struct udphdr *uh = udp_hdr(skb);
if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
inet_proto_csum_replace16(&uh->check, skb,
- addr, new_addr, 1);
+ addr, new_addr, true);
if (!uh->check)
uh->check = CSUM_MANGLED_0;
}
@@ -331,17 +345,17 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
} else if (l4_proto == NEXTHDR_ICMP) {
if (likely(transport_len >= sizeof(struct icmp6hdr)))
inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
- skb, addr, new_addr, 1);
+ skb, addr, new_addr, true);
}
}
static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
const __be32 mask[4], __be32 masked[4])
{
- masked[0] = MASKED(old[0], addr[0], mask[0]);
- masked[1] = MASKED(old[1], addr[1], mask[1]);
- masked[2] = MASKED(old[2], addr[2], mask[2]);
- masked[3] = MASKED(old[3], addr[3], mask[3]);
+ masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
+ masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
+ masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
+ masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}
static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
@@ -358,15 +372,15 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
/* Bits 21-24 are always unmasked, so this retains their values. */
- SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
- SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
- SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
+ OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
+ OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
+ OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}
static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
u8 mask)
{
- new_ttl = MASKED(nh->ttl, new_ttl, mask);
+ new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);
csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
nh->ttl = new_ttl;
@@ -392,7 +406,7 @@ static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
* makes sense to check if the value actually changed.
*/
if (mask->ipv4_src) {
- new_addr = MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);
+ new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);
if (unlikely(new_addr != nh->saddr)) {
set_ip_addr(skb, nh, &nh->saddr, new_addr);
@@ -400,7 +414,7 @@ static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
}
}
if (mask->ipv4_dst) {
- new_addr = MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);
+ new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);
if (unlikely(new_addr != nh->daddr)) {
set_ip_addr(skb, nh, &nh->daddr, new_addr);
@@ -488,7 +502,8 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
*(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
}
if (mask->ipv6_hlimit) {
- SET_MASKED(nh->hop_limit, key->ipv6_hlimit, mask->ipv6_hlimit);
+ OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
+ mask->ipv6_hlimit);
flow_key->ip.ttl = nh->hop_limit;
}
return 0;
@@ -498,7 +513,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
static void set_tp_port(struct sk_buff *skb, __be16 *port,
__be16 new_port, __sum16 *check)
{
- inet_proto_csum_replace2(check, skb, *port, new_port, 0);
+ inet_proto_csum_replace2(check, skb, *port, new_port, false);
*port = new_port;
}
@@ -517,8 +532,8 @@ static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
uh = udp_hdr(skb);
/* Either of the masks is non-zero, so do not bother checking them. */
- src = MASKED(uh->source, key->udp_src, mask->udp_src);
- dst = MASKED(uh->dest, key->udp_dst, mask->udp_dst);
+ src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
+ dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);
if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
if (likely(src != uh->source)) {
@@ -558,12 +573,12 @@ static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
return err;
th = tcp_hdr(skb);
- src = MASKED(th->source, key->tcp_src, mask->tcp_src);
+ src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
if (likely(src != th->source)) {
set_tp_port(skb, &th->source, src, &th->check);
flow_key->tp.src = src;
}
- dst = MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
+ dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
if (likely(dst != th->dest)) {
set_tp_port(skb, &th->dest, dst, &th->check);
flow_key->tp.dst = dst;
@@ -590,8 +605,8 @@ static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
old_csum = sh->checksum;
old_correct_csum = sctp_compute_cksum(skb, sctphoff);
- sh->source = MASKED(sh->source, key->sctp_src, mask->sctp_src);
- sh->dest = MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);
+ sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
+ sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);
new_csum = sctp_compute_cksum(skb, sctphoff);
@@ -605,27 +620,161 @@ static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
return 0;
}
-static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
+static int ovs_vport_output(struct sock *sock, struct sk_buff *skb)
+{
+ struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
+ struct vport *vport = data->vport;
+
+ if (skb_cow_head(skb, data->l2_len) < 0) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ __skb_dst_copy(skb, data->dst);
+ *OVS_CB(skb) = data->cb;
+ skb->inner_protocol = data->inner_protocol;
+ skb->vlan_tci = data->vlan_tci;
+ skb->vlan_proto = data->vlan_proto;
+
+ /* Reconstruct the MAC header. */
+ skb_push(skb, data->l2_len);
+ memcpy(skb->data, &data->l2_data, data->l2_len);
+ ovs_skb_postpush_rcsum(skb, skb->data, data->l2_len);
+ skb_reset_mac_header(skb);
+
+ ovs_vport_send(vport, skb);
+ return 0;
+}
+
+static unsigned int
+ovs_dst_get_mtu(const struct dst_entry *dst)
+{
+ return dst->dev->mtu;
+}
+
+static struct dst_ops ovs_dst_ops = {
+ .family = AF_UNSPEC,
+ .mtu = ovs_dst_get_mtu,
+};
+
+/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
+ * ovs_vport_output(), which is called once per fragmented packet.
+ */
+static void prepare_frag(struct vport *vport, struct sk_buff *skb)
+{
+ unsigned int hlen = skb_network_offset(skb);
+ struct ovs_frag_data *data;
+
+ data = this_cpu_ptr(&ovs_frag_data_storage);
+ data->dst = skb->_skb_refdst;
+ data->vport = vport;
+ data->cb = *OVS_CB(skb);
+ data->inner_protocol = skb->inner_protocol;
+ data->vlan_tci = skb->vlan_tci;
+ data->vlan_proto = skb->vlan_proto;
+ data->l2_len = hlen;
+ memcpy(&data->l2_data, skb->data, hlen);
+
+ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+ skb_pull(skb, hlen);
+}
+
+static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru,
+ __be16 ethertype)
+{
+ if (skb_network_offset(skb) > MAX_L2_LEN) {
+ OVS_NLERR(1, "L2 header too long to fragment");
+ goto err;
+ }
+
+ if (ethertype == htons(ETH_P_IP)) {
+ struct dst_entry ovs_dst;
+ unsigned long orig_dst;
+
+ prepare_frag(vport, skb);
+ dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
+ DST_OBSOLETE_NONE, DST_NOCOUNT);
+ ovs_dst.dev = vport->dev;
+
+ orig_dst = skb->_skb_refdst;
+ skb_dst_set_noref(skb, &ovs_dst);
+ IPCB(skb)->frag_max_size = mru;
+
+ ip_do_fragment(skb->sk, skb, ovs_vport_output);
+ refdst_drop(orig_dst);
+ } else if (ethertype == htons(ETH_P_IPV6)) {
+ const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
+ unsigned long orig_dst;
+ struct rt6_info ovs_rt;
+
+ if (!v6ops) {
+ goto err;
+ }
+
+ prepare_frag(vport, skb);
+ memset(&ovs_rt, 0, sizeof(ovs_rt));
+ dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
+ DST_OBSOLETE_NONE, DST_NOCOUNT);
+ ovs_rt.dst.dev = vport->dev;
+
+ orig_dst = skb->_skb_refdst;
+ skb_dst_set_noref(skb, &ovs_rt.dst);
+ IP6CB(skb)->frag_max_size = mru;
+
+ v6ops->fragment(skb->sk, skb, ovs_vport_output);
+ refdst_drop(orig_dst);
+ } else {
+ WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
+ ovs_vport_name(vport), ntohs(ethertype), mru,
+ vport->dev->mtu);
+ goto err;
+ }
+
+ return;
+err:
+ kfree_skb(skb);
+}
+
+static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
+ struct sw_flow_key *key)
{
struct vport *vport = ovs_vport_rcu(dp, out_port);
- if (likely(vport))
- ovs_vport_send(vport, skb);
- else
+ if (likely(vport)) {
+ u16 mru = OVS_CB(skb)->mru;
+
+ if (likely(!mru || (skb->len <= mru + ETH_HLEN))) {
+ ovs_vport_send(vport, skb);
+ } else if (mru <= vport->dev->mtu) {
+ __be16 ethertype = key->eth.type;
+
+ if (!is_flow_key_valid(key)) {
+ if (eth_p_mpls(skb->protocol))
+ ethertype = skb->inner_protocol;
+ else
+ ethertype = vlan_get_protocol(skb);
+ }
+
+ ovs_fragment(vport, skb, mru, ethertype);
+ } else {
+ kfree_skb(skb);
+ }
+ } else {
kfree_skb(skb);
+ }
}
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
struct sw_flow_key *key, const struct nlattr *attr,
const struct nlattr *actions, int actions_len)
{
- struct ovs_tunnel_info info;
struct dp_upcall_info upcall;
const struct nlattr *a;
int rem;
memset(&upcall, 0, sizeof(upcall));
upcall.cmd = OVS_PACKET_CMD_ACTION;
+ upcall.mru = OVS_CB(skb)->mru;
for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
a = nla_next(a, &rem)) {
@@ -646,11 +795,11 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
if (vport) {
int err;
- err = ovs_vport_get_egress_tun_info(vport, skb,
- &info);
+ err = dev_fill_metadata_dst(vport->dev, skb);
if (!err)
- upcall.egress_tun_info = &info;
+ upcall.egress_tun_info = skb_tunnel_info(skb);
}
+
break;
}
@@ -677,9 +826,12 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
a = nla_next(a, &rem)) {
+ u32 probability;
+
switch (nla_type(a)) {
case OVS_SAMPLE_ATTR_PROBABILITY:
- if (prandom_u32() >= nla_get_u32(a))
+ probability = nla_get_u32(a);
+ if (!probability || prandom_u32() > probability)
return 0;
break;
@@ -741,7 +893,11 @@ static int execute_set_action(struct sk_buff *skb,
{
/* Only tunnel set execution is supported without a mask. */
if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
- OVS_CB(skb)->egress_tun_info = nla_data(a);
+ struct ovs_tunnel_info *tun = nla_data(a);
+
+ skb_dst_drop(skb);
+ dst_hold((struct dst_entry *)tun->tun_dst);
+ skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
return 0;
}
@@ -759,12 +915,13 @@ static int execute_masked_set_action(struct sk_buff *skb,
switch (nla_type(a)) {
case OVS_KEY_ATTR_PRIORITY:
- SET_MASKED(skb->priority, nla_get_u32(a), *get_mask(a, u32 *));
+ OVS_SET_MASKED(skb->priority, nla_get_u32(a),
+ *get_mask(a, u32 *));
flow_key->phy.priority = skb->priority;
break;
case OVS_KEY_ATTR_SKB_MARK:
- SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
+ OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
flow_key->phy.skb_mark = skb->mark;
break;
@@ -807,6 +964,13 @@ static int execute_masked_set_action(struct sk_buff *skb,
err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
__be32 *));
break;
+
+ case OVS_KEY_ATTR_CT_STATE:
+ case OVS_KEY_ATTR_CT_ZONE:
+ case OVS_KEY_ATTR_CT_MARK:
+ case OVS_KEY_ATTR_CT_LABELS:
+ err = -EINVAL;
+ break;
}
return err;
@@ -876,7 +1040,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);
if (out_skb)
- do_output(dp, out_skb, prev_port);
+ do_output(dp, out_skb, prev_port, key);
prev_port = -1;
}
@@ -933,6 +1097,21 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
case OVS_ACTION_ATTR_SAMPLE:
err = sample(dp, skb, key, a, attr, len);
break;
+
+ case OVS_ACTION_ATTR_CT:
+ if (!is_flow_key_valid(key)) {
+ err = ovs_flow_key_update(skb, key);
+ if (err)
+ return err;
+ }
+
+ err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
+ nla_data(a));
+
+ /* Hide stolen IP fragments from user space. */
+ if (err)
+ return err == -EINPROGRESS ? 0 : err;
+ break;
}
if (unlikely(err)) {
@@ -942,7 +1121,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
}
if (prev_port != -1)
- do_output(dp, skb, prev_port);
+ do_output(dp, skb, prev_port, key);
else
consume_skb(skb);
@@ -984,7 +1163,6 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
int err;
this_cpu_inc(exec_actions_level);
- OVS_CB(skb)->egress_tun_info = NULL;
err = do_execute_actions(dp, skb, key,
acts->actions, acts->actions_len);
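
Distilling the new do_output() policy from the hunk above: packets reassembled on input carry their maximum received unit (MRU) in the skb control block, and oversized output is re-fragmented to that MRU instead of being sent whole. In outline (a restatement of the hunk, not new code):

    if (!mru || skb->len <= mru + ETH_HLEN)
        ovs_vport_send(vport, skb);               /* fits: send as-is */
    else if (mru <= vport->dev->mtu)
        ovs_fragment(vport, skb, mru, ethertype); /* re-fragment to MRU */
    else
        kfree_skb(skb);                           /* cannot honour the MRU: drop */
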
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
new file mode 100644
index 000000000..50095820e
--- /dev/null
+++ b/net/openvswitch/conntrack.c
@@ -0,0 +1,778 @@
+/*
+ * Copyright (c) 2015 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/openvswitch.h>
+#include <net/ip.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_labels.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
+
+#include "datapath.h"
+#include "conntrack.h"
+#include "flow.h"
+#include "flow_netlink.h"
+
+struct ovs_ct_len_tbl {
+ size_t maxlen;
+ size_t minlen;
+};
+
+/* Metadata mark for masked write to conntrack mark */
+struct md_mark {
+ u32 value;
+ u32 mask;
+};
+
+/* Metadata label for masked write to conntrack label. */
+struct md_labels {
+ struct ovs_key_ct_labels value;
+ struct ovs_key_ct_labels mask;
+};
+
+/* Conntrack action context for execution. */
+struct ovs_conntrack_info {
+ struct nf_conntrack_helper *helper;
+ struct nf_conntrack_zone zone;
+ struct nf_conn *ct;
+ u8 commit : 1;
+ u16 family;
+ struct md_mark mark;
+ struct md_labels labels;
+};
+
+static u16 key_to_nfproto(const struct sw_flow_key *key)
+{
+ switch (ntohs(key->eth.type)) {
+ case ETH_P_IP:
+ return NFPROTO_IPV4;
+ case ETH_P_IPV6:
+ return NFPROTO_IPV6;
+ default:
+ return NFPROTO_UNSPEC;
+ }
+}
+
+/* Map SKB connection state into the values used by flow definition. */
+static u8 ovs_ct_get_state(enum ip_conntrack_info ctinfo)
+{
+ u8 ct_state = OVS_CS_F_TRACKED;
+
+ switch (ctinfo) {
+ case IP_CT_ESTABLISHED_REPLY:
+ case IP_CT_RELATED_REPLY:
+ case IP_CT_NEW_REPLY:
+ ct_state |= OVS_CS_F_REPLY_DIR;
+ break;
+ default:
+ break;
+ }
+
+ switch (ctinfo) {
+ case IP_CT_ESTABLISHED:
+ case IP_CT_ESTABLISHED_REPLY:
+ ct_state |= OVS_CS_F_ESTABLISHED;
+ break;
+ case IP_CT_RELATED:
+ case IP_CT_RELATED_REPLY:
+ ct_state |= OVS_CS_F_RELATED;
+ break;
+ case IP_CT_NEW:
+ case IP_CT_NEW_REPLY:
+ ct_state |= OVS_CS_F_NEW;
+ break;
+ default:
+ break;
+ }
+
+ return ct_state;
+}
+
+static u32 ovs_ct_get_mark(const struct nf_conn *ct)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
+ return ct ? ct->mark : 0;
+#else
+ return 0;
+#endif
+}
+
+static void ovs_ct_get_labels(const struct nf_conn *ct,
+ struct ovs_key_ct_labels *labels)
+{
+ struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL;
+
+ if (cl) {
+ size_t len = cl->words * sizeof(long);
+
+ if (len > OVS_CT_LABELS_LEN)
+ len = OVS_CT_LABELS_LEN;
+ else if (len < OVS_CT_LABELS_LEN)
+ memset(labels, 0, OVS_CT_LABELS_LEN);
+ memcpy(labels, cl->bits, len);
+ } else {
+ memset(labels, 0, OVS_CT_LABELS_LEN);
+ }
+}
+
+static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state,
+ const struct nf_conntrack_zone *zone,
+ const struct nf_conn *ct)
+{
+ key->ct.state = state;
+ key->ct.zone = zone->id;
+ key->ct.mark = ovs_ct_get_mark(ct);
+ ovs_ct_get_labels(ct, &key->ct.labels);
+}
+
+/* Update 'key' based on skb->nfct. If 'post_ct' is true, then OVS has
+ * previously sent the packet to conntrack via the ct action.
+ */
+static void ovs_ct_update_key(const struct sk_buff *skb,
+ struct sw_flow_key *key, bool post_ct)
+{
+ const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+ u8 state = 0;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (ct) {
+ state = ovs_ct_get_state(ctinfo);
+ if (!nf_ct_is_confirmed(ct))
+ state |= OVS_CS_F_NEW;
+ if (ct->master)
+ state |= OVS_CS_F_RELATED;
+ zone = nf_ct_zone(ct);
+ } else if (post_ct) {
+ state = OVS_CS_F_TRACKED | OVS_CS_F_INVALID;
+ }
+ __ovs_ct_update_key(key, state, zone, ct);
+}
+
+void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key)
+{
+ ovs_ct_update_key(skb, key, false);
+}
+
+int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb)
+{
+ if (nla_put_u32(skb, OVS_KEY_ATTR_CT_STATE, key->ct.state))
+ return -EMSGSIZE;
+
+ if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
+ nla_put_u16(skb, OVS_KEY_ATTR_CT_ZONE, key->ct.zone))
+ return -EMSGSIZE;
+
+ if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
+ nla_put_u32(skb, OVS_KEY_ATTR_CT_MARK, key->ct.mark))
+ return -EMSGSIZE;
+
+ if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
+ nla_put(skb, OVS_KEY_ATTR_CT_LABELS, sizeof(key->ct.labels),
+ &key->ct.labels))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int ovs_ct_set_mark(struct sk_buff *skb, struct sw_flow_key *key,
+ u32 ct_mark, u32 mask)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+ u32 new_mark;
+
+ /* The connection could be invalid, in which case set_mark is a no-op. */
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct)
+ return 0;
+
+ new_mark = ct_mark | (ct->mark & ~(mask));
+ if (ct->mark != new_mark) {
+ ct->mark = new_mark;
+ nf_conntrack_event_cache(IPCT_MARK, ct);
+ key->ct.mark = new_mark;
+ }
+
+ return 0;
+#else
+ return -ENOTSUPP;
+#endif
+}
+
+static int ovs_ct_set_labels(struct sk_buff *skb, struct sw_flow_key *key,
+ const struct ovs_key_ct_labels *labels,
+ const struct ovs_key_ct_labels *mask)
+{
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn_labels *cl;
+ struct nf_conn *ct;
+ int err;
+
+ /* The connection could be invalid, in which case set_label is a no-op. */
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct)
+ return 0;
+
+ cl = nf_ct_labels_find(ct);
+ if (!cl) {
+ nf_ct_labels_ext_add(ct);
+ cl = nf_ct_labels_find(ct);
+ }
+ if (!cl || cl->words * sizeof(long) < OVS_CT_LABELS_LEN)
+ return -ENOSPC;
+
+ err = nf_connlabels_replace(ct, (u32 *)labels, (u32 *)mask,
+ OVS_CT_LABELS_LEN / sizeof(u32));
+ if (err)
+ return err;
+
+ ovs_ct_get_labels(ct, &key->ct.labels);
+ return 0;
+}
+
+/* 'skb' should already be pulled to nh_ofs. */
+static int ovs_ct_helper(struct sk_buff *skb, u16 proto)
+{
+ const struct nf_conntrack_helper *helper;
+ const struct nf_conn_help *help;
+ enum ip_conntrack_info ctinfo;
+ unsigned int protoff;
+ struct nf_conn *ct;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct || ctinfo == IP_CT_RELATED_REPLY)
+ return NF_ACCEPT;
+
+ help = nfct_help(ct);
+ if (!help)
+ return NF_ACCEPT;
+
+ helper = rcu_dereference(help->helper);
+ if (!helper)
+ return NF_ACCEPT;
+
+ switch (proto) {
+ case NFPROTO_IPV4:
+ protoff = ip_hdrlen(skb);
+ break;
+ case NFPROTO_IPV6: {
+ u8 nexthdr = ipv6_hdr(skb)->nexthdr;
+ __be16 frag_off;
+ int ofs;
+
+ ofs = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
+ &frag_off);
+ if (ofs < 0 || (frag_off & htons(~0x7)) != 0) {
+ pr_debug("proto header not found\n");
+ return NF_ACCEPT;
+ }
+ protoff = ofs;
+ break;
+ }
+ default:
+ WARN_ONCE(1, "helper invoked on non-IP family!");
+ return NF_DROP;
+ }
+
+ return helper->help(skb, protoff, ct, ctinfo);
+}
+
+/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
+ * value if 'skb' is freed.
+ */
+static int handle_fragments(struct net *net, struct sw_flow_key *key,
+ u16 zone, struct sk_buff *skb)
+{
+ struct ovs_skb_cb ovs_cb = *OVS_CB(skb);
+
+ if (key->eth.type == htons(ETH_P_IP)) {
+ enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
+ int err;
+
+ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+ err = ip_defrag(skb, user);
+ if (err)
+ return err;
+
+ ovs_cb.mru = IPCB(skb)->frag_max_size;
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+ } else if (key->eth.type == htons(ETH_P_IPV6)) {
+ enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
+ struct sk_buff *reasm;
+
+ memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
+ reasm = nf_ct_frag6_gather(skb, user);
+ if (!reasm)
+ return -EINPROGRESS;
+
+ if (skb == reasm) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ /* Don't free 'skb' even though it is one of the original
+ * fragments, as we're going to morph it into the head.
+ */
+ skb_get(skb);
+ nf_ct_frag6_consume_orig(reasm);
+
+ key->ip.proto = ipv6_hdr(reasm)->nexthdr;
+ skb_morph(skb, reasm);
+ skb->next = reasm->next;
+ consume_skb(reasm);
+ ovs_cb.mru = IP6CB(skb)->frag_max_size;
+#endif
+ } else {
+ kfree_skb(skb);
+ return -EPFNOSUPPORT;
+ }
+
+ key->ip.frag = OVS_FRAG_TYPE_NONE;
+ skb_clear_hash(skb);
+ skb->ignore_df = 1;
+ *OVS_CB(skb) = ovs_cb;
+
+ return 0;
+}
+
+static struct nf_conntrack_expect *
+ovs_ct_expect_find(struct net *net, const struct nf_conntrack_zone *zone,
+ u16 proto, const struct sk_buff *skb)
+{
+ struct nf_conntrack_tuple tuple;
+
+ if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), proto, &tuple))
+ return NULL;
+ return __nf_ct_expect_find(net, zone, &tuple);
+}
+
+/* Determine whether skb->nfct is equal to the result of conntrack lookup. */
+static bool skb_nfct_cached(const struct net *net, const struct sk_buff *skb,
+ const struct ovs_conntrack_info *info)
+{
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct)
+ return false;
+ if (!net_eq(net, read_pnet(&ct->ct_net)))
+ return false;
+ if (!nf_ct_zone_equal_any(info->ct, nf_ct_zone(ct)))
+ return false;
+ if (info->helper) {
+ struct nf_conn_help *help;
+
+ help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
+ if (help && rcu_access_pointer(help->helper) != info->helper)
+ return false;
+ }
+
+ return true;
+}
+
+static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
+ const struct ovs_conntrack_info *info,
+ struct sk_buff *skb)
+{
+ /* If we are recirculating packets to match on conntrack fields and
+ * committing with a separate conntrack action, then we don't need to
+ * actually run the packet through conntrack twice unless it's for a
+ * different zone.
+ */
+ if (!skb_nfct_cached(net, skb, info)) {
+ struct nf_conn *tmpl = info->ct;
+
+ /* Associate skb with specified zone. */
+ if (tmpl) {
+ if (skb->nfct)
+ nf_conntrack_put(skb->nfct);
+ nf_conntrack_get(&tmpl->ct_general);
+ skb->nfct = &tmpl->ct_general;
+ skb->nfctinfo = IP_CT_NEW;
+ }
+
+ if (nf_conntrack_in(net, info->family, NF_INET_PRE_ROUTING,
+ skb) != NF_ACCEPT)
+ return -ENOENT;
+
+ if (ovs_ct_helper(skb, info->family) != NF_ACCEPT) {
+ WARN_ONCE(1, "helper rejected packet");
+ return -EINVAL;
+ }
+ }
+
+ ovs_ct_update_key(skb, key, true);
+
+ return 0;
+}
+
+/* Lookup connection and read fields into key. */
+static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
+ const struct ovs_conntrack_info *info,
+ struct sk_buff *skb)
+{
+ struct nf_conntrack_expect *exp;
+
+ exp = ovs_ct_expect_find(net, &info->zone, info->family, skb);
+ if (exp) {
+ u8 state;
+
+ state = OVS_CS_F_TRACKED | OVS_CS_F_NEW | OVS_CS_F_RELATED;
+ __ovs_ct_update_key(key, state, &info->zone, exp->master);
+ } else {
+ int err;
+
+ err = __ovs_ct_lookup(net, key, info, skb);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
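+
+/* Expectations are registered by conntrack helpers (e.g. an FTP helper
+ * anticipating the related data connection), so matching one here lets
+ * the very first packet of such a flow be reported as both new and
+ * related to its master connection before it has a conntrack entry of
+ * its own.
+ */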
+
+/* Lookup connection and confirm if unconfirmed. */
+static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
+ const struct ovs_conntrack_info *info,
+ struct sk_buff *skb)
+{
+ u8 state;
+ int err;
+
+ state = key->ct.state;
+ if (key->ct.zone == info->zone.id &&
+ ((state & OVS_CS_F_TRACKED) && !(state & OVS_CS_F_NEW))) {
+ /* Previous lookup has shown that this connection is already
+ * tracked and committed. Skip committing.
+ */
+ return 0;
+ }
+
+ err = __ovs_ct_lookup(net, key, info, skb);
+ if (err)
+ return err;
+ if (nf_conntrack_confirm(skb) != NF_ACCEPT)
+ return -EINVAL;
+
+ return 0;
+}
+
+static bool labels_nonzero(const struct ovs_key_ct_labels *labels)
+{
+ size_t i;
+
+ for (i = 0; i < sizeof(*labels); i++)
+ if (labels->ct_labels[i])
+ return true;
+
+ return false;
+}
+
+/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
+ * value if 'skb' is freed.
+ */
+int ovs_ct_execute(struct net *net, struct sk_buff *skb,
+ struct sw_flow_key *key,
+ const struct ovs_conntrack_info *info)
+{
+ int nh_ofs;
+ int err;
+
+ /* The conntrack module expects to be working at L3. */
+ nh_ofs = skb_network_offset(skb);
+ skb_pull(skb, nh_ofs);
+
+ if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
+ err = handle_fragments(net, key, info->zone.id, skb);
+ if (err)
+ return err;
+ }
+
+ if (info->commit)
+ err = ovs_ct_commit(net, key, info, skb);
+ else
+ err = ovs_ct_lookup(net, key, info, skb);
+ if (err)
+ goto err;
+
+ if (info->mark.mask) {
+ err = ovs_ct_set_mark(skb, key, info->mark.value,
+ info->mark.mask);
+ if (err)
+ goto err;
+ }
+ if (labels_nonzero(&info->labels.mask))
+ err = ovs_ct_set_labels(skb, key, &info->labels.value,
+ &info->labels.mask);
+err:
+ skb_push(skb, nh_ofs);
+ if (err)
+ kfree_skb(skb);
+ return err;
+}
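+
+/* Note the error convention: once the L2 header has been pulled, 'skb'
+ * is either stolen by the reassembler (-EINPROGRESS), already freed by
+ * handle_fragments(), or freed here after the header offset is restored,
+ * so callers never free it themselves on failure.
+ */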
+
+static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
+ const struct sw_flow_key *key, bool log)
+{
+ struct nf_conntrack_helper *helper;
+ struct nf_conn_help *help;
+
+ helper = nf_conntrack_helper_try_module_get(name, info->family,
+ key->ip.proto);
+ if (!helper) {
+ OVS_NLERR(log, "Unknown helper \"%s\"", name);
+ return -EINVAL;
+ }
+
+ help = nf_ct_helper_ext_add(info->ct, helper, GFP_KERNEL);
+ if (!help) {
+ module_put(helper->me);
+ return -ENOMEM;
+ }
+
+ rcu_assign_pointer(help->helper, helper);
+ info->helper = helper;
+ return 0;
+}
+
+static const struct ovs_ct_len_tbl ovs_ct_attr_lens[OVS_CT_ATTR_MAX + 1] = {
+ [OVS_CT_ATTR_COMMIT] = { .minlen = 0, .maxlen = 0 },
+ [OVS_CT_ATTR_ZONE] = { .minlen = sizeof(u16),
+ .maxlen = sizeof(u16) },
+ [OVS_CT_ATTR_MARK] = { .minlen = sizeof(struct md_mark),
+ .maxlen = sizeof(struct md_mark) },
+ [OVS_CT_ATTR_LABELS] = { .minlen = sizeof(struct md_labels),
+ .maxlen = sizeof(struct md_labels) },
+ [OVS_CT_ATTR_HELPER] = { .minlen = 1,
+ .maxlen = NF_CT_HELPER_NAME_LEN }
+};
+
+static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
+ const char **helper, bool log)
+{
+ struct nlattr *a;
+ int rem;
+
+ nla_for_each_nested(a, attr, rem) {
+		int type = nla_type(a);
+		int maxlen;
+		int minlen;
+
+		if (type > OVS_CT_ATTR_MAX) {
+			OVS_NLERR(log,
+				  "Unknown conntrack attr (type=%d, max=%d)",
+				  type, OVS_CT_ATTR_MAX);
+			return -EINVAL;
+		}
+
+		/* Index the length table only after the bounds check above. */
+		maxlen = ovs_ct_attr_lens[type].maxlen;
+		minlen = ovs_ct_attr_lens[type].minlen;
+ if (nla_len(a) < minlen || nla_len(a) > maxlen) {
+ OVS_NLERR(log,
+ "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
+ type, nla_len(a), maxlen);
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case OVS_CT_ATTR_COMMIT:
+ info->commit = true;
+ break;
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+ case OVS_CT_ATTR_ZONE:
+ info->zone.id = nla_get_u16(a);
+ break;
+#endif
+#ifdef CONFIG_NF_CONNTRACK_MARK
+ case OVS_CT_ATTR_MARK: {
+ struct md_mark *mark = nla_data(a);
+
+ if (!mark->mask) {
+ OVS_NLERR(log, "ct_mark mask cannot be 0");
+ return -EINVAL;
+ }
+ info->mark = *mark;
+ break;
+ }
+#endif
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+ case OVS_CT_ATTR_LABELS: {
+ struct md_labels *labels = nla_data(a);
+
+ if (!labels_nonzero(&labels->mask)) {
+ OVS_NLERR(log, "ct_labels mask cannot be 0");
+ return -EINVAL;
+ }
+ info->labels = *labels;
+ break;
+ }
+#endif
+ case OVS_CT_ATTR_HELPER:
+ *helper = nla_data(a);
+ if (!memchr(*helper, '\0', nla_len(a))) {
+ OVS_NLERR(log, "Invalid conntrack helper");
+ return -EINVAL;
+ }
+ break;
+ default:
+ OVS_NLERR(log, "Unknown conntrack attr (%d)",
+ type);
+ return -EINVAL;
+ }
+ }
+
+ if (rem > 0) {
+ OVS_NLERR(log, "Conntrack attr has %d unknown bytes", rem);
+ return -EINVAL;
+ }
+
+ return 0;
+}
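+
+/* For reference, the wire layout parse_ct() accepts is a standard nested
+ * netlink attribute stream; a single OVS_CT_ATTR_ZONE, for instance,
+ * occupies NLA_HDRLEN + 2 bytes (values below are illustrative):
+ *
+ *	struct nlattr hdr = {
+ *		.nla_len  = NLA_HDRLEN + sizeof(u16),	// 4 + 2 = 6
+ *		.nla_type = OVS_CT_ATTR_ZONE,
+ *	};
+ *	// followed by the u16 zone id, then padding to NLA_ALIGN(6) = 8
+ *
+ * nla_len() then reports the 2-byte payload, which satisfies the
+ * [minlen, maxlen] bounds from ovs_ct_attr_lens[].
+ */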
+
+bool ovs_ct_verify(struct net *net, enum ovs_key_attr attr)
+{
+ if (attr == OVS_KEY_ATTR_CT_STATE)
+ return true;
+ if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
+ attr == OVS_KEY_ATTR_CT_ZONE)
+ return true;
+ if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
+ attr == OVS_KEY_ATTR_CT_MARK)
+ return true;
+ if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
+ attr == OVS_KEY_ATTR_CT_LABELS) {
+ struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+
+ return ovs_net->xt_label;
+ }
+
+ return false;
+}
+
+int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
+ const struct sw_flow_key *key,
+ struct sw_flow_actions **sfa, bool log)
+{
+ struct ovs_conntrack_info ct_info;
+ const char *helper = NULL;
+ u16 family;
+ int err;
+
+ family = key_to_nfproto(key);
+ if (family == NFPROTO_UNSPEC) {
+ OVS_NLERR(log, "ct family unspecified");
+ return -EINVAL;
+ }
+
+ memset(&ct_info, 0, sizeof(ct_info));
+ ct_info.family = family;
+
+ nf_ct_zone_init(&ct_info.zone, NF_CT_DEFAULT_ZONE_ID,
+ NF_CT_DEFAULT_ZONE_DIR, 0);
+
+ err = parse_ct(attr, &ct_info, &helper, log);
+ if (err)
+ return err;
+
+ /* Set up template for tracking connections in specific zones. */
+ ct_info.ct = nf_ct_tmpl_alloc(net, &ct_info.zone, GFP_KERNEL);
+ if (!ct_info.ct) {
+ OVS_NLERR(log, "Failed to allocate conntrack template");
+ return -ENOMEM;
+ }
+ if (helper) {
+ err = ovs_ct_add_helper(&ct_info, helper, key, log);
+ if (err)
+ goto err_free_ct;
+ }
+
+ err = ovs_nla_add_action(sfa, OVS_ACTION_ATTR_CT, &ct_info,
+ sizeof(ct_info), log);
+ if (err)
+ goto err_free_ct;
+
+ __set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
+ nf_conntrack_get(&ct_info.ct->ct_general);
+ return 0;
+err_free_ct:
+ nf_conntrack_free(ct_info.ct);
+ return err;
+}
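+
+/* The template is flagged IPS_CONFIRMED so conntrack treats it as a
+ * placeholder that is never inserted into the connection table, and the
+ * reference taken above keeps it alive for as long as the installed flow
+ * holds this action (released again in ovs_ct_free_action()).
+ */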
+
+int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info,
+ struct sk_buff *skb)
+{
+ struct nlattr *start;
+
+ start = nla_nest_start(skb, OVS_ACTION_ATTR_CT);
+ if (!start)
+ return -EMSGSIZE;
+
+ if (ct_info->commit && nla_put_flag(skb, OVS_CT_ATTR_COMMIT))
+ return -EMSGSIZE;
+ if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
+ nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id))
+ return -EMSGSIZE;
+ if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && ct_info->mark.mask &&
+ nla_put(skb, OVS_CT_ATTR_MARK, sizeof(ct_info->mark),
+ &ct_info->mark))
+ return -EMSGSIZE;
+ if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
+ labels_nonzero(&ct_info->labels.mask) &&
+ nla_put(skb, OVS_CT_ATTR_LABELS, sizeof(ct_info->labels),
+ &ct_info->labels))
+ return -EMSGSIZE;
+ if (ct_info->helper) {
+ if (nla_put_string(skb, OVS_CT_ATTR_HELPER,
+ ct_info->helper->name))
+ return -EMSGSIZE;
+ }
+
+ nla_nest_end(skb, start);
+
+ return 0;
+}
+
+void ovs_ct_free_action(const struct nlattr *a)
+{
+ struct ovs_conntrack_info *ct_info = nla_data(a);
+
+ if (ct_info->helper)
+ module_put(ct_info->helper->me);
+ if (ct_info->ct)
+ nf_ct_put(ct_info->ct);
+}
+
+void ovs_ct_init(struct net *net)
+{
+ unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE;
+ struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+
+ if (nf_connlabels_get(net, n_bits)) {
+ ovs_net->xt_label = false;
+ OVS_NLERR(true, "Failed to set connlabel length");
+ } else {
+ ovs_net->xt_label = true;
+ }
+}
+
+void ovs_ct_exit(struct net *net)
+{
+ struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+
+ if (ovs_net->xt_label)
+ nf_connlabels_put(net);
+}
diff --git a/net/openvswitch/conntrack.h b/net/openvswitch/conntrack.h
new file mode 100644
index 000000000..a7544f405
--- /dev/null
+++ b/net/openvswitch/conntrack.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2015 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef OVS_CONNTRACK_H
+#define OVS_CONNTRACK_H 1
+
+#include "flow.h"
+
+struct ovs_conntrack_info;
+enum ovs_key_attr;
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+void ovs_ct_init(struct net *);
+void ovs_ct_exit(struct net *);
+bool ovs_ct_verify(struct net *, enum ovs_key_attr attr);
+int ovs_ct_copy_action(struct net *, const struct nlattr *,
+ const struct sw_flow_key *, struct sw_flow_actions **,
+ bool log);
+int ovs_ct_action_to_attr(const struct ovs_conntrack_info *, struct sk_buff *);
+
+int ovs_ct_execute(struct net *, struct sk_buff *, struct sw_flow_key *,
+ const struct ovs_conntrack_info *);
+
+void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key);
+int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb);
+void ovs_ct_free_action(const struct nlattr *a);
+
+#define CT_SUPPORTED_MASK (OVS_CS_F_NEW | OVS_CS_F_ESTABLISHED | \
+ OVS_CS_F_RELATED | OVS_CS_F_REPLY_DIR | \
+ OVS_CS_F_INVALID | OVS_CS_F_TRACKED)
+#else
+#include <linux/errno.h>
+
+static inline void ovs_ct_init(struct net *net) { }
+
+static inline void ovs_ct_exit(struct net *net) { }
+
+static inline bool ovs_ct_verify(struct net *net, int attr)
+{
+ return false;
+}
+
+static inline int ovs_ct_copy_action(struct net *net, const struct nlattr *nla,
+ const struct sw_flow_key *key,
+ struct sw_flow_actions **acts, bool log)
+{
+ return -ENOTSUPP;
+}
+
+static inline int ovs_ct_action_to_attr(const struct ovs_conntrack_info *info,
+ struct sk_buff *skb)
+{
+ return -ENOTSUPP;
+}
+
+static inline int ovs_ct_execute(struct net *net, struct sk_buff *skb,
+ struct sw_flow_key *key,
+ const struct ovs_conntrack_info *info)
+{
+ kfree_skb(skb);
+ return -ENOTSUPP;
+}
+
+static inline void ovs_ct_fill_key(const struct sk_buff *skb,
+ struct sw_flow_key *key)
+{
+ key->ct.state = 0;
+ key->ct.zone = 0;
+ key->ct.mark = 0;
+ memset(&key->ct.labels, 0, sizeof(key->ct.labels));
+}
+
+static inline int ovs_ct_put_key(const struct sw_flow_key *key,
+ struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline void ovs_ct_free_action(const struct nlattr *a) { }
+
+#define CT_SUPPORTED_MASK 0
+#endif /* CONFIG_NF_CONNTRACK */
+#endif /* ovs_conntrack.h */
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index ff332d1b9..c5d08ee37 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -176,7 +176,7 @@ static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
const char *ovs_dp_name(const struct datapath *dp)
{
struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
- return vport->ops->get_name(vport);
+ return ovs_vport_name(vport);
}
static int get_dpifindex(const struct datapath *dp)
@@ -188,7 +188,7 @@ static int get_dpifindex(const struct datapath *dp)
local = ovs_vport_rcu(dp, OVSP_LOCAL);
if (local)
- ifindex = netdev_vport_priv(local)->dev->ifindex;
+ ifindex = local->dev->ifindex;
else
ifindex = 0;
@@ -275,6 +275,7 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
memset(&upcall, 0, sizeof(upcall));
upcall.cmd = OVS_PACKET_CMD_MISS;
upcall.portid = ovs_vport_find_upcall_portid(p, skb);
+ upcall.mru = OVS_CB(skb)->mru;
error = ovs_dp_upcall(dp, skb, key, &upcall);
if (unlikely(error))
kfree_skb(skb);
@@ -400,9 +401,23 @@ static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
if (upcall_info->actions_len)
size += nla_total_size(upcall_info->actions_len);
+ /* OVS_PACKET_ATTR_MRU */
+ if (upcall_info->mru)
+ size += nla_total_size(sizeof(upcall_info->mru));
+
return size;
}
+static void pad_packet(struct datapath *dp, struct sk_buff *skb)
+{
+ if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
+ size_t plen = NLA_ALIGN(skb->len) - skb->len;
+
+ if (plen > 0)
+ memset(skb_put(skb, plen), 0, plen);
+ }
+}
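+
+/* NLA_ALIGN() rounds up to the next 4-byte boundary, so the pad is 0-3
+ * zero bytes, e.g. skb->len == 17 pads by 3, skb->len == 20 by 0.
+ */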
+
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
const struct sw_flow_key *key,
const struct dp_upcall_info *upcall_info)
@@ -475,8 +490,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
if (upcall_info->egress_tun_info) {
nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
- err = ovs_nla_put_egress_tunnel_key(user_skb,
- upcall_info->egress_tun_info);
+ err = ovs_nla_put_tunnel_info(user_skb,
+ upcall_info->egress_tun_info);
BUG_ON(err);
nla_nest_end(user_skb, nla);
}
@@ -492,6 +507,16 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
nla_nest_cancel(user_skb, nla);
}
+ /* Add OVS_PACKET_ATTR_MRU */
+ if (upcall_info->mru) {
+ if (nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU,
+ upcall_info->mru)) {
+ err = -ENOBUFS;
+ goto out;
+ }
+ pad_packet(dp, user_skb);
+ }
+
/* Only reserve room for attribute header, packet data is added
* in skb_zerocopy() */
if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
@@ -505,12 +530,7 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
goto out;
/* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
- if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
- size_t plen = NLA_ALIGN(user_skb->len) - user_skb->len;
-
- if (plen > 0)
- memset(skb_put(user_skb, plen), 0, plen);
- }
+ pad_packet(dp, user_skb);
((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
@@ -527,6 +547,7 @@ out:
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
struct ovs_header *ovs_header = info->userhdr;
+ struct net *net = sock_net(skb->sk);
struct nlattr **a = info->attrs;
struct sw_flow_actions *acts;
struct sk_buff *packet;
@@ -535,6 +556,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
struct datapath *dp;
struct ethhdr *eth;
struct vport *input_vport;
+ u16 mru = 0;
int len;
int err;
bool log = !a[OVS_PACKET_ATTR_PROBE];
@@ -564,29 +586,35 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
else
packet->protocol = htons(ETH_P_802_2);
+ /* Set packet's mru */
+ if (a[OVS_PACKET_ATTR_MRU]) {
+ mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
+ packet->ignore_df = 1;
+ }
+ OVS_CB(packet)->mru = mru;
+
/* Build an sw_flow for sending this packet. */
flow = ovs_flow_alloc();
err = PTR_ERR(flow);
if (IS_ERR(flow))
goto err_kfree_skb;
- err = ovs_flow_key_extract_userspace(a[OVS_PACKET_ATTR_KEY], packet,
- &flow->key, log);
+ err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY],
+ packet, &flow->key, log);
if (err)
goto err_flow_free;
- err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
+ err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS],
&flow->key, &acts, log);
if (err)
goto err_flow_free;
rcu_assign_pointer(flow->sf_acts, acts);
- OVS_CB(packet)->egress_tun_info = NULL;
packet->priority = flow->key.phy.priority;
packet->mark = flow->key.phy.skb_mark;
rcu_read_lock();
- dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
+ dp = get_dp_rcu(net, ovs_header->dp_ifindex);
err = -ENODEV;
if (!dp)
goto err_unlock;
@@ -598,6 +626,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
if (!input_vport)
goto err_unlock;
+ packet->dev = input_vport->dev;
OVS_CB(packet)->input_vport = input_vport;
sf_acts = rcu_dereference(flow->sf_acts);
@@ -624,6 +653,7 @@ static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
[OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
+ [OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
};
static const struct genl_ops dp_packet_genl_ops[] = {
@@ -713,7 +743,7 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
/* OVS_FLOW_ATTR_ACTIONS */
if (should_fill_actions(ufid_flags))
- len += nla_total_size(acts->actions_len);
+ len += nla_total_size(acts->orig_len);
return len
+ nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
@@ -880,6 +910,7 @@ static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
+ struct net *net = sock_net(skb->sk);
struct nlattr **a = info->attrs;
struct ovs_header *ovs_header = info->userhdr;
struct sw_flow *flow = NULL, *new_flow;
@@ -915,7 +946,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
/* Extract key. */
ovs_match_init(&match, &key, &mask);
- error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY],
+ error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
a[OVS_FLOW_ATTR_MASK], log);
if (error)
goto err_kfree_flow;
@@ -929,8 +960,8 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
goto err_kfree_flow;
/* Validate actions. */
- error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key,
- &acts, log);
+ error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
+ &new_flow->key, &acts, log);
if (error) {
OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
goto err_kfree_flow;
@@ -944,7 +975,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
}
ovs_lock();
- dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
+ dp = get_dp(net, ovs_header->dp_ifindex);
if (unlikely(!dp)) {
error = -ENODEV;
goto err_unlock_ovs;
@@ -1018,7 +1049,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
}
ovs_unlock();
- ovs_nla_free_flow_actions(old_acts);
+ ovs_nla_free_flow_actions_rcu(old_acts);
ovs_flow_free(new_flow, false);
}
@@ -1030,7 +1061,7 @@ err_unlock_ovs:
ovs_unlock();
kfree_skb(reply);
err_kfree_acts:
- kfree(acts);
+ ovs_nla_free_flow_actions(acts);
err_kfree_flow:
ovs_flow_free(new_flow, false);
error:
@@ -1038,7 +1069,8 @@ error:
}
/* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
-static struct sw_flow_actions *get_flow_actions(const struct nlattr *a,
+static struct sw_flow_actions *get_flow_actions(struct net *net,
+ const struct nlattr *a,
const struct sw_flow_key *key,
const struct sw_flow_mask *mask,
bool log)
@@ -1048,7 +1080,7 @@ static struct sw_flow_actions *get_flow_actions(const struct nlattr *a,
int error;
ovs_flow_mask_key(&masked_key, key, true, mask);
- error = ovs_nla_copy_actions(a, &masked_key, &acts, log);
+ error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
if (error) {
OVS_NLERR(log,
"Actions may not be safe on all matching packets");
@@ -1060,6 +1092,7 @@ static struct sw_flow_actions *get_flow_actions(const struct nlattr *a,
static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
+ struct net *net = sock_net(skb->sk);
struct nlattr **a = info->attrs;
struct ovs_header *ovs_header = info->userhdr;
struct sw_flow_key key;
@@ -1084,15 +1117,15 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
ovs_match_init(&match, &key, &mask);
- error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY],
+ error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
a[OVS_FLOW_ATTR_MASK], log);
if (error)
goto error;
/* Validate actions. */
if (a[OVS_FLOW_ATTR_ACTIONS]) {
- acts = get_flow_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, &mask,
- log);
+ acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], &key,
+ &mask, log);
if (IS_ERR(acts)) {
error = PTR_ERR(acts);
goto error;
@@ -1108,7 +1141,7 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
}
ovs_lock();
- dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
+ dp = get_dp(net, ovs_header->dp_ifindex);
if (unlikely(!dp)) {
error = -ENODEV;
goto err_unlock_ovs;
@@ -1157,7 +1190,7 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
if (reply)
ovs_notify(&dp_flow_genl_family, reply, info);
if (old_acts)
- ovs_nla_free_flow_actions(old_acts);
+ ovs_nla_free_flow_actions_rcu(old_acts);
return 0;
@@ -1165,7 +1198,7 @@ err_unlock_ovs:
ovs_unlock();
kfree_skb(reply);
err_kfree_acts:
- kfree(acts);
+ ovs_nla_free_flow_actions(acts);
error:
return error;
}
@@ -1174,6 +1207,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
struct ovs_header *ovs_header = info->userhdr;
+ struct net *net = sock_net(skb->sk);
struct sw_flow_key key;
struct sk_buff *reply;
struct sw_flow *flow;
@@ -1188,7 +1222,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
if (a[OVS_FLOW_ATTR_KEY]) {
ovs_match_init(&match, &key, NULL);
- err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL,
+ err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
log);
} else if (!ufid_present) {
OVS_NLERR(log,
@@ -1232,6 +1266,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
struct ovs_header *ovs_header = info->userhdr;
+ struct net *net = sock_net(skb->sk);
struct sw_flow_key key;
struct sk_buff *reply;
struct sw_flow *flow = NULL;
@@ -1246,8 +1281,8 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
if (a[OVS_FLOW_ATTR_KEY]) {
ovs_match_init(&match, &key, NULL);
- err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL,
- log);
+ err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
+ NULL, log);
if (unlikely(err))
return err;
}
@@ -1800,7 +1835,7 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
nla_put_string(skb, OVS_VPORT_ATTR_NAME,
- vport->ops->get_name(vport)))
+ ovs_vport_name(vport)))
goto nla_put_failure;
ovs_vport_get_stats(vport, &vport_stats);
@@ -2203,6 +2238,7 @@ static int __net_init ovs_init_net(struct net *net)
INIT_LIST_HEAD(&ovs_net->dps);
INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
+ ovs_ct_init(net);
return 0;
}
@@ -2219,13 +2255,10 @@ static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
struct vport *vport;
hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
- struct netdev_vport *netdev_vport;
-
if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
continue;
- netdev_vport = netdev_vport_priv(vport);
- if (dev_net(netdev_vport->dev) == dnet)
+ if (dev_net(vport->dev) == dnet)
list_add(&vport->detach_list, head);
}
}
@@ -2240,6 +2273,7 @@ static void __net_exit ovs_exit_net(struct net *dnet)
struct net *net;
LIST_HEAD(head);
+ ovs_ct_exit(dnet);
ovs_lock();
list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
__dp_destroy(dp);
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index cd691e935..67bdecd9f 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -25,10 +25,11 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>
+#include <net/ip_tunnels.h>
+#include "conntrack.h"
#include "flow.h"
#include "flow_table.h"
-#include "vport.h"
#define DP_MAX_PORTS USHRT_MAX
#define DP_VPORT_HASH_BUCKETS 1024
@@ -92,14 +93,14 @@ struct datapath {
/**
* struct ovs_skb_cb - OVS data in skb CB
- * @egress_tun_key: Tunnel information about this packet on egress path.
- * NULL if the packet is not being tunneled.
* @input_vport: The original vport packet came in on. This value is cached
* when a packet is received by OVS.
+ * @mru: The maximum received fragment size; 0 if the packet is not
+ * fragmented.
*/
struct ovs_skb_cb {
- struct ovs_tunnel_info *egress_tun_info;
struct vport *input_vport;
+ u16 mru;
};
#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
@@ -112,14 +113,16 @@ struct ovs_skb_cb {
* then no packet is sent and the packet is accounted in the datapath's @n_lost
* counter.
* @egress_tun_info: If nonnull, becomes %OVS_PACKET_ATTR_EGRESS_TUN_KEY.
+ * @mru: If not zero, the maximum received IP fragment size.
*/
struct dp_upcall_info {
- const struct ovs_tunnel_info *egress_tun_info;
+ struct ip_tunnel_info *egress_tun_info;
const struct nlattr *userdata;
const struct nlattr *actions;
int actions_len;
u32 portid;
u8 cmd;
+ u16 mru;
};
/**
@@ -130,7 +133,9 @@ struct dp_upcall_info {
struct ovs_net {
struct list_head dps;
struct work_struct dp_notify_work;
- struct vport_net vport_net;
+
+	/* Set if the netns holds a connlabels reference, enabling ct labels. */
+ bool xt_label;
};
extern int ovs_net_id;
@@ -199,6 +204,10 @@ void ovs_dp_notify_wq(struct work_struct *work);
int action_fifos_init(void);
void action_fifos_exit(void);
+/* 'KEY' must not have any bits set outside of the 'MASK' */
+#define OVS_MASKED(OLD, KEY, MASK) ((KEY) | ((OLD) & ~(MASK)))
+#define OVS_SET_MASKED(OLD, KEY, MASK) ((OLD) = OVS_MASKED(OLD, KEY, MASK))
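+/* For example, with OLD = 0xabcd, KEY = 0x0004, MASK = 0x000f:
+ *   OVS_MASKED(OLD, KEY, MASK) = 0x0004 | (0xabcd & ~0x000f) = 0xabc4
+ * i.e. bits outside the mask keep their old value and masked bits take
+ * the new one, which is why KEY must not set bits outside MASK.
+ */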
+
#define OVS_NLERR(logging_allowed, fmt, ...) \
do { \
if (logging_allowed && net_ratelimit()) \
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
index 2c631fe76..a7a80a6b7 100644
--- a/net/openvswitch/dp_notify.c
+++ b/net/openvswitch/dp_notify.c
@@ -58,13 +58,10 @@ void ovs_dp_notify_wq(struct work_struct *work)
struct hlist_node *n;
hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) {
- struct netdev_vport *netdev_vport;
-
if (vport->ops->type != OVS_VPORT_TYPE_NETDEV)
continue;
- netdev_vport = netdev_vport_priv(vport);
- if (!(netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH))
+ if (!(vport->dev->priv_flags & IFF_OVS_DATAPATH))
dp_detach_port_notify(vport);
}
}
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index bc7b0aba9..c8db44ab2 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -46,9 +46,11 @@
#include <net/mpls.h>
#include <net/ndisc.h>
+#include "conntrack.h"
#include "datapath.h"
#include "flow.h"
#include "flow_netlink.h"
+#include "vport.h"
u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
@@ -271,8 +273,6 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
key->ipv6.addr.dst = nh->daddr;
payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
- if (unlikely(payload_ofs < 0))
- return -EINVAL;
if (frag_off) {
if (frag_off & htons(~0x7))
@@ -283,6 +283,13 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
key->ip.frag = OVS_FRAG_TYPE_NONE;
}
+ /* Delayed handling of error in ipv6_skip_exthdr() as it
+ * always sets frag_off to a valid value which may be
+ * used to set key->ip.frag above.
+ */
+ if (unlikely(payload_ofs < 0))
+ return -EPROTO;
+
nh_len = payload_ofs - nh_ofs;
skb_set_transport_header(skb, nh_ofs + nh_len);
key->ip.proto = nexthdr;
@@ -622,12 +629,16 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
nh_len = parse_ipv6hdr(skb, key);
if (unlikely(nh_len < 0)) {
- memset(&key->ip, 0, sizeof(key->ip));
- memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
- if (nh_len == -EINVAL) {
+ switch (nh_len) {
+ case -EINVAL:
+ memset(&key->ip, 0, sizeof(key->ip));
+ memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
+ /* fall-through */
+ case -EPROTO:
skb->transport_header = skb->network_header;
error = 0;
- } else {
+ break;
+ default:
error = nh_len;
}
return error;
@@ -682,19 +693,22 @@ int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
return key_extract(skb, key);
}
-int ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info,
+int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
struct sk_buff *skb, struct sw_flow_key *key)
{
/* Extract metadata from packet. */
if (tun_info) {
- memcpy(&key->tun_key, &tun_info->tunnel, sizeof(key->tun_key));
+ if (ip_tunnel_info_af(tun_info) != AF_INET)
+ return -EINVAL;
+ memcpy(&key->tun_key, &tun_info->key, sizeof(key->tun_key));
- if (tun_info->options) {
+ if (tun_info->options_len) {
BUILD_BUG_ON((1 << (sizeof(tun_info->options_len) *
8)) - 1
> sizeof(key->tun_opts));
- memcpy(TUN_METADATA_OPTS(key, tun_info->options_len),
- tun_info->options, tun_info->options_len);
+
+ ip_tunnel_info_opts_get(TUN_METADATA_OPTS(key, tun_info->options_len),
+ tun_info);
key->tun_opts_len = tun_info->options_len;
} else {
key->tun_opts_len = 0;
@@ -707,13 +721,14 @@ int ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info,
key->phy.priority = skb->priority;
key->phy.in_port = OVS_CB(skb)->input_vport->port_no;
key->phy.skb_mark = skb->mark;
+ ovs_ct_fill_key(skb, key);
key->ovs_flow_hash = 0;
key->recirc_id = 0;
return key_extract(skb, key);
}
-int ovs_flow_key_extract_userspace(const struct nlattr *attr,
+int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
struct sk_buff *skb,
struct sw_flow_key *key, bool log)
{
@@ -722,7 +737,7 @@ int ovs_flow_key_extract_userspace(const struct nlattr *attr,
memset(key, 0, OVS_SW_FLOW_KEY_METADATA_SIZE);
/* Extract metadata from netlink attributes. */
- err = ovs_nla_get_flow_metadata(attr, key, log);
+ err = ovs_nla_get_flow_metadata(net, attr, key, log);
if (err)
return err;
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index a076e445c..8cfa15a08 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -32,31 +32,11 @@
#include <linux/time.h>
#include <linux/flex_array.h>
#include <net/inet_ecn.h>
+#include <net/ip_tunnels.h>
+#include <net/dst_metadata.h>
struct sk_buff;
-/* Used to memset ovs_key_ipv4_tunnel padding. */
-#define OVS_TUNNEL_KEY_SIZE \
- (offsetof(struct ovs_key_ipv4_tunnel, tp_dst) + \
- FIELD_SIZEOF(struct ovs_key_ipv4_tunnel, tp_dst))
-
-struct ovs_key_ipv4_tunnel {
- __be64 tun_id;
- __be32 ipv4_src;
- __be32 ipv4_dst;
- __be16 tun_flags;
- u8 ipv4_tos;
- u8 ipv4_ttl;
- __be16 tp_src;
- __be16 tp_dst;
-} __packed __aligned(4); /* Minimize padding. */
-
-struct ovs_tunnel_info {
- struct ovs_key_ipv4_tunnel tunnel;
- const void *options;
- u8 options_len;
-};
-
/* Store options at the end of the array if they are less than the
* maximum size. This allows us to get the benefits of variable length
* matching for small options.
@@ -66,54 +46,9 @@ struct ovs_tunnel_info {
#define TUN_METADATA_OPTS(flow_key, opt_len) \
((void *)((flow_key)->tun_opts + TUN_METADATA_OFFSET(opt_len)))
-static inline void __ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
- __be32 saddr, __be32 daddr,
- u8 tos, u8 ttl,
- __be16 tp_src,
- __be16 tp_dst,
- __be64 tun_id,
- __be16 tun_flags,
- const void *opts,
- u8 opts_len)
-{
- tun_info->tunnel.tun_id = tun_id;
- tun_info->tunnel.ipv4_src = saddr;
- tun_info->tunnel.ipv4_dst = daddr;
- tun_info->tunnel.ipv4_tos = tos;
- tun_info->tunnel.ipv4_ttl = ttl;
- tun_info->tunnel.tun_flags = tun_flags;
-
- /* For the tunnel types on the top of IPsec, the tp_src and tp_dst of
- * the upper tunnel are used.
- * E.g: GRE over IPSEC, the tp_src and tp_port are zero.
- */
- tun_info->tunnel.tp_src = tp_src;
- tun_info->tunnel.tp_dst = tp_dst;
-
- /* Clear struct padding. */
- if (sizeof(tun_info->tunnel) != OVS_TUNNEL_KEY_SIZE)
- memset((unsigned char *)&tun_info->tunnel + OVS_TUNNEL_KEY_SIZE,
- 0, sizeof(tun_info->tunnel) - OVS_TUNNEL_KEY_SIZE);
-
- tun_info->options = opts;
- tun_info->options_len = opts_len;
-}
-
-static inline void ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
- const struct iphdr *iph,
- __be16 tp_src,
- __be16 tp_dst,
- __be64 tun_id,
- __be16 tun_flags,
- const void *opts,
- u8 opts_len)
-{
- __ovs_flow_tun_info_init(tun_info, iph->saddr, iph->daddr,
- iph->tos, iph->ttl,
- tp_src, tp_dst,
- tun_id, tun_flags,
- opts, opts_len);
-}
+struct ovs_tunnel_info {
+ struct metadata_dst *tun_dst;
+};
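+
+/* The per-flow tunnel metadata now lives in a refcounted metadata_dst
+ * (net/dst_metadata.h) instead of being copied around by value: actions
+ * that carry tunnel info hold a dst reference and drop it with
+ * dst_release() when the action list is freed.
+ */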
#define OVS_SW_FLOW_KEY_METADATA_SIZE \
(offsetof(struct sw_flow_key, recirc_id) + \
@@ -122,7 +57,7 @@ static inline void ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
struct sw_flow_key {
u8 tun_opts[255];
u8 tun_opts_len;
- struct ovs_key_ipv4_tunnel tun_key; /* Encapsulating tunnel key. */
+ struct ip_tunnel_key tun_key; /* Encapsulating tunnel key. */
struct {
u32 priority; /* Packet QoS priority. */
u32 skb_mark; /* SKB mark. */
@@ -176,6 +111,14 @@ struct sw_flow_key {
} nd;
} ipv6;
};
+ struct {
+ /* Connection tracking fields. */
+ u16 zone;
+ u32 mark;
+ u8 state;
+ struct ovs_key_ct_labels labels;
+ } ct;
+
} __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */
struct sw_flow_key_range {
@@ -209,6 +152,7 @@ struct sw_flow_id {
struct sw_flow_actions {
struct rcu_head rcu;
+ size_t orig_len; /* From flow_cmd_new netlink actions size */
u32 actions_len;
struct nlattr actions[];
};
@@ -273,11 +217,11 @@ void ovs_flow_stats_clear(struct sw_flow *);
u64 ovs_flow_used_time(unsigned long flow_jiffies);
int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key);
-int ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info,
+int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
struct sk_buff *skb,
struct sw_flow_key *key);
/* Extract key from packet coming from userspace. */
-int ovs_flow_key_extract_userspace(const struct nlattr *attr,
+int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
struct sk_buff *skb,
struct sw_flow_key *key, bool log);
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 624e41c42..38536c137 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -47,9 +47,9 @@
#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/mpls.h>
+#include <net/vxlan.h>
#include "flow_netlink.h"
-#include "vport-vxlan.h"
struct ovs_len_tbl {
int len;
@@ -57,6 +57,7 @@ struct ovs_len_tbl {
};
#define OVS_ATTR_NESTED -1
+#define OVS_ATTR_VARIABLE -2
static void update_range(struct sw_flow_match *match,
size_t offset, size_t size, bool is_mask)
@@ -281,7 +282,7 @@ size_t ovs_key_attr_size(void)
/* Whenever adding new OVS_KEY_ FIELDS, we should consider
* updating this function.
*/
- BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 22);
+ BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 26);
return nla_total_size(4) /* OVS_KEY_ATTR_PRIORITY */
+ nla_total_size(0) /* OVS_KEY_ATTR_TUNNEL */
@@ -290,6 +291,10 @@ size_t ovs_key_attr_size(void)
+ nla_total_size(4) /* OVS_KEY_ATTR_SKB_MARK */
+ nla_total_size(4) /* OVS_KEY_ATTR_DP_HASH */
+ nla_total_size(4) /* OVS_KEY_ATTR_RECIRC_ID */
+ + nla_total_size(4) /* OVS_KEY_ATTR_CT_STATE */
+ + nla_total_size(2) /* OVS_KEY_ATTR_CT_ZONE */
+ + nla_total_size(4) /* OVS_KEY_ATTR_CT_MARK */
+ + nla_total_size(16) /* OVS_KEY_ATTR_CT_LABELS */
+ nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */
+ nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */
+ nla_total_size(4) /* OVS_KEY_ATTR_VLAN */
@@ -300,6 +305,10 @@ size_t ovs_key_attr_size(void)
+ nla_total_size(28); /* OVS_KEY_ATTR_ND */
}
+static const struct ovs_len_tbl ovs_vxlan_ext_key_lens[OVS_VXLAN_EXT_MAX + 1] = {
+ [OVS_VXLAN_EXT_GBP] = { .len = sizeof(u32) },
+};
+
static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
[OVS_TUNNEL_KEY_ATTR_ID] = { .len = sizeof(u64) },
[OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = { .len = sizeof(u32) },
@@ -311,8 +320,9 @@ static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1]
[OVS_TUNNEL_KEY_ATTR_TP_SRC] = { .len = sizeof(u16) },
[OVS_TUNNEL_KEY_ATTR_TP_DST] = { .len = sizeof(u16) },
[OVS_TUNNEL_KEY_ATTR_OAM] = { .len = 0 },
- [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = OVS_ATTR_NESTED },
- [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = OVS_ATTR_NESTED },
+ [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = OVS_ATTR_VARIABLE },
+ [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = OVS_ATTR_NESTED,
+ .next = ovs_vxlan_ext_key_lens },
};
/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
@@ -339,8 +349,19 @@ static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
[OVS_KEY_ATTR_TUNNEL] = { .len = OVS_ATTR_NESTED,
.next = ovs_tunnel_key_lens, },
[OVS_KEY_ATTR_MPLS] = { .len = sizeof(struct ovs_key_mpls) },
+ [OVS_KEY_ATTR_CT_STATE] = { .len = sizeof(u32) },
+ [OVS_KEY_ATTR_CT_ZONE] = { .len = sizeof(u16) },
+ [OVS_KEY_ATTR_CT_MARK] = { .len = sizeof(u32) },
+ [OVS_KEY_ATTR_CT_LABELS] = { .len = sizeof(struct ovs_key_ct_labels) },
};
+static bool check_attr_len(unsigned int attr_len, unsigned int expected_len)
+{
+ return expected_len == attr_len ||
+ expected_len == OVS_ATTR_NESTED ||
+ expected_len == OVS_ATTR_VARIABLE;
+}
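+
+/* Exact-length attributes must match .len byte-for-byte, while
+ * OVS_ATTR_NESTED and OVS_ATTR_VARIABLE entries are validated later by
+ * their own parsers, e.g. check_attr_len(6, 4) is false but
+ * check_attr_len(6, OVS_ATTR_VARIABLE) is true.
+ */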
+
static bool is_all_zero(const u8 *fp, size_t size)
{
int i;
@@ -380,7 +401,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
}
expected_len = ovs_key_lens[type].len;
- if (nla_len(nla) != expected_len && expected_len != OVS_ATTR_NESTED) {
+ if (!check_attr_len(nla_len(nla), expected_len)) {
OVS_NLERR(log, "Key %d has unexpected len %d expected %d",
type, nla_len(nla), expected_len);
return -EINVAL;
@@ -465,29 +486,50 @@ static int genev_tun_opt_from_nlattr(const struct nlattr *a,
return 0;
}
-static const struct nla_policy vxlan_opt_policy[OVS_VXLAN_EXT_MAX + 1] = {
- [OVS_VXLAN_EXT_GBP] = { .type = NLA_U32 },
-};
-
-static int vxlan_tun_opt_from_nlattr(const struct nlattr *a,
+static int vxlan_tun_opt_from_nlattr(const struct nlattr *attr,
struct sw_flow_match *match, bool is_mask,
bool log)
{
- struct nlattr *tb[OVS_VXLAN_EXT_MAX+1];
+ struct nlattr *a;
+ int rem;
unsigned long opt_key_offset;
- struct ovs_vxlan_opts opts;
- int err;
+ struct vxlan_metadata opts;
BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));
- err = nla_parse_nested(tb, OVS_VXLAN_EXT_MAX, a, vxlan_opt_policy);
- if (err < 0)
- return err;
-
memset(&opts, 0, sizeof(opts));
+ nla_for_each_nested(a, attr, rem) {
+ int type = nla_type(a);
+
+ if (type > OVS_VXLAN_EXT_MAX) {
+ OVS_NLERR(log, "VXLAN extension %d out of range max %d",
+ type, OVS_VXLAN_EXT_MAX);
+ return -EINVAL;
+ }
- if (tb[OVS_VXLAN_EXT_GBP])
- opts.gbp = nla_get_u32(tb[OVS_VXLAN_EXT_GBP]);
+ if (!check_attr_len(nla_len(a),
+ ovs_vxlan_ext_key_lens[type].len)) {
+ OVS_NLERR(log, "VXLAN extension %d has unexpected len %d expected %d",
+ type, nla_len(a),
+ ovs_vxlan_ext_key_lens[type].len);
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case OVS_VXLAN_EXT_GBP:
+ opts.gbp = nla_get_u32(a);
+ break;
+ default:
+ OVS_NLERR(log, "Unknown VXLAN extension attribute %d",
+ type);
+ return -EINVAL;
+ }
+ }
+ if (rem) {
+ OVS_NLERR(log, "VXLAN extension message has %d unknown bytes.",
+ rem);
+ return -EINVAL;
+ }
if (!is_mask)
SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), false);
@@ -520,8 +562,8 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
return -EINVAL;
}
- if (ovs_tunnel_key_lens[type].len != nla_len(a) &&
- ovs_tunnel_key_lens[type].len != OVS_ATTR_NESTED) {
+ if (!check_attr_len(nla_len(a),
+ ovs_tunnel_key_lens[type].len)) {
OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d",
type, nla_len(a), ovs_tunnel_key_lens[type].len);
return -EINVAL;
@@ -534,19 +576,19 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
tun_flags |= TUNNEL_KEY;
break;
case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
- SW_FLOW_KEY_PUT(match, tun_key.ipv4_src,
+ SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.src,
nla_get_in_addr(a), is_mask);
break;
case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
- SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst,
+ SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.dst,
nla_get_in_addr(a), is_mask);
break;
case OVS_TUNNEL_KEY_ATTR_TOS:
- SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos,
+ SW_FLOW_KEY_PUT(match, tun_key.tos,
nla_get_u8(a), is_mask);
break;
case OVS_TUNNEL_KEY_ATTR_TTL:
- SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl,
+ SW_FLOW_KEY_PUT(match, tun_key.ttl,
nla_get_u8(a), is_mask);
ttl = true;
break;
@@ -609,7 +651,7 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
}
if (!is_mask) {
- if (!match->key->tun_key.ipv4_dst) {
+ if (!match->key->tun_key.u.ipv4.dst) {
OVS_NLERR(log, "IPv4 tunnel dst address is zero");
return -EINVAL;
}
@@ -626,7 +668,7 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
static int vxlan_opt_to_nlattr(struct sk_buff *skb,
const void *tun_opts, int swkey_tun_opts_len)
{
- const struct ovs_vxlan_opts *opts = tun_opts;
+ const struct vxlan_metadata *opts = tun_opts;
struct nlattr *nla;
nla = nla_nest_start(skb, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
@@ -641,24 +683,24 @@ static int vxlan_opt_to_nlattr(struct sk_buff *skb,
}
static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
- const struct ovs_key_ipv4_tunnel *output,
+ const struct ip_tunnel_key *output,
const void *tun_opts, int swkey_tun_opts_len)
{
if (output->tun_flags & TUNNEL_KEY &&
nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
return -EMSGSIZE;
- if (output->ipv4_src &&
+ if (output->u.ipv4.src &&
nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
- output->ipv4_src))
+ output->u.ipv4.src))
return -EMSGSIZE;
- if (output->ipv4_dst &&
+ if (output->u.ipv4.dst &&
nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
- output->ipv4_dst))
+ output->u.ipv4.dst))
return -EMSGSIZE;
- if (output->ipv4_tos &&
- nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
+ if (output->tos &&
+ nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->tos))
return -EMSGSIZE;
- if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl))
+ if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ttl))
return -EMSGSIZE;
if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
@@ -675,7 +717,7 @@ static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
if ((output->tun_flags & TUNNEL_OAM) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
return -EMSGSIZE;
- if (tun_opts) {
+ if (swkey_tun_opts_len) {
if (output->tun_flags & TUNNEL_GENEVE_OPT &&
nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
swkey_tun_opts_len, tun_opts))
@@ -689,7 +731,7 @@ static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
}
static int ipv4_tun_to_nlattr(struct sk_buff *skb,
- const struct ovs_key_ipv4_tunnel *output,
+ const struct ip_tunnel_key *output,
const void *tun_opts, int swkey_tun_opts_len)
{
struct nlattr *nla;
@@ -707,17 +749,17 @@ static int ipv4_tun_to_nlattr(struct sk_buff *skb,
return 0;
}
-int ovs_nla_put_egress_tunnel_key(struct sk_buff *skb,
- const struct ovs_tunnel_info *egress_tun_info)
+int ovs_nla_put_tunnel_info(struct sk_buff *skb,
+ struct ip_tunnel_info *tun_info)
{
- return __ipv4_tun_to_nlattr(skb, &egress_tun_info->tunnel,
- egress_tun_info->options,
- egress_tun_info->options_len);
+ return __ipv4_tun_to_nlattr(skb, &tun_info->key,
+ ip_tunnel_info_opts(tun_info),
+ tun_info->options_len);
}
-static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
- const struct nlattr **a, bool is_mask,
- bool log)
+static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
+ u64 *attrs, const struct nlattr **a,
+ bool is_mask, bool log)
{
if (*attrs & (1 << OVS_KEY_ATTR_DP_HASH)) {
u32 hash_val = nla_get_u32(a[OVS_KEY_ATTR_DP_HASH]);
@@ -768,16 +810,53 @@ static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
return -EINVAL;
*attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
}
+
+ if (*attrs & (1 << OVS_KEY_ATTR_CT_STATE) &&
+ ovs_ct_verify(net, OVS_KEY_ATTR_CT_STATE)) {
+ u32 ct_state = nla_get_u32(a[OVS_KEY_ATTR_CT_STATE]);
+
+ if (ct_state & ~CT_SUPPORTED_MASK) {
+ OVS_NLERR(log, "ct_state flags %08x unsupported",
+ ct_state);
+ return -EINVAL;
+ }
+
+ SW_FLOW_KEY_PUT(match, ct.state, ct_state, is_mask);
+ *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_STATE);
+ }
+ if (*attrs & (1 << OVS_KEY_ATTR_CT_ZONE) &&
+ ovs_ct_verify(net, OVS_KEY_ATTR_CT_ZONE)) {
+ u16 ct_zone = nla_get_u16(a[OVS_KEY_ATTR_CT_ZONE]);
+
+ SW_FLOW_KEY_PUT(match, ct.zone, ct_zone, is_mask);
+ *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ZONE);
+ }
+ if (*attrs & (1 << OVS_KEY_ATTR_CT_MARK) &&
+ ovs_ct_verify(net, OVS_KEY_ATTR_CT_MARK)) {
+ u32 mark = nla_get_u32(a[OVS_KEY_ATTR_CT_MARK]);
+
+ SW_FLOW_KEY_PUT(match, ct.mark, mark, is_mask);
+ *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_MARK);
+ }
+ if (*attrs & (1 << OVS_KEY_ATTR_CT_LABELS) &&
+ ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABELS)) {
+ const struct ovs_key_ct_labels *cl;
+
+ cl = nla_data(a[OVS_KEY_ATTR_CT_LABELS]);
+ SW_FLOW_KEY_MEMCPY(match, ct.labels, cl->ct_labels,
+ sizeof(*cl), is_mask);
+ *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABELS);
+ }
return 0;
}
-static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
- const struct nlattr **a, bool is_mask,
- bool log)
+static int ovs_key_from_nlattrs(struct net *net, struct sw_flow_match *match,
+ u64 attrs, const struct nlattr **a,
+ bool is_mask, bool log)
{
int err;
- err = metadata_from_nlattrs(match, &attrs, a, is_mask, log);
+ err = metadata_from_nlattrs(net, match, &attrs, a, is_mask, log);
if (err)
return err;
@@ -1012,10 +1091,16 @@ static void nlattr_set(struct nlattr *attr, u8 val,
/* The nlattr stream should already have been validated */
nla_for_each_nested(nla, attr, rem) {
- if (tbl && tbl[nla_type(nla)].len == OVS_ATTR_NESTED)
- nlattr_set(nla, val, tbl[nla_type(nla)].next);
- else
+		if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) {
+			const struct ovs_len_tbl *next = tbl[nla_type(nla)].next;
+
+			/* Recurse with the nested table without clobbering
+			 * 'tbl' for the remaining sibling attributes.
+			 */
+			nlattr_set(nla, val, next ? next : tbl);
+ } else {
memset(nla_data(nla), val, nla_len(nla));
+ }
+
+ if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
+ *(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
}
}
@@ -1029,6 +1114,7 @@ static void mask_set_nlattr(struct nlattr *attr, u8 val)
* mask. In case the 'mask' is NULL, the flow is treated as exact match
* flow. Otherwise, it is treated as a wildcarded flow, except the mask
* does not include any don't care bit.
+ * @net: Used to determine per-namespace field support.
* @match: receives the extracted flow match information.
* @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence. The fields should be those of the packet that triggered the creation
@@ -1039,7 +1125,7 @@ static void mask_set_nlattr(struct nlattr *attr, u8 val)
* probing for feature compatibility this should be passed in as false to
* suppress unnecessary error logging.
*/
-int ovs_nla_get_match(struct sw_flow_match *match,
+int ovs_nla_get_match(struct net *net, struct sw_flow_match *match,
const struct nlattr *nla_key,
const struct nlattr *nla_mask,
bool log)
@@ -1089,7 +1175,7 @@ int ovs_nla_get_match(struct sw_flow_match *match,
}
}
- err = ovs_key_from_nlattrs(match, key_attrs, a, false, log);
+ err = ovs_key_from_nlattrs(net, match, key_attrs, a, false, log);
if (err)
return err;
@@ -1116,7 +1202,7 @@ int ovs_nla_get_match(struct sw_flow_match *match,
/* The userspace does not send tunnel attributes that
* are 0, but we should not wildcard them nonetheless.
*/
- if (match->key->tun_key.ipv4_dst)
+ if (match->key->tun_key.u.ipv4.dst)
SW_FLOW_KEY_MEMSET_FIELD(match, tun_key,
0xff, true);
@@ -1169,7 +1255,8 @@ int ovs_nla_get_match(struct sw_flow_match *match,
}
}
- err = ovs_key_from_nlattrs(match, mask_attrs, a, true, log);
+ err = ovs_key_from_nlattrs(net, match, mask_attrs, a, true,
+ log);
if (err)
goto free_newmask;
}
@@ -1250,7 +1337,7 @@ u32 ovs_nla_get_ufid_flags(const struct nlattr *attr)
* extracted from the packet itself.
*/
-int ovs_nla_get_flow_metadata(const struct nlattr *attr,
+int ovs_nla_get_flow_metadata(struct net *net, const struct nlattr *attr,
struct sw_flow_key *key,
bool log)
{
@@ -1266,9 +1353,10 @@ int ovs_nla_get_flow_metadata(const struct nlattr *attr,
memset(&match, 0, sizeof(match));
match.key = key;
+ memset(&key->ct, 0, sizeof(key->ct));
key->phy.in_port = DP_MAX_PORTS;
- return metadata_from_nlattrs(&match, &attrs, a, false, log);
+ return metadata_from_nlattrs(net, &match, &attrs, a, false, log);
}
static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
@@ -1287,7 +1375,7 @@ static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
goto nla_put_failure;
- if ((swkey->tun_key.ipv4_dst || is_mask)) {
+ if ((swkey->tun_key.u.ipv4.dst || is_mask)) {
const void *opts = NULL;
if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
@@ -1314,6 +1402,9 @@ static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
goto nla_put_failure;
+ if (ovs_ct_put_key(output, skb))
+ goto nla_put_failure;
+
nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
if (!nla)
goto nla_put_failure;
@@ -1548,11 +1639,51 @@ static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log)
return sfa;
}
+static void ovs_nla_free_set_action(const struct nlattr *a)
+{
+ const struct nlattr *ovs_key = nla_data(a);
+ struct ovs_tunnel_info *ovs_tun;
+
+ switch (nla_type(ovs_key)) {
+ case OVS_KEY_ATTR_TUNNEL_INFO:
+ ovs_tun = nla_data(ovs_key);
+ dst_release((struct dst_entry *)ovs_tun->tun_dst);
+ break;
+ }
+}
+
+void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
+{
+ const struct nlattr *a;
+ int rem;
+
+ if (!sf_acts)
+ return;
+
+ nla_for_each_attr(a, sf_acts->actions, sf_acts->actions_len, rem) {
+ switch (nla_type(a)) {
+ case OVS_ACTION_ATTR_SET:
+ ovs_nla_free_set_action(a);
+ break;
+ case OVS_ACTION_ATTR_CT:
+ ovs_ct_free_action(a);
+ break;
+ }
+ }
+
+ kfree(sf_acts);
+}
+
+static void __ovs_nla_free_flow_actions(struct rcu_head *head)
+{
+ ovs_nla_free_flow_actions(container_of(head, struct sw_flow_actions, rcu));
+}
+
/* Schedules 'sf_acts' to be freed after the next RCU grace period.
* The caller must hold rcu_read_lock for this to be sensible. */
-void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
+void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *sf_acts)
{
- kfree_rcu(sf_acts, rcu);
+ call_rcu(&sf_acts->rcu, __ovs_nla_free_flow_actions);
}
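+
+/* kfree_rcu() can only kfree() the object once the grace period ends;
+ * since freeing now also walks the action list to drop tunnel dst and
+ * conntrack references (ovs_nla_free_flow_actions() above), a real
+ * callback via call_rcu() is required.
+ */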
static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
@@ -1582,6 +1713,7 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
acts->actions_len = (*sfa)->actions_len;
+ acts->orig_len = (*sfa)->orig_len;
kfree(*sfa);
*sfa = acts;
@@ -1609,8 +1741,8 @@ static struct nlattr *__add_action(struct sw_flow_actions **sfa,
return a;
}
-static int add_action(struct sw_flow_actions **sfa, int attrtype,
- void *data, int len, bool log)
+int ovs_nla_add_action(struct sw_flow_actions **sfa, int attrtype, void *data,
+ int len, bool log)
{
struct nlattr *a;
@@ -1625,7 +1757,7 @@ static inline int add_nested_action_start(struct sw_flow_actions **sfa,
int used = (*sfa)->actions_len;
int err;
- err = add_action(sfa, attrtype, NULL, 0, log);
+ err = ovs_nla_add_action(sfa, attrtype, NULL, 0, log);
if (err)
return err;
@@ -1641,12 +1773,12 @@ static inline void add_nested_action_end(struct sw_flow_actions *sfa,
a->nla_len = sfa->actions_len - st_offset;
}
-static int __ovs_nla_copy_actions(const struct nlattr *attr,
+static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
const struct sw_flow_key *key,
int depth, struct sw_flow_actions **sfa,
__be16 eth_type, __be16 vlan_tci, bool log);
-static int validate_and_copy_sample(const struct nlattr *attr,
+static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
const struct sw_flow_key *key, int depth,
struct sw_flow_actions **sfa,
__be16 eth_type, __be16 vlan_tci, bool log)
@@ -1678,15 +1810,15 @@ static int validate_and_copy_sample(const struct nlattr *attr,
start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE, log);
if (start < 0)
return start;
- err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY,
- nla_data(probability), sizeof(u32), log);
+ err = ovs_nla_add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY,
+ nla_data(probability), sizeof(u32), log);
if (err)
return err;
st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS, log);
if (st_acts < 0)
return st_acts;
- err = __ovs_nla_copy_actions(actions, key, depth + 1, sfa,
+ err = __ovs_nla_copy_actions(net, actions, key, depth + 1, sfa,
eth_type, vlan_tci, log);
if (err)
return err;
@@ -1746,7 +1878,9 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
{
struct sw_flow_match match;
struct sw_flow_key key;
- struct ovs_tunnel_info *tun_info;
+ struct metadata_dst *tun_dst;
+ struct ip_tunnel_info *tun_info;
+ struct ovs_tunnel_info *ovs_tun;
struct nlattr *a;
int err = 0, start, opts_type;
@@ -1771,27 +1905,31 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
if (start < 0)
return start;
+ tun_dst = metadata_dst_alloc(key.tun_opts_len, GFP_KERNEL);
+ if (!tun_dst)
+ return -ENOMEM;
+
a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL,
- sizeof(*tun_info) + key.tun_opts_len, log);
- if (IS_ERR(a))
+ sizeof(*ovs_tun), log);
+ if (IS_ERR(a)) {
+ dst_release((struct dst_entry *)tun_dst);
return PTR_ERR(a);
+ }
- tun_info = nla_data(a);
- tun_info->tunnel = key.tun_key;
- tun_info->options_len = key.tun_opts_len;
+ ovs_tun = nla_data(a);
+ ovs_tun->tun_dst = tun_dst;
- if (tun_info->options_len) {
- /* We need to store the options in the action itself since
- * everything else will go away after flow setup. We can append
- * it to tun_info and then point there.
- */
- memcpy((tun_info + 1),
- TUN_METADATA_OPTS(&key, key.tun_opts_len), key.tun_opts_len);
- tun_info->options = (tun_info + 1);
- } else {
- tun_info->options = NULL;
- }
+ tun_info = &tun_dst->u.tun_info;
+ tun_info->mode = IP_TUNNEL_INFO_TX;
+ tun_info->key = key.tun_key;
+	/* The options must be stored in the action itself since everything
+	 * else will go away after flow setup; ip_tunnel_info_opts_set()
+	 * copies them into the metadata dst allocated above.
+	 */
+ ip_tunnel_info_opts_set(tun_info,
+ TUN_METADATA_OPTS(&key, key.tun_opts_len),
+ key.tun_opts_len);
add_nested_action_end(*sfa, start);
return err;
@@ -1829,8 +1967,7 @@ static int validate_set(const struct nlattr *a,
key_len /= 2;
if (key_type > OVS_KEY_ATTR_MAX ||
- (ovs_key_lens[key_type].len != key_len &&
- ovs_key_lens[key_type].len != OVS_ATTR_NESTED))
+ !check_attr_len(key_len, ovs_key_lens[key_type].len))
return -EINVAL;
if (masked && !validate_masked(nla_data(ovs_key), key_len))
@@ -1843,6 +1980,8 @@ static int validate_set(const struct nlattr *a,
case OVS_KEY_ATTR_PRIORITY:
case OVS_KEY_ATTR_SKB_MARK:
+ case OVS_KEY_ATTR_CT_MARK:
+ case OVS_KEY_ATTR_CT_LABELS:
case OVS_KEY_ATTR_ETHERNET:
break;
@@ -2008,7 +2147,7 @@ static int copy_action(const struct nlattr *from,
return 0;
}
-static int __ovs_nla_copy_actions(const struct nlattr *attr,
+static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
const struct sw_flow_key *key,
int depth, struct sw_flow_actions **sfa,
__be16 eth_type, __be16 vlan_tci, bool log)
@@ -2032,7 +2171,8 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
[OVS_ACTION_ATTR_SET] = (u32)-1,
[OVS_ACTION_ATTR_SET_MASKED] = (u32)-1,
[OVS_ACTION_ATTR_SAMPLE] = (u32)-1,
- [OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash)
+ [OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash),
+ [OVS_ACTION_ATTR_CT] = (u32)-1,
};
const struct ovs_action_push_vlan *vlan;
int type = nla_type(a);
@@ -2139,13 +2279,20 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
break;
case OVS_ACTION_ATTR_SAMPLE:
- err = validate_and_copy_sample(a, key, depth, sfa,
+ err = validate_and_copy_sample(net, a, key, depth, sfa,
eth_type, vlan_tci, log);
if (err)
return err;
skip_copy = true;
break;
+ case OVS_ACTION_ATTR_CT:
+ err = ovs_ct_copy_action(net, a, key, sfa, log);
+ if (err)
+ return err;
+ skip_copy = true;
+ break;
+
default:
OVS_NLERR(log, "Unknown Action type %d", type);
return -EINVAL;
@@ -2164,7 +2311,7 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
}
/* 'key' must be the masked key. */
-int ovs_nla_copy_actions(const struct nlattr *attr,
+int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa, bool log)
{
@@ -2174,10 +2321,11 @@ int ovs_nla_copy_actions(const struct nlattr *attr,
if (IS_ERR(*sfa))
return PTR_ERR(*sfa);
- err = __ovs_nla_copy_actions(attr, key, 0, sfa, key->eth.type,
+ (*sfa)->orig_len = nla_len(attr);
+ err = __ovs_nla_copy_actions(net, attr, key, 0, sfa, key->eth.type,
key->eth.tci, log);
if (err)
- kfree(*sfa);
+ ovs_nla_free_flow_actions(*sfa);
return err;
}
@@ -2227,16 +2375,14 @@ static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
switch (key_type) {
case OVS_KEY_ATTR_TUNNEL_INFO: {
- struct ovs_tunnel_info *tun_info = nla_data(ovs_key);
+ struct ovs_tunnel_info *ovs_tun = nla_data(ovs_key);
+ struct ip_tunnel_info *tun_info = &ovs_tun->tun_dst->u.tun_info;
start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
if (!start)
return -EMSGSIZE;
- err = ipv4_tun_to_nlattr(skb, &tun_info->tunnel,
- tun_info->options_len ?
- tun_info->options : NULL,
- tun_info->options_len);
+ err = ovs_nla_put_tunnel_info(skb, tun_info);
if (err)
return err;
nla_nest_end(skb, start);
@@ -2298,6 +2444,13 @@ int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb)
if (err)
return err;
break;
+
+ case OVS_ACTION_ATTR_CT:
+ err = ovs_ct_action_to_attr(nla_data(a), skb);
+ if (err)
+ return err;
+ break;
+
default:
if (nla_put(skb, type, nla_len(a), nla_data(a)))
return -EMSGSIZE;
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h
index 5c3d75bff..47dd142ec 100644
--- a/net/openvswitch/flow_netlink.h
+++ b/net/openvswitch/flow_netlink.h
@@ -45,29 +45,34 @@ void ovs_match_init(struct sw_flow_match *match,
int ovs_nla_put_key(const struct sw_flow_key *, const struct sw_flow_key *,
int attr, bool is_mask, struct sk_buff *);
-int ovs_nla_get_flow_metadata(const struct nlattr *, struct sw_flow_key *,
- bool log);
+int ovs_nla_get_flow_metadata(struct net *, const struct nlattr *,
+ struct sw_flow_key *, bool log);
int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb);
int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb);
int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb);
-int ovs_nla_get_match(struct sw_flow_match *, const struct nlattr *key,
- const struct nlattr *mask, bool log);
-int ovs_nla_put_egress_tunnel_key(struct sk_buff *,
- const struct ovs_tunnel_info *);
+int ovs_nla_get_match(struct net *, struct sw_flow_match *,
+ const struct nlattr *key, const struct nlattr *mask,
+ bool log);
+
+int ovs_nla_put_tunnel_info(struct sk_buff *skb,
+ struct ip_tunnel_info *tun_info);
bool ovs_nla_get_ufid(struct sw_flow_id *, const struct nlattr *, bool log);
int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
const struct sw_flow_key *key, bool log);
u32 ovs_nla_get_ufid_flags(const struct nlattr *attr);
-int ovs_nla_copy_actions(const struct nlattr *attr,
+int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa, bool log);
+int ovs_nla_add_action(struct sw_flow_actions **sfa, int attrtype,
+ void *data, int len, bool log);
int ovs_nla_put_actions(const struct nlattr *attr,
int len, struct sk_buff *skb);
void ovs_nla_free_flow_actions(struct sw_flow_actions *);
+void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *);
#endif /* flow_netlink.h */
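
The new _rcu variant presumably defers the free past a grace period so readers walking flow->sf_acts under rcu_read_lock() never race with teardown; a sketch of such a wrapper, assuming sw_flow_actions embeds a struct rcu_head:

static void flow_actions_free_cb(struct rcu_head *head)
{
	ovs_nla_free_flow_actions(container_of(head,
					       struct sw_flow_actions, rcu));
}

void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *sf_acts)
{
	call_rcu(&sf_acts->rcu, flow_actions_free_cb);
}
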
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index af08e6fc9..c7f74aab3 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -18,6 +18,7 @@
#include "flow.h"
#include "datapath.h"
+#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -145,7 +146,8 @@ static void flow_free(struct sw_flow *flow)
if (ovs_identifier_is_key(&flow->id))
kfree(flow->id.unmasked_key);
- kfree((struct sw_flow_actions __force *)flow->sf_acts);
+ if (flow->sf_acts)
+ ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
for_each_node(node)
if (flow->stats[node])
kmem_cache_free(flow_stats_cache,
@@ -426,7 +428,7 @@ static u32 flow_hash(const struct sw_flow_key *key,
static int flow_key_start(const struct sw_flow_key *key)
{
- if (key->tun_key.ipv4_dst)
+ if (key->tun_key.u.ipv4.dst)
return 0;
else
return rounddown(offsetof(struct sw_flow_key, phy),
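
The ipv4_dst to u.ipv4.dst renames across this series track the tunnel key's endpoint addresses moving into a v4/v6 union. The relevant part of struct ip_tunnel_key, sketched from the <net/ip_tunnels.h> of this era (the exact field set is an assumption):

struct ip_tunnel_key {
	__be64 tun_id;
	union {
		struct {
			__be32 src;
			__be32 dst;
		} ipv4;
		struct {
			struct in6_addr src;
			struct in6_addr dst;
		} ipv6;
	} u;
	__be16 tun_flags;
	u8     tos;	/* TOS for IPv4, traffic class for IPv6 */
	u8     ttl;	/* TTL for IPv4, hop limit for IPv6 */
	__be16 tp_src;
	__be16 tp_dst;
};
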
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index 208c576bd..5f8aaaaa0 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -26,108 +26,42 @@
#include "datapath.h"
#include "vport.h"
+#include "vport-netdev.h"
static struct vport_ops ovs_geneve_vport_ops;
-
/**
* struct geneve_port - Keeps track of open UDP ports
- * @gs: The socket created for this port number.
- * @name: vport name.
+ * @port_no: destination port number.

*/
struct geneve_port {
- struct geneve_sock *gs;
- char name[IFNAMSIZ];
+ u16 port_no;
};
-static LIST_HEAD(geneve_ports);
-
static inline struct geneve_port *geneve_vport(const struct vport *vport)
{
return vport_priv(vport);
}
-/* Convert 64 bit tunnel ID to 24 bit VNI. */
-static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
-{
-#ifdef __BIG_ENDIAN
- vni[0] = (__force __u8)(tun_id >> 16);
- vni[1] = (__force __u8)(tun_id >> 8);
- vni[2] = (__force __u8)tun_id;
-#else
- vni[0] = (__force __u8)((__force u64)tun_id >> 40);
- vni[1] = (__force __u8)((__force u64)tun_id >> 48);
- vni[2] = (__force __u8)((__force u64)tun_id >> 56);
-#endif
-}
-
-/* Convert 24 bit VNI to 64 bit tunnel ID. */
-static __be64 vni_to_tunnel_id(const __u8 *vni)
-{
-#ifdef __BIG_ENDIAN
- return (vni[0] << 16) | (vni[1] << 8) | vni[2];
-#else
- return (__force __be64)(((__force u64)vni[0] << 40) |
- ((__force u64)vni[1] << 48) |
- ((__force u64)vni[2] << 56));
-#endif
-}
-
-static void geneve_rcv(struct geneve_sock *gs, struct sk_buff *skb)
-{
- struct vport *vport = gs->rcv_data;
- struct genevehdr *geneveh = geneve_hdr(skb);
- int opts_len;
- struct ovs_tunnel_info tun_info;
- __be64 key;
- __be16 flags;
-
- opts_len = geneveh->opt_len * 4;
-
- flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |
- (udp_hdr(skb)->check != 0 ? TUNNEL_CSUM : 0) |
- (geneveh->oam ? TUNNEL_OAM : 0) |
- (geneveh->critical ? TUNNEL_CRIT_OPT : 0);
-
- key = vni_to_tunnel_id(geneveh->vni);
-
- ovs_flow_tun_info_init(&tun_info, ip_hdr(skb),
- udp_hdr(skb)->source, udp_hdr(skb)->dest,
- key, flags,
- geneveh->options, opts_len);
-
- ovs_vport_receive(vport, skb, &tun_info);
-}
-
static int geneve_get_options(const struct vport *vport,
struct sk_buff *skb)
{
struct geneve_port *geneve_port = geneve_vport(vport);
- struct inet_sock *sk = inet_sk(geneve_port->gs->sock->sk);
- if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(sk->inet_sport)))
+ if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, geneve_port->port_no))
return -EMSGSIZE;
return 0;
}
-static void geneve_tnl_destroy(struct vport *vport)
-{
- struct geneve_port *geneve_port = geneve_vport(vport);
-
- geneve_sock_release(geneve_port->gs);
-
- ovs_vport_deferred_free(vport);
-}
-
static struct vport *geneve_tnl_create(const struct vport_parms *parms)
{
struct net *net = ovs_dp_get_net(parms->dp);
struct nlattr *options = parms->options;
struct geneve_port *geneve_port;
- struct geneve_sock *gs;
+ struct net_device *dev;
struct vport *vport;
struct nlattr *a;
- int err;
u16 dst_port;
+ int err;
if (!options) {
err = -EINVAL;
@@ -149,106 +83,41 @@ static struct vport *geneve_tnl_create(const struct vport_parms *parms)
return vport;
geneve_port = geneve_vport(vport);
- strncpy(geneve_port->name, parms->name, IFNAMSIZ);
+ geneve_port->port_no = dst_port;
- gs = geneve_sock_add(net, htons(dst_port), geneve_rcv, vport, true, 0);
- if (IS_ERR(gs)) {
+ rtnl_lock();
+ dev = geneve_dev_create_fb(net, parms->name, NET_NAME_USER, dst_port);
+ if (IS_ERR(dev)) {
+ rtnl_unlock();
ovs_vport_free(vport);
- return (void *)gs;
+ return ERR_CAST(dev);
}
- geneve_port->gs = gs;
+ dev_change_flags(dev, dev->flags | IFF_UP);
+ rtnl_unlock();
return vport;
error:
return ERR_PTR(err);
}
-static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
+static struct vport *geneve_create(const struct vport_parms *parms)
{
- const struct ovs_key_ipv4_tunnel *tun_key;
- struct ovs_tunnel_info *tun_info;
- struct net *net = ovs_dp_get_net(vport->dp);
- struct geneve_port *geneve_port = geneve_vport(vport);
- __be16 dport = inet_sk(geneve_port->gs->sock->sk)->inet_sport;
- __be16 sport;
- struct rtable *rt;
- struct flowi4 fl;
- u8 vni[3], opts_len, *opts;
- __be16 df;
- int err;
-
- tun_info = OVS_CB(skb)->egress_tun_info;
- if (unlikely(!tun_info)) {
- err = -EINVAL;
- goto error;
- }
-
- tun_key = &tun_info->tunnel;
- rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_UDP);
- if (IS_ERR(rt)) {
- err = PTR_ERR(rt);
- goto error;
- }
-
- df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
- sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true);
- tunnel_id_to_vni(tun_key->tun_id, vni);
- skb->ignore_df = 1;
-
- if (tun_key->tun_flags & TUNNEL_GENEVE_OPT) {
- opts = (u8 *)tun_info->options;
- opts_len = tun_info->options_len;
- } else {
- opts = NULL;
- opts_len = 0;
- }
-
- err = geneve_xmit_skb(geneve_port->gs, rt, skb, fl.saddr,
- tun_key->ipv4_dst, tun_key->ipv4_tos,
- tun_key->ipv4_ttl, df, sport, dport,
- tun_key->tun_flags, vni, opts_len, opts,
- !!(tun_key->tun_flags & TUNNEL_CSUM), false);
- if (err < 0)
- ip_rt_put(rt);
- return err;
-
-error:
- kfree_skb(skb);
- return err;
-}
-
-static const char *geneve_get_name(const struct vport *vport)
-{
- struct geneve_port *geneve_port = geneve_vport(vport);
-
- return geneve_port->name;
-}
+ struct vport *vport;
-static int geneve_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
- struct ovs_tunnel_info *egress_tun_info)
-{
- struct geneve_port *geneve_port = geneve_vport(vport);
- struct net *net = ovs_dp_get_net(vport->dp);
- __be16 dport = inet_sk(geneve_port->gs->sock->sk)->inet_sport;
- __be16 sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true);
+ vport = geneve_tnl_create(parms);
+ if (IS_ERR(vport))
+ return vport;
- /* Get tp_src and tp_dst, refer to geneve_build_header().
- */
- return ovs_tunnel_get_egress_info(egress_tun_info,
- ovs_dp_get_net(vport->dp),
- OVS_CB(skb)->egress_tun_info,
- IPPROTO_UDP, skb->mark, sport, dport);
+ return ovs_netdev_link(vport, parms->name);
}
static struct vport_ops ovs_geneve_vport_ops = {
.type = OVS_VPORT_TYPE_GENEVE,
- .create = geneve_tnl_create,
- .destroy = geneve_tnl_destroy,
- .get_name = geneve_get_name,
+ .create = geneve_create,
+ .destroy = ovs_netdev_tunnel_destroy,
.get_options = geneve_get_options,
- .send = geneve_tnl_send,
+ .send = ovs_netdev_send,
.owner = THIS_MODULE,
- .get_egress_tun_info = geneve_get_egress_tun_info,
};
static int __init ovs_geneve_tnl_init(void)
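
Geneve, GRE and VXLAN vports now share one creation shape: allocate a zero-priv vport, build a metadata-mode net_device under RTNL, bring it up, then register it through ovs_netdev_link(). An illustrative consolidation (not a helper in this tree; the real per-type constructors take extra arguments such as the destination port or a vxlan_config):

static struct vport *tnl_vport_finish(struct vport *vport, struct net *net,
				      struct net_device *(*create)(struct net *,
								   const char *),
				      const char *name)
{
	struct net_device *dev;

	rtnl_lock();
	dev = create(net, name);		/* metadata-mode tunnel device */
	if (IS_ERR(dev)) {
		rtnl_unlock();
		ovs_vport_free(vport);
		return ERR_CAST(dev);
	}

	dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();
	return vport;
}
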
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index f17ac9642..64225bf5e 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -45,253 +45,50 @@
#include "datapath.h"
#include "vport.h"
+#include "vport-netdev.h"
static struct vport_ops ovs_gre_vport_ops;
-/* Returns the least-significant 32 bits of a __be64. */
-static __be32 be64_get_low32(__be64 x)
+static struct vport *gre_tnl_create(const struct vport_parms *parms)
{
-#ifdef __BIG_ENDIAN
- return (__force __be32)x;
-#else
- return (__force __be32)((__force u64)x >> 32);
-#endif
-}
-
-static __be16 filter_tnl_flags(__be16 flags)
-{
- return flags & (TUNNEL_CSUM | TUNNEL_KEY);
-}
-
-static struct sk_buff *__build_header(struct sk_buff *skb,
- int tunnel_hlen)
-{
- struct tnl_ptk_info tpi;
- const struct ovs_key_ipv4_tunnel *tun_key;
-
- tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
-
- skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
- if (IS_ERR(skb))
- return skb;
-
- tpi.flags = filter_tnl_flags(tun_key->tun_flags);
- tpi.proto = htons(ETH_P_TEB);
- tpi.key = be64_get_low32(tun_key->tun_id);
- tpi.seq = 0;
- gre_build_header(skb, &tpi, tunnel_hlen);
-
- return skb;
-}
-
-static __be64 key_to_tunnel_id(__be32 key, __be32 seq)
-{
-#ifdef __BIG_ENDIAN
- return (__force __be64)((__force u64)seq << 32 | (__force u32)key);
-#else
- return (__force __be64)((__force u64)key << 32 | (__force u32)seq);
-#endif
-}
-
-/* Called with rcu_read_lock and BH disabled. */
-static int gre_rcv(struct sk_buff *skb,
- const struct tnl_ptk_info *tpi)
-{
- struct ovs_tunnel_info tun_info;
- struct ovs_net *ovs_net;
- struct vport *vport;
- __be64 key;
-
- ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
- vport = rcu_dereference(ovs_net->vport_net.gre_vport);
- if (unlikely(!vport))
- return PACKET_REJECT;
-
- key = key_to_tunnel_id(tpi->key, tpi->seq);
- ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), 0, 0, key,
- filter_tnl_flags(tpi->flags), NULL, 0);
-
- ovs_vport_receive(vport, skb, &tun_info);
- return PACKET_RCVD;
-}
-
-/* Called with rcu_read_lock and BH disabled. */
-static int gre_err(struct sk_buff *skb, u32 info,
- const struct tnl_ptk_info *tpi)
-{
- struct ovs_net *ovs_net;
+ struct net *net = ovs_dp_get_net(parms->dp);
+ struct net_device *dev;
struct vport *vport;
- ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
- vport = rcu_dereference(ovs_net->vport_net.gre_vport);
-
- if (unlikely(!vport))
- return PACKET_REJECT;
- else
- return PACKET_RCVD;
-}
-
-static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
-{
- struct net *net = ovs_dp_get_net(vport->dp);
- const struct ovs_key_ipv4_tunnel *tun_key;
- struct flowi4 fl;
- struct rtable *rt;
- int min_headroom;
- int tunnel_hlen;
- __be16 df;
- int err;
-
- if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
- err = -EINVAL;
- goto err_free_skb;
- }
-
- tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
- rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_GRE);
- if (IS_ERR(rt)) {
- err = PTR_ERR(rt);
- goto err_free_skb;
- }
-
- tunnel_hlen = ip_gre_calc_hlen(tun_key->tun_flags);
-
- min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
- + tunnel_hlen + sizeof(struct iphdr)
- + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
- if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
- int head_delta = SKB_DATA_ALIGN(min_headroom -
- skb_headroom(skb) +
- 16);
- err = pskb_expand_head(skb, max_t(int, head_delta, 0),
- 0, GFP_ATOMIC);
- if (unlikely(err))
- goto err_free_rt;
- }
-
- skb = vlan_hwaccel_push_inside(skb);
- if (unlikely(!skb)) {
- err = -ENOMEM;
- goto err_free_rt;
- }
-
- /* Push Tunnel header. */
- skb = __build_header(skb, tunnel_hlen);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
- skb = NULL;
- goto err_free_rt;
+ vport = ovs_vport_alloc(0, &ovs_gre_vport_ops, parms);
+ if (IS_ERR(vport))
+ return vport;
+
+ rtnl_lock();
+ dev = gretap_fb_dev_create(net, parms->name, NET_NAME_USER);
+ if (IS_ERR(dev)) {
+ rtnl_unlock();
+ ovs_vport_free(vport);
+ return ERR_CAST(dev);
}
- df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
- htons(IP_DF) : 0;
-
- skb->ignore_df = 1;
-
- return iptunnel_xmit(skb->sk, rt, skb, fl.saddr,
- tun_key->ipv4_dst, IPPROTO_GRE,
- tun_key->ipv4_tos, tun_key->ipv4_ttl, df, false);
-err_free_rt:
- ip_rt_put(rt);
-err_free_skb:
- kfree_skb(skb);
- return err;
-}
-
-static struct gre_cisco_protocol gre_protocol = {
- .handler = gre_rcv,
- .err_handler = gre_err,
- .priority = 1,
-};
-
-static int gre_ports;
-static int gre_init(void)
-{
- int err;
-
- gre_ports++;
- if (gre_ports > 1)
- return 0;
-
- err = gre_cisco_register(&gre_protocol);
- if (err)
- pr_warn("cannot register gre protocol handler\n");
-
- return err;
-}
-
-static void gre_exit(void)
-{
- gre_ports--;
- if (gre_ports > 0)
- return;
-
- gre_cisco_unregister(&gre_protocol);
-}
+ dev_change_flags(dev, dev->flags | IFF_UP);
+ rtnl_unlock();
-static const char *gre_get_name(const struct vport *vport)
-{
- return vport_priv(vport);
+ return vport;
}
static struct vport *gre_create(const struct vport_parms *parms)
{
- struct net *net = ovs_dp_get_net(parms->dp);
- struct ovs_net *ovs_net;
struct vport *vport;
- int err;
- err = gre_init();
- if (err)
- return ERR_PTR(err);
-
- ovs_net = net_generic(net, ovs_net_id);
- if (ovsl_dereference(ovs_net->vport_net.gre_vport)) {
- vport = ERR_PTR(-EEXIST);
- goto error;
- }
-
- vport = ovs_vport_alloc(IFNAMSIZ, &ovs_gre_vport_ops, parms);
+ vport = gre_tnl_create(parms);
if (IS_ERR(vport))
- goto error;
-
- strncpy(vport_priv(vport), parms->name, IFNAMSIZ);
- rcu_assign_pointer(ovs_net->vport_net.gre_vport, vport);
- return vport;
+ return vport;
-error:
- gre_exit();
- return vport;
-}
-
-static void gre_tnl_destroy(struct vport *vport)
-{
- struct net *net = ovs_dp_get_net(vport->dp);
- struct ovs_net *ovs_net;
-
- ovs_net = net_generic(net, ovs_net_id);
-
- RCU_INIT_POINTER(ovs_net->vport_net.gre_vport, NULL);
- ovs_vport_deferred_free(vport);
- gre_exit();
-}
-
-static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
- struct ovs_tunnel_info *egress_tun_info)
-{
- return ovs_tunnel_get_egress_info(egress_tun_info,
- ovs_dp_get_net(vport->dp),
- OVS_CB(skb)->egress_tun_info,
- IPPROTO_GRE, skb->mark, 0, 0);
+ return ovs_netdev_link(vport, parms->name);
}
static struct vport_ops ovs_gre_vport_ops = {
.type = OVS_VPORT_TYPE_GRE,
.create = gre_create,
- .destroy = gre_tnl_destroy,
- .get_name = gre_get_name,
- .send = gre_tnl_send,
- .get_egress_tun_info = gre_get_egress_tun_info,
+ .send = ovs_netdev_send,
+ .destroy = ovs_netdev_tunnel_destroy,
.owner = THIS_MODULE,
};
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 6a55f7105..b3934126d 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -43,35 +43,26 @@ static struct internal_dev *internal_dev_priv(struct net_device *netdev)
return netdev_priv(netdev);
}
-/* This function is only called by the kernel network layer.*/
-static struct rtnl_link_stats64 *internal_dev_get_stats(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
-{
- struct vport *vport = ovs_internal_dev_get_vport(netdev);
- struct ovs_vport_stats vport_stats;
-
- ovs_vport_get_stats(vport, &vport_stats);
-
- /* The tx and rx stats need to be swapped because the
- * switch and host OS have opposite perspectives. */
- stats->rx_packets = vport_stats.tx_packets;
- stats->tx_packets = vport_stats.rx_packets;
- stats->rx_bytes = vport_stats.tx_bytes;
- stats->tx_bytes = vport_stats.rx_bytes;
- stats->rx_errors = vport_stats.tx_errors;
- stats->tx_errors = vport_stats.rx_errors;
- stats->rx_dropped = vport_stats.tx_dropped;
- stats->tx_dropped = vport_stats.rx_dropped;
-
- return stats;
-}
-
/* Called with rcu_read_lock_bh. */
static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
{
+ int len, err;
+
+ len = skb->len;
rcu_read_lock();
- ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL);
+ err = ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL);
rcu_read_unlock();
+
+ if (likely(!err)) {
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(netdev->tstats);
+
+ u64_stats_update_begin(&tstats->syncp);
+ tstats->tx_bytes += len;
+ tstats->tx_packets++;
+ u64_stats_update_end(&tstats->syncp);
+ } else {
+ netdev->stats.tx_errors++;
+ }
return 0;
}
@@ -115,13 +106,45 @@ static void internal_dev_destructor(struct net_device *dev)
free_netdev(dev);
}
+static struct rtnl_link_stats64 *
+internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ int i;
+
+ memset(stats, 0, sizeof(*stats));
+ stats->rx_errors = dev->stats.rx_errors;
+ stats->tx_errors = dev->stats.tx_errors;
+ stats->tx_dropped = dev->stats.tx_dropped;
+ stats->rx_dropped = dev->stats.rx_dropped;
+
+ for_each_possible_cpu(i) {
+ const struct pcpu_sw_netstats *percpu_stats;
+ struct pcpu_sw_netstats local_stats;
+ unsigned int start;
+
+ percpu_stats = per_cpu_ptr(dev->tstats, i);
+
+ do {
+ start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
+ local_stats = *percpu_stats;
+ } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
+
+ stats->rx_bytes += local_stats.rx_bytes;
+ stats->rx_packets += local_stats.rx_packets;
+ stats->tx_bytes += local_stats.tx_bytes;
+ stats->tx_packets += local_stats.tx_packets;
+ }
+
+ return stats;
+}
+
static const struct net_device_ops internal_dev_netdev_ops = {
.ndo_open = internal_dev_open,
.ndo_stop = internal_dev_stop,
.ndo_start_xmit = internal_dev_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = internal_dev_change_mtu,
- .ndo_get_stats64 = internal_dev_get_stats,
+ .ndo_get_stats64 = internal_get_stats,
};
static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
@@ -135,7 +158,7 @@ static void do_setup(struct net_device *netdev)
netdev->netdev_ops = &internal_dev_netdev_ops;
netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
- netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH;
netdev->destructor = internal_dev_destructor;
netdev->ethtool_ops = &internal_dev_ethtool_ops;
netdev->rtnl_link_ops = &internal_dev_link_ops;
@@ -156,49 +179,51 @@ static void do_setup(struct net_device *netdev)
static struct vport *internal_dev_create(const struct vport_parms *parms)
{
struct vport *vport;
- struct netdev_vport *netdev_vport;
struct internal_dev *internal_dev;
int err;
- vport = ovs_vport_alloc(sizeof(struct netdev_vport),
- &ovs_internal_vport_ops, parms);
+ vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
if (IS_ERR(vport)) {
err = PTR_ERR(vport);
goto error;
}
- netdev_vport = netdev_vport_priv(vport);
-
- netdev_vport->dev = alloc_netdev(sizeof(struct internal_dev),
- parms->name, NET_NAME_UNKNOWN,
- do_setup);
- if (!netdev_vport->dev) {
+ vport->dev = alloc_netdev(sizeof(struct internal_dev),
+ parms->name, NET_NAME_UNKNOWN, do_setup);
+ if (!vport->dev) {
err = -ENOMEM;
goto error_free_vport;
}
+ vport->dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!vport->dev->tstats) {
+ err = -ENOMEM;
+ goto error_free_netdev;
+ }
- dev_net_set(netdev_vport->dev, ovs_dp_get_net(vport->dp));
- internal_dev = internal_dev_priv(netdev_vport->dev);
+ dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
+ internal_dev = internal_dev_priv(vport->dev);
internal_dev->vport = vport;
/* Restrict bridge port to current netns. */
if (vport->port_no == OVSP_LOCAL)
- netdev_vport->dev->features |= NETIF_F_NETNS_LOCAL;
+ vport->dev->features |= NETIF_F_NETNS_LOCAL;
rtnl_lock();
- err = register_netdevice(netdev_vport->dev);
+ err = register_netdevice(vport->dev);
if (err)
- goto error_free_netdev;
+ goto error_unlock;
- dev_set_promiscuity(netdev_vport->dev, 1);
+ dev_set_promiscuity(vport->dev, 1);
rtnl_unlock();
- netif_start_queue(netdev_vport->dev);
+ netif_start_queue(vport->dev);
return vport;
-error_free_netdev:
+error_unlock:
rtnl_unlock();
- free_netdev(netdev_vport->dev);
+ free_percpu(vport->dev->tstats);
+error_free_netdev:
+ free_netdev(vport->dev);
error_free_vport:
ovs_vport_free(vport);
error:
@@ -207,30 +232,27 @@ error:
static void internal_dev_destroy(struct vport *vport)
{
- struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-
- netif_stop_queue(netdev_vport->dev);
+ netif_stop_queue(vport->dev);
rtnl_lock();
- dev_set_promiscuity(netdev_vport->dev, -1);
+ dev_set_promiscuity(vport->dev, -1);
/* unregister_netdevice() waits for an RCU grace period. */
- unregister_netdevice(netdev_vport->dev);
-
+ unregister_netdevice(vport->dev);
+ free_percpu(vport->dev->tstats);
rtnl_unlock();
}
-static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
+static void internal_dev_recv(struct vport *vport, struct sk_buff *skb)
{
- struct net_device *netdev = netdev_vport_priv(vport)->dev;
- int len;
+ struct net_device *netdev = vport->dev;
+ struct pcpu_sw_netstats *stats;
if (unlikely(!(netdev->flags & IFF_UP))) {
kfree_skb(skb);
- return 0;
+ netdev->stats.rx_dropped++;
+ return;
}
- len = skb->len;
-
skb_dst_drop(skb);
nf_reset(skb);
secpath_reset(skb);
@@ -240,16 +262,19 @@ static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
skb->protocol = eth_type_trans(skb, netdev);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
- netif_rx(skb);
+ stats = this_cpu_ptr(netdev->tstats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
- return len;
+ netif_rx(skb);
}
static struct vport_ops ovs_internal_vport_ops = {
.type = OVS_VPORT_TYPE_INTERNAL,
.create = internal_dev_create,
.destroy = internal_dev_destroy,
- .get_name = ovs_netdev_get_name,
.send = internal_dev_recv,
};
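
internal_dev now uses the stock pcpu_sw_netstats pattern instead of vport-level counters: each CPU owns its own counters and brackets updates with a u64_stats seqcount, so readers on 32-bit hosts can retry until they see a consistent 64-bit snapshot. The writer side in miniature:

static void count_rx(struct net_device *dev, unsigned int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

internal_get_stats() above is the matching reader, looping on u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() per CPU.
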
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 33e6d6e29..f7e8dcce7 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -26,18 +26,24 @@
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/openvswitch.h>
+#include <linux/export.h>
-#include <net/llc.h>
+#include <net/ip_tunnels.h>
+#include <net/rtnetlink.h>
#include "datapath.h"
+#include "vport.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"
static struct vport_ops ovs_netdev_vport_ops;
/* Must be called with rcu_read_lock. */
-static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
+static void netdev_port_receive(struct sk_buff *skb)
{
+ struct vport *vport;
+
+ vport = ovs_netdev_get_vport(skb->dev);
if (unlikely(!vport))
goto error;
@@ -53,10 +59,8 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
skb_push(skb, ETH_HLEN);
ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
-
- ovs_vport_receive(vport, skb, NULL);
+ ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
return;
-
error:
kfree_skb(skb);
}
@@ -65,15 +69,11 @@ error:
static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
- struct vport *vport;
if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
return RX_HANDLER_PASS;
- vport = ovs_netdev_get_vport(skb->dev);
-
- netdev_port_receive(vport, skb);
-
+ netdev_port_receive(skb);
return RX_HANDLER_CONSUMED;
}
@@ -83,105 +83,112 @@ static struct net_device *get_dpdev(const struct datapath *dp)
local = ovs_vport_ovsl(dp, OVSP_LOCAL);
BUG_ON(!local);
- return netdev_vport_priv(local)->dev;
+ return local->dev;
}
-static struct vport *netdev_create(const struct vport_parms *parms)
+struct vport *ovs_netdev_link(struct vport *vport, const char *name)
{
- struct vport *vport;
- struct netdev_vport *netdev_vport;
int err;
- vport = ovs_vport_alloc(sizeof(struct netdev_vport),
- &ovs_netdev_vport_ops, parms);
- if (IS_ERR(vport)) {
- err = PTR_ERR(vport);
- goto error;
- }
-
- netdev_vport = netdev_vport_priv(vport);
-
- netdev_vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), parms->name);
- if (!netdev_vport->dev) {
+ vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), name);
+ if (!vport->dev) {
err = -ENODEV;
goto error_free_vport;
}
- if (netdev_vport->dev->flags & IFF_LOOPBACK ||
- netdev_vport->dev->type != ARPHRD_ETHER ||
- ovs_is_internal_dev(netdev_vport->dev)) {
+ if (vport->dev->flags & IFF_LOOPBACK ||
+ vport->dev->type != ARPHRD_ETHER ||
+ ovs_is_internal_dev(vport->dev)) {
err = -EINVAL;
goto error_put;
}
rtnl_lock();
- err = netdev_master_upper_dev_link(netdev_vport->dev,
+ err = netdev_master_upper_dev_link(vport->dev,
get_dpdev(vport->dp));
if (err)
goto error_unlock;
- err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook,
+ err = netdev_rx_handler_register(vport->dev, netdev_frame_hook,
vport);
if (err)
goto error_master_upper_dev_unlink;
- dev_disable_lro(netdev_vport->dev);
- dev_set_promiscuity(netdev_vport->dev, 1);
- netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
+ dev_disable_lro(vport->dev);
+ dev_set_promiscuity(vport->dev, 1);
+ vport->dev->priv_flags |= IFF_OVS_DATAPATH;
rtnl_unlock();
return vport;
error_master_upper_dev_unlink:
- netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp));
+ netdev_upper_dev_unlink(vport->dev, get_dpdev(vport->dp));
error_unlock:
rtnl_unlock();
error_put:
- dev_put(netdev_vport->dev);
+ dev_put(vport->dev);
error_free_vport:
ovs_vport_free(vport);
-error:
return ERR_PTR(err);
}
+EXPORT_SYMBOL_GPL(ovs_netdev_link);
-static void free_port_rcu(struct rcu_head *rcu)
+static struct vport *netdev_create(const struct vport_parms *parms)
{
- struct netdev_vport *netdev_vport = container_of(rcu,
- struct netdev_vport, rcu);
+ struct vport *vport;
+
+ vport = ovs_vport_alloc(0, &ovs_netdev_vport_ops, parms);
+ if (IS_ERR(vport))
+ return vport;
- dev_put(netdev_vport->dev);
- ovs_vport_free(vport_from_priv(netdev_vport));
+ return ovs_netdev_link(vport, parms->name);
}
-void ovs_netdev_detach_dev(struct vport *vport)
+static void vport_netdev_free(struct rcu_head *rcu)
{
- struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+ struct vport *vport = container_of(rcu, struct vport, rcu);
+
+ if (vport->dev)
+ dev_put(vport->dev);
+ ovs_vport_free(vport);
+}
+void ovs_netdev_detach_dev(struct vport *vport)
+{
ASSERT_RTNL();
- netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
- netdev_rx_handler_unregister(netdev_vport->dev);
- netdev_upper_dev_unlink(netdev_vport->dev,
- netdev_master_upper_dev_get(netdev_vport->dev));
- dev_set_promiscuity(netdev_vport->dev, -1);
+ vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
+ netdev_rx_handler_unregister(vport->dev);
+ netdev_upper_dev_unlink(vport->dev,
+ netdev_master_upper_dev_get(vport->dev));
+ dev_set_promiscuity(vport->dev, -1);
}
+EXPORT_SYMBOL_GPL(ovs_netdev_detach_dev);
static void netdev_destroy(struct vport *vport)
{
- struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-
rtnl_lock();
- if (netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH)
+ if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
ovs_netdev_detach_dev(vport);
rtnl_unlock();
- call_rcu(&netdev_vport->rcu, free_port_rcu);
+ call_rcu(&vport->rcu, vport_netdev_free);
}
-const char *ovs_netdev_get_name(const struct vport *vport)
+void ovs_netdev_tunnel_destroy(struct vport *vport)
{
- const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
- return netdev_vport->dev->name;
+ rtnl_lock();
+ if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
+ ovs_netdev_detach_dev(vport);
+
+ /* Early release so we can unregister the device */
+ dev_put(vport->dev);
+ rtnl_delete_link(vport->dev);
+ vport->dev = NULL;
+ rtnl_unlock();
+
+ call_rcu(&vport->rcu, vport_netdev_free);
}
+EXPORT_SYMBOL_GPL(ovs_netdev_tunnel_destroy);
static unsigned int packet_length(const struct sk_buff *skb)
{
@@ -193,29 +200,26 @@ static unsigned int packet_length(const struct sk_buff *skb)
return length;
}
-static int netdev_send(struct vport *vport, struct sk_buff *skb)
+void ovs_netdev_send(struct vport *vport, struct sk_buff *skb)
{
- struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
- int mtu = netdev_vport->dev->mtu;
- int len;
+ int mtu = vport->dev->mtu;
if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
- netdev_vport->dev->name,
+ vport->dev->name,
packet_length(skb), mtu);
+ vport->dev->stats.tx_errors++;
goto drop;
}
- skb->dev = netdev_vport->dev;
- len = skb->len;
+ skb->dev = vport->dev;
dev_queue_xmit(skb);
-
- return len;
+ return;
drop:
kfree_skb(skb);
- return 0;
}
+EXPORT_SYMBOL_GPL(ovs_netdev_send);
/* Returns null if this device is not attached to a datapath. */
struct vport *ovs_netdev_get_vport(struct net_device *dev)
@@ -231,8 +235,7 @@ static struct vport_ops ovs_netdev_vport_ops = {
.type = OVS_VPORT_TYPE_NETDEV,
.create = netdev_create,
.destroy = netdev_destroy,
- .get_name = ovs_netdev_get_name,
- .send = netdev_send,
+ .send = ovs_netdev_send,
};
int __init ovs_netdev_init(void)
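
netdev_port_receive() now recovers the vport from skb->dev itself, which is what lets the rx_handler signature shrink. The rx_handler contract in miniature (my_port_receive is a hypothetical consumer):

static rx_handler_result_t my_frame_hook(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;		/* let the stack have it */

	my_port_receive(skb);			/* hypothetical consumer */
	return RX_HANDLER_CONSUMED;		/* stack sees nothing more */
}

/* Registered per device: netdev_rx_handler_register(dev, my_frame_hook, priv); */
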
diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h
index 6f7038e79..bf22fcedb 100644
--- a/net/openvswitch/vport-netdev.h
+++ b/net/openvswitch/vport-netdev.h
@@ -26,22 +26,12 @@
struct vport *ovs_netdev_get_vport(struct net_device *dev);
-struct netdev_vport {
- struct rcu_head rcu;
-
- struct net_device *dev;
-};
-
-static inline struct netdev_vport *
-netdev_vport_priv(const struct vport *vport)
-{
- return vport_priv(vport);
-}
-
-const char *ovs_netdev_get_name(const struct vport *);
+struct vport *ovs_netdev_link(struct vport *vport, const char *name);
+void ovs_netdev_send(struct vport *vport, struct sk_buff *skb);
void ovs_netdev_detach_dev(struct vport *);
int __init ovs_netdev_init(void);
void ovs_netdev_exit(void);
+void ovs_netdev_tunnel_destroy(struct vport *vport);
#endif /* vport_netdev.h */
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index 6d39766e7..e1c9c0888 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -17,94 +17,37 @@
* 02110-1301, USA
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/net.h>
-#include <linux/rculist.h>
-#include <linux/udp.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/openvswitch.h>
#include <linux/module.h>
-
-#include <net/icmp.h>
-#include <net/ip.h>
#include <net/udp.h>
#include <net/ip_tunnels.h>
#include <net/rtnetlink.h>
-#include <net/route.h>
-#include <net/dsfield.h>
-#include <net/inet_ecn.h>
-#include <net/net_namespace.h>
-#include <net/netns/generic.h>
#include <net/vxlan.h>
#include "datapath.h"
#include "vport.h"
-#include "vport-vxlan.h"
-
-/**
- * struct vxlan_port - Keeps track of open UDP ports
- * @vs: vxlan_sock created for the port.
- * @name: vport name.
- */
-struct vxlan_port {
- struct vxlan_sock *vs;
- char name[IFNAMSIZ];
- u32 exts; /* VXLAN_F_* in <net/vxlan.h> */
-};
-
-static struct vport_ops ovs_vxlan_vport_ops;
-
-static inline struct vxlan_port *vxlan_vport(const struct vport *vport)
-{
- return vport_priv(vport);
-}
+#include "vport-netdev.h"
-/* Called with rcu_read_lock and BH disabled. */
-static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
- struct vxlan_metadata *md)
-{
- struct ovs_tunnel_info tun_info;
- struct vxlan_port *vxlan_port;
- struct vport *vport = vs->data;
- struct iphdr *iph;
- struct ovs_vxlan_opts opts = {
- .gbp = md->gbp,
- };
- __be64 key;
- __be16 flags;
-
- flags = TUNNEL_KEY | (udp_hdr(skb)->check != 0 ? TUNNEL_CSUM : 0);
- vxlan_port = vxlan_vport(vport);
- if (vxlan_port->exts & VXLAN_F_GBP && md->gbp)
- flags |= TUNNEL_VXLAN_OPT;
-
- /* Save outer tunnel values */
- iph = ip_hdr(skb);
- key = cpu_to_be64(ntohl(md->vni) >> 8);
- ovs_flow_tun_info_init(&tun_info, iph,
- udp_hdr(skb)->source, udp_hdr(skb)->dest,
- key, flags, &opts, sizeof(opts));
-
- ovs_vport_receive(vport, skb, &tun_info);
-}
+static struct vport_ops ovs_vxlan_netdev_vport_ops;
static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
{
- struct vxlan_port *vxlan_port = vxlan_vport(vport);
- __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
+ struct vxlan_dev *vxlan = netdev_priv(vport->dev);
+ __be16 dst_port = vxlan->cfg.dst_port;
if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port)))
return -EMSGSIZE;
- if (vxlan_port->exts) {
+ if (vxlan->flags & VXLAN_F_GBP) {
struct nlattr *exts;
exts = nla_nest_start(skb, OVS_TUNNEL_ATTR_EXTENSION);
if (!exts)
return -EMSGSIZE;
- if (vxlan_port->exts & VXLAN_F_GBP &&
+ if (vxlan->flags & VXLAN_F_GBP &&
nla_put_flag(skb, OVS_VXLAN_EXT_GBP))
return -EMSGSIZE;
@@ -114,23 +57,14 @@ static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
return 0;
}
-static void vxlan_tnl_destroy(struct vport *vport)
-{
- struct vxlan_port *vxlan_port = vxlan_vport(vport);
-
- vxlan_sock_release(vxlan_port->vs);
-
- ovs_vport_deferred_free(vport);
-}
-
-static const struct nla_policy exts_policy[OVS_VXLAN_EXT_MAX+1] = {
+static const struct nla_policy exts_policy[OVS_VXLAN_EXT_MAX + 1] = {
[OVS_VXLAN_EXT_GBP] = { .type = NLA_FLAG, },
};
-static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr)
+static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr,
+ struct vxlan_config *conf)
{
- struct nlattr *exts[OVS_VXLAN_EXT_MAX+1];
- struct vxlan_port *vxlan_port;
+ struct nlattr *exts[OVS_VXLAN_EXT_MAX + 1];
int err;
if (nla_len(attr) < sizeof(struct nlattr))
@@ -140,10 +74,8 @@ static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr)
if (err < 0)
return err;
- vxlan_port = vxlan_vport(vport);
-
if (exts[OVS_VXLAN_EXT_GBP])
- vxlan_port->exts |= VXLAN_F_GBP;
+ conf->flags |= VXLAN_F_GBP;
return 0;
}
@@ -152,166 +84,84 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
{
struct net *net = ovs_dp_get_net(parms->dp);
struct nlattr *options = parms->options;
- struct vxlan_port *vxlan_port;
- struct vxlan_sock *vs;
+ struct net_device *dev;
struct vport *vport;
struct nlattr *a;
- u16 dst_port;
int err;
+ struct vxlan_config conf = {
+ .no_share = true,
+ .flags = VXLAN_F_COLLECT_METADATA,
+ };
if (!options) {
err = -EINVAL;
goto error;
}
+
a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT);
if (a && nla_len(a) == sizeof(u16)) {
- dst_port = nla_get_u16(a);
+ conf.dst_port = htons(nla_get_u16(a));
} else {
/* Require destination port from userspace. */
err = -EINVAL;
goto error;
}
- vport = ovs_vport_alloc(sizeof(struct vxlan_port),
- &ovs_vxlan_vport_ops, parms);
+ vport = ovs_vport_alloc(0, &ovs_vxlan_netdev_vport_ops, parms);
if (IS_ERR(vport))
return vport;
- vxlan_port = vxlan_vport(vport);
- strncpy(vxlan_port->name, parms->name, IFNAMSIZ);
-
a = nla_find_nested(options, OVS_TUNNEL_ATTR_EXTENSION);
if (a) {
- err = vxlan_configure_exts(vport, a);
+ err = vxlan_configure_exts(vport, a, &conf);
if (err) {
ovs_vport_free(vport);
goto error;
}
}
- vs = vxlan_sock_add(net, htons(dst_port), vxlan_rcv, vport, true,
- vxlan_port->exts);
- if (IS_ERR(vs)) {
+ rtnl_lock();
+ dev = vxlan_dev_create(net, parms->name, NET_NAME_USER, &conf);
+ if (IS_ERR(dev)) {
+ rtnl_unlock();
ovs_vport_free(vport);
- return (void *)vs;
+ return ERR_CAST(dev);
}
- vxlan_port->vs = vs;
+ dev_change_flags(dev, dev->flags | IFF_UP);
+ rtnl_unlock();
return vport;
-
error:
return ERR_PTR(err);
}
-static int vxlan_ext_gbp(struct sk_buff *skb)
+static struct vport *vxlan_create(const struct vport_parms *parms)
{
- const struct ovs_tunnel_info *tun_info;
- const struct ovs_vxlan_opts *opts;
-
- tun_info = OVS_CB(skb)->egress_tun_info;
- opts = tun_info->options;
-
- if (tun_info->tunnel.tun_flags & TUNNEL_VXLAN_OPT &&
- tun_info->options_len >= sizeof(*opts))
- return opts->gbp;
- else
- return 0;
-}
-
-static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
-{
- struct net *net = ovs_dp_get_net(vport->dp);
- struct vxlan_port *vxlan_port = vxlan_vport(vport);
- struct sock *sk = vxlan_port->vs->sock->sk;
- __be16 dst_port = inet_sk(sk)->inet_sport;
- const struct ovs_key_ipv4_tunnel *tun_key;
- struct vxlan_metadata md = {0};
- struct rtable *rt;
- struct flowi4 fl;
- __be16 src_port;
- __be16 df;
- int err;
- u32 vxflags;
-
- if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
- err = -EINVAL;
- goto error;
- }
-
- tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
- rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_UDP);
- if (IS_ERR(rt)) {
- err = PTR_ERR(rt);
- goto error;
- }
-
- df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
- htons(IP_DF) : 0;
-
- skb->ignore_df = 1;
-
- src_port = udp_flow_src_port(net, skb, 0, 0, true);
- md.vni = htonl(be64_to_cpu(tun_key->tun_id) << 8);
- md.gbp = vxlan_ext_gbp(skb);
- vxflags = vxlan_port->exts |
- (tun_key->tun_flags & TUNNEL_CSUM ? VXLAN_F_UDP_CSUM : 0);
-
- err = vxlan_xmit_skb(rt, sk, skb, fl.saddr, tun_key->ipv4_dst,
- tun_key->ipv4_tos, tun_key->ipv4_ttl, df,
- src_port, dst_port,
- &md, false, vxflags);
- if (err < 0)
- ip_rt_put(rt);
- return err;
-error:
- kfree_skb(skb);
- return err;
-}
-
-static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
- struct ovs_tunnel_info *egress_tun_info)
-{
- struct net *net = ovs_dp_get_net(vport->dp);
- struct vxlan_port *vxlan_port = vxlan_vport(vport);
- __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
- __be16 src_port;
- int port_min;
- int port_max;
-
- inet_get_local_port_range(net, &port_min, &port_max);
- src_port = udp_flow_src_port(net, skb, 0, 0, true);
+ struct vport *vport;
- return ovs_tunnel_get_egress_info(egress_tun_info, net,
- OVS_CB(skb)->egress_tun_info,
- IPPROTO_UDP, skb->mark,
- src_port, dst_port);
-}
+ vport = vxlan_tnl_create(parms);
+ if (IS_ERR(vport))
+ return vport;
-static const char *vxlan_get_name(const struct vport *vport)
-{
- struct vxlan_port *vxlan_port = vxlan_vport(vport);
- return vxlan_port->name;
+ return ovs_netdev_link(vport, parms->name);
}
-static struct vport_ops ovs_vxlan_vport_ops = {
- .type = OVS_VPORT_TYPE_VXLAN,
- .create = vxlan_tnl_create,
- .destroy = vxlan_tnl_destroy,
- .get_name = vxlan_get_name,
- .get_options = vxlan_get_options,
- .send = vxlan_tnl_send,
- .get_egress_tun_info = vxlan_get_egress_tun_info,
- .owner = THIS_MODULE,
+static struct vport_ops ovs_vxlan_netdev_vport_ops = {
+ .type = OVS_VPORT_TYPE_VXLAN,
+ .create = vxlan_create,
+ .destroy = ovs_netdev_tunnel_destroy,
+ .get_options = vxlan_get_options,
+ .send = ovs_netdev_send,
};
static int __init ovs_vxlan_tnl_init(void)
{
- return ovs_vport_ops_register(&ovs_vxlan_vport_ops);
+ return ovs_vport_ops_register(&ovs_vxlan_netdev_vport_ops);
}
static void __exit ovs_vxlan_tnl_exit(void)
{
- ovs_vport_ops_unregister(&ovs_vxlan_vport_ops);
+ ovs_vport_ops_unregister(&ovs_vxlan_netdev_vport_ops);
}
module_init(ovs_vxlan_tnl_init);
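
The per-vport socket bookkeeping disappears because a VXLAN_F_COLLECT_METADATA device attaches each packet's outer header to the skb as ip_tunnel_info, replacing the dedicated vxlan_rcv() path. The configuration in isolation (the dst_port value is purely illustrative):

struct vxlan_config conf = {
	.no_share = true,			/* private socket for this device */
	.flags    = VXLAN_F_COLLECT_METADATA,	/* per-packet tunnel metadata */
	.dst_port = htons(4789),		/* IANA VXLAN port, for example */
};

/* Then, under rtnl_lock():
 *	dev = vxlan_dev_create(net, name, NET_NAME_USER, &conf);
 */
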
diff --git a/net/openvswitch/vport-vxlan.h b/net/openvswitch/vport-vxlan.h
deleted file mode 100644
index 4b08233e7..000000000
--- a/net/openvswitch/vport-vxlan.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef VPORT_VXLAN_H
-#define VPORT_VXLAN_H 1
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-struct ovs_vxlan_opts {
- __u32 gbp;
-};
-
-#endif
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 067a3fff1..320c765ce 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -34,9 +34,6 @@
#include "vport.h"
#include "vport-internal_dev.h"
-static void ovs_vport_record_error(struct vport *,
- enum vport_err_type err_type);
-
static LIST_HEAD(vport_ops_list);
/* Protected by RCU read lock for reading, ovs_mutex for writing. */
@@ -113,7 +110,7 @@ struct vport *ovs_vport_locate(const struct net *net, const char *name)
struct vport *vport;
hlist_for_each_entry_rcu(vport, bucket, hash_node)
- if (!strcmp(name, vport->ops->get_name(vport)) &&
+ if (!strcmp(name, ovs_vport_name(vport)) &&
net_eq(ovs_dp_get_net(vport->dp), net))
return vport;
@@ -157,12 +154,6 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
return ERR_PTR(-EINVAL);
}
- vport->percpu_stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!vport->percpu_stats) {
- kfree(vport);
- return ERR_PTR(-ENOMEM);
- }
-
return vport;
}
EXPORT_SYMBOL_GPL(ovs_vport_alloc);
@@ -183,7 +174,6 @@ void ovs_vport_free(struct vport *vport)
* it is safe to use raw dereference.
*/
kfree(rcu_dereference_raw(vport->upcall_portids));
- free_percpu(vport->percpu_stats);
kfree(vport);
}
EXPORT_SYMBOL_GPL(ovs_vport_free);
@@ -226,7 +216,7 @@ struct vport *ovs_vport_add(const struct vport_parms *parms)
}
bucket = hash_bucket(ovs_dp_get_net(vport->dp),
- vport->ops->get_name(vport));
+ ovs_vport_name(vport));
hlist_add_head_rcu(&vport->hash_node, bucket);
return vport;
}
@@ -290,41 +280,19 @@ void ovs_vport_del(struct vport *vport)
*/
void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
{
- int i;
-
- memset(stats, 0, sizeof(*stats));
-
- /* We potentially have 2 sources of stats that need to be combined:
- * those we have collected (split into err_stats and percpu_stats) from
- * set_stats() and device error stats from netdev->get_stats() (for
- * errors that happen downstream and therefore aren't reported through
- * our vport_record_error() function).
- * Stats from first source are reported by ovs (OVS_VPORT_ATTR_STATS).
- * netdev-stats can be directly read over netlink-ioctl.
- */
-
- stats->rx_errors = atomic_long_read(&vport->err_stats.rx_errors);
- stats->tx_errors = atomic_long_read(&vport->err_stats.tx_errors);
- stats->tx_dropped = atomic_long_read(&vport->err_stats.tx_dropped);
- stats->rx_dropped = atomic_long_read(&vport->err_stats.rx_dropped);
-
- for_each_possible_cpu(i) {
- const struct pcpu_sw_netstats *percpu_stats;
- struct pcpu_sw_netstats local_stats;
- unsigned int start;
-
- percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
-
- do {
- start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
- local_stats = *percpu_stats;
- } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
-
- stats->rx_bytes += local_stats.rx_bytes;
- stats->rx_packets += local_stats.rx_packets;
- stats->tx_bytes += local_stats.tx_bytes;
- stats->tx_packets += local_stats.tx_packets;
- }
+ const struct rtnl_link_stats64 *dev_stats;
+ struct rtnl_link_stats64 temp;
+
+ dev_stats = dev_get_stats(vport->dev, &temp);
+ stats->rx_errors = dev_stats->rx_errors;
+ stats->tx_errors = dev_stats->tx_errors;
+ stats->tx_dropped = dev_stats->tx_dropped;
+ stats->rx_dropped = dev_stats->rx_dropped;
+
+ stats->rx_bytes = dev_stats->rx_bytes;
+ stats->rx_packets = dev_stats->rx_packets;
+ stats->tx_bytes = dev_stats->tx_bytes;
+ stats->tx_packets = dev_stats->tx_packets;
}
/**
@@ -468,94 +436,34 @@ u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb)
* Must be called with rcu_read_lock. The packet cannot be shared and
* skb->data should point to the Ethernet header.
*/
-void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
- const struct ovs_tunnel_info *tun_info)
+int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
+ const struct ip_tunnel_info *tun_info)
{
- struct pcpu_sw_netstats *stats;
struct sw_flow_key key;
int error;
- stats = this_cpu_ptr(vport->percpu_stats);
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += skb->len +
- (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
- u64_stats_update_end(&stats->syncp);
-
OVS_CB(skb)->input_vport = vport;
- OVS_CB(skb)->egress_tun_info = NULL;
+ OVS_CB(skb)->mru = 0;
+ if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) {
+ u32 mark;
+
+ mark = skb->mark;
+ skb_scrub_packet(skb, true);
+ skb->mark = mark;
+ tun_info = NULL;
+ }
+
/* Extract flow from 'skb' into 'key'. */
error = ovs_flow_key_extract(tun_info, skb, &key);
if (unlikely(error)) {
kfree_skb(skb);
- return;
+ return error;
}
ovs_dp_process_packet(skb, &key);
+ return 0;
}
EXPORT_SYMBOL_GPL(ovs_vport_receive);
-/**
- * ovs_vport_send - send a packet on a device
- *
- * @vport: vport on which to send the packet
- * @skb: skb to send
- *
- * Sends the given packet and returns the length of data sent. Either ovs
- * lock or rcu_read_lock must be held.
- */
-int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
-{
- int sent = vport->ops->send(vport, skb);
-
- if (likely(sent > 0)) {
- struct pcpu_sw_netstats *stats;
-
- stats = this_cpu_ptr(vport->percpu_stats);
-
- u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += sent;
- u64_stats_update_end(&stats->syncp);
- } else if (sent < 0) {
- ovs_vport_record_error(vport, VPORT_E_TX_ERROR);
- } else {
- ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
- }
- return sent;
-}
-
-/**
- * ovs_vport_record_error - indicate device error to generic stats layer
- *
- * @vport: vport that encountered the error
- * @err_type: one of enum vport_err_type types to indicate the error type
- *
- * If using the vport generic stats layer indicate that an error of the given
- * type has occurred.
- */
-static void ovs_vport_record_error(struct vport *vport,
- enum vport_err_type err_type)
-{
- switch (err_type) {
- case VPORT_E_RX_DROPPED:
- atomic_long_inc(&vport->err_stats.rx_dropped);
- break;
-
- case VPORT_E_RX_ERROR:
- atomic_long_inc(&vport->err_stats.rx_errors);
- break;
-
- case VPORT_E_TX_DROPPED:
- atomic_long_inc(&vport->err_stats.tx_dropped);
- break;
-
- case VPORT_E_TX_ERROR:
- atomic_long_inc(&vport->err_stats.tx_errors);
- break;
- }
-
-}
-
static void free_vport_rcu(struct rcu_head *rcu)
{
struct vport *vport = container_of(rcu, struct vport, rcu);
@@ -571,57 +479,3 @@ void ovs_vport_deferred_free(struct vport *vport)
call_rcu(&vport->rcu, free_vport_rcu);
}
EXPORT_SYMBOL_GPL(ovs_vport_deferred_free);
-
-int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info,
- struct net *net,
- const struct ovs_tunnel_info *tun_info,
- u8 ipproto,
- u32 skb_mark,
- __be16 tp_src,
- __be16 tp_dst)
-{
- const struct ovs_key_ipv4_tunnel *tun_key;
- struct rtable *rt;
- struct flowi4 fl;
-
- if (unlikely(!tun_info))
- return -EINVAL;
-
- tun_key = &tun_info->tunnel;
-
- * Route lookup to get source IP address.
- * This may need to change if the corresponding logic in the
- * vport ops changes.
- */
- rt = ovs_tunnel_route_lookup(net, tun_key, skb_mark, &fl, ipproto);
- if (IS_ERR(rt))
- return PTR_ERR(rt);
-
- ip_rt_put(rt);
-
- /* Generate egress_tun_info based on tun_info,
- * saddr, tp_src and tp_dst
- */
- __ovs_flow_tun_info_init(egress_tun_info,
- fl.saddr, tun_key->ipv4_dst,
- tun_key->ipv4_tos,
- tun_key->ipv4_ttl,
- tp_src, tp_dst,
- tun_key->tun_id,
- tun_key->tun_flags,
- tun_info->options,
- tun_info->options_len);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ovs_tunnel_get_egress_info);
-
-int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
- struct ovs_tunnel_info *info)
-{
- /* get_egress_tun_info() is only implemented on tunnel ports. */
- if (unlikely(!vport->ops->get_egress_tun_info))
- return -EINVAL;
-
- return vport->ops->get_egress_tun_info(vport, skb, info);
-}
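
ovs_vport_get_stats() above collapses to a snapshot of the device counters. dev_get_stats() fills caller-provided storage and returns a pointer to a consistent rtnl_link_stats64 view, so the vport layer needs neither its own counters nor extra locking; usage in miniature:

struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *s;

s = dev_get_stats(vport->dev, &temp);	/* 'temp' is scratch storage */
pr_info("%s: rx %llu tx %llu\n",
	vport->dev->name, s->rx_packets, s->tx_packets);
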
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index bc85331a6..d341ad6f3 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -35,10 +35,6 @@ struct vport_parms;
/* The following definitions are for users of the vport subsystem: */
-struct vport_net {
- struct vport __rcu *gre_vport;
-};
-
int ovs_vport_init(void);
void ovs_vport_exit(void);
@@ -56,26 +52,6 @@ int ovs_vport_set_upcall_portids(struct vport *, const struct nlattr *pids);
int ovs_vport_get_upcall_portids(const struct vport *, struct sk_buff *);
u32 ovs_vport_find_upcall_portid(const struct vport *, struct sk_buff *);
-int ovs_vport_send(struct vport *, struct sk_buff *);
-
-int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info,
- struct net *net,
- const struct ovs_tunnel_info *tun_info,
- u8 ipproto,
- u32 skb_mark,
- __be16 tp_src,
- __be16 tp_dst);
-int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
- struct ovs_tunnel_info *info);
-
-/* The following definitions are for implementers of vport devices: */
-
-struct vport_err_stats {
- atomic_long_t rx_dropped;
- atomic_long_t rx_errors;
- atomic_long_t tx_dropped;
- atomic_long_t tx_errors;
-};
/**
* struct vport_portids - array of netlink portids of a vport.
* must be protected by rcu.
@@ -101,12 +77,10 @@ struct vport_portids {
* @hash_node: Element in @dev_table hash table in vport.c.
* @dp_hash_node: Element in @datapath->ports hash table in datapath.c.
* @ops: Class structure.
- * @percpu_stats: Points to per-CPU statistics used and maintained by vport
- * @err_stats: Points to error statistics used and maintained by vport
* @detach_list: list used for detaching vport in net-exit call.
*/
struct vport {
- struct rcu_head rcu;
+ struct net_device *dev;
struct datapath *dp;
struct vport_portids __rcu *upcall_portids;
u16 port_no;
@@ -115,10 +89,8 @@ struct vport {
struct hlist_node dp_hash_node;
const struct vport_ops *ops;
- struct pcpu_sw_netstats __percpu *percpu_stats;
-
- struct vport_err_stats err_stats;
struct list_head detach_list;
+ struct rcu_head rcu;
};
/**
@@ -155,11 +127,8 @@ struct vport_parms {
* @get_options: Appends vport-specific attributes for the configuration of an
* existing vport to a &struct sk_buff. May be %NULL for a vport that does not
* have any configuration.
- * @get_name: Get the device's name.
- * @send: Send a packet on the device. Returns the length of the packet sent,
- * zero for dropped packets or negative for error.
+ * @send: Send a packet on the device.
- * @get_egress_tun_info: Get the egress tunnel 5-tuple and other info for
- * a packet.
*/
struct vport_ops {
enum ovs_vport_type type;
@@ -171,24 +140,11 @@ struct vport_ops {
int (*set_options)(struct vport *, struct nlattr *);
int (*get_options)(const struct vport *, struct sk_buff *);
- /* Called with rcu_read_lock or ovs_mutex. */
- const char *(*get_name)(const struct vport *);
-
- int (*send)(struct vport *, struct sk_buff *);
- int (*get_egress_tun_info)(struct vport *, struct sk_buff *,
- struct ovs_tunnel_info *);
-
+ void (*send)(struct vport *, struct sk_buff *);
struct module *owner;
struct list_head list;
};
-enum vport_err_type {
- VPORT_E_RX_DROPPED,
- VPORT_E_RX_ERROR,
- VPORT_E_TX_DROPPED,
- VPORT_E_TX_ERROR,
-};
-
struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *,
const struct vport_parms *);
void ovs_vport_free(struct vport *);
@@ -225,8 +181,8 @@ static inline struct vport *vport_from_priv(void *priv)
return (struct vport *)((u8 *)priv - ALIGN(sizeof(struct vport), VPORT_ALIGN));
}
-void ovs_vport_receive(struct vport *, struct sk_buff *,
- const struct ovs_tunnel_info *);
+int ovs_vport_receive(struct vport *, struct sk_buff *,
+ const struct ip_tunnel_info *);
static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
@@ -235,25 +191,17 @@ static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
}
+static inline const char *ovs_vport_name(struct vport *vport)
+{
+ return vport->dev->name;
+}
+
int ovs_vport_ops_register(struct vport_ops *ops);
void ovs_vport_ops_unregister(struct vport_ops *ops);
-static inline struct rtable *ovs_tunnel_route_lookup(struct net *net,
- const struct ovs_key_ipv4_tunnel *key,
- u32 mark,
- struct flowi4 *fl,
- u8 protocol)
+static inline void ovs_vport_send(struct vport *vport, struct sk_buff *skb)
{
- struct rtable *rt;
-
- memset(fl, 0, sizeof(*fl));
- fl->daddr = key->ipv4_dst;
- fl->saddr = key->ipv4_src;
- fl->flowi4_tos = RT_TOS(key->ipv4_tos);
- fl->flowi4_mark = mark;
- fl->flowi4_proto = protocol;
-
- rt = ip_route_output_key(net, fl);
- return rt;
+ vport->ops->send(vport, skb);
}
+
#endif /* vport.h */
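
With accounting delegated to the net_device layer, ovs_vport_send() is a plain dispatch to ops->send(). A hypothetical caller sketch (the in-tree output path additionally handles the per-packet MRU/fragmentation cases introduced elsewhere in this series):

static void output_to_port(struct datapath *dp, struct sk_buff *skb,
			   int out_port)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport))
		ovs_vport_send(vport, skb);	/* e.g. ovs_netdev_send() */
	else
		kfree_skb(skb);
}
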
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 7851b1222..27b2898f2 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -92,6 +92,7 @@
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
+#include <linux/bpf.h>
#include "internal.h"
@@ -520,13 +521,11 @@ static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
}
static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
- int tx_ring,
struct sk_buff_head *rb_queue)
{
struct tpacket_kbdq_core *pkc;
- pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
- GET_PBDQC_FROM_RB(&po->rx_ring);
+ pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
spin_lock_bh(&rb_queue->lock);
pkc->delete_blk_timer = 1;
@@ -1414,6 +1413,22 @@ static unsigned int fanout_demux_qm(struct packet_fanout *f,
return skb_get_queue_mapping(skb) % num;
}
+static unsigned int fanout_demux_bpf(struct packet_fanout *f,
+ struct sk_buff *skb,
+ unsigned int num)
+{
+ struct bpf_prog *prog;
+ unsigned int ret = 0;
+
+ rcu_read_lock();
+ prog = rcu_dereference(f->bpf_prog);
+ if (prog)
+ ret = BPF_PROG_RUN(prog, skb) % num;
+ rcu_read_unlock();
+
+ return ret;
+}
+
static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
return f->flags & (flag >> 8);
@@ -1458,6 +1473,10 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
case PACKET_FANOUT_ROLLOVER:
idx = fanout_demux_rollover(f, skb, 0, false, num);
break;
+ case PACKET_FANOUT_CBPF:
+ case PACKET_FANOUT_EBPF:
+ idx = fanout_demux_bpf(f, skb, num);
+ break;
}
if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
@@ -1506,6 +1525,103 @@ static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
return false;
}
+static void fanout_init_data(struct packet_fanout *f)
+{
+ switch (f->type) {
+ case PACKET_FANOUT_LB:
+ atomic_set(&f->rr_cur, 0);
+ break;
+ case PACKET_FANOUT_CBPF:
+ case PACKET_FANOUT_EBPF:
+ RCU_INIT_POINTER(f->bpf_prog, NULL);
+ break;
+ }
+}
+
+static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
+{
+ struct bpf_prog *old;
+
+ spin_lock(&f->lock);
+ old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
+ rcu_assign_pointer(f->bpf_prog, new);
+ spin_unlock(&f->lock);
+
+ if (old) {
+ synchronize_net();
+ bpf_prog_destroy(old);
+ }
+}
+
+static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
+ unsigned int len)
+{
+ struct bpf_prog *new;
+ struct sock_fprog fprog;
+ int ret;
+
+ if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
+ return -EPERM;
+ if (len != sizeof(fprog))
+ return -EINVAL;
+ if (copy_from_user(&fprog, data, len))
+ return -EFAULT;
+
+ ret = bpf_prog_create_from_user(&new, &fprog, NULL);
+ if (ret)
+ return ret;
+
+ __fanout_set_data_bpf(po->fanout, new);
+ return 0;
+}
+
+static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
+ unsigned int len)
+{
+ struct bpf_prog *new;
+ u32 fd;
+
+ if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
+ return -EPERM;
+ if (len != sizeof(fd))
+ return -EINVAL;
+ if (copy_from_user(&fd, data, len))
+ return -EFAULT;
+
+ new = bpf_prog_get(fd);
+ if (IS_ERR(new))
+ return PTR_ERR(new);
+ if (new->type != BPF_PROG_TYPE_SOCKET_FILTER) {
+ bpf_prog_put(new);
+ return -EINVAL;
+ }
+
+ __fanout_set_data_bpf(po->fanout, new);
+ return 0;
+}
+
+static int fanout_set_data(struct packet_sock *po, char __user *data,
+ unsigned int len)
+{
+ switch (po->fanout->type) {
+ case PACKET_FANOUT_CBPF:
+ return fanout_set_data_cbpf(po, data, len);
+ case PACKET_FANOUT_EBPF:
+ return fanout_set_data_ebpf(po, data, len);
+ default:
+ return -EINVAL;
+ }
+}
+
+static void fanout_release_data(struct packet_fanout *f)
+{
+ switch (f->type) {
+ case PACKET_FANOUT_CBPF:
+ case PACKET_FANOUT_EBPF:
+ __fanout_set_data_bpf(f, NULL);
+ }
+}
+
static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
struct packet_sock *po = pkt_sk(sk);
@@ -1523,6 +1639,8 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
case PACKET_FANOUT_CPU:
case PACKET_FANOUT_RND:
case PACKET_FANOUT_QM:
+ case PACKET_FANOUT_CBPF:
+ case PACKET_FANOUT_EBPF:
break;
default:
return -EINVAL;
@@ -1565,10 +1683,10 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
match->id = id;
match->type = type;
match->flags = flags;
- atomic_set(&match->rr_cur, 0);
INIT_LIST_HEAD(&match->list);
spin_lock_init(&match->lock);
atomic_set(&match->sk_ref, 0);
+ fanout_init_data(match);
match->prot_hook.type = po->prot_hook.type;
match->prot_hook.dev = po->prot_hook.dev;
match->prot_hook.func = packet_rcv_fanout;
@@ -1614,6 +1732,7 @@ static void fanout_release(struct sock *sk)
if (atomic_dec_and_test(&f->sk_ref)) {
list_del(&f->list);
dev_remove_pack(&f->prot_hook);
+ fanout_release_data(f);
kfree(f);
}
mutex_unlock(&fanout_mutex);
@@ -2784,22 +2903,40 @@ static int packet_release(struct socket *sock)
* Attach a packet hook.
*/
-static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
+static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
+ __be16 proto)
{
struct packet_sock *po = pkt_sk(sk);
struct net_device *dev_curr;
__be16 proto_curr;
bool need_rehook;
+ struct net_device *dev = NULL;
+ int ret = 0;
+ bool unlisted = false;
- if (po->fanout) {
- if (dev)
- dev_put(dev);
-
+ if (po->fanout)
return -EINVAL;
- }
lock_sock(sk);
spin_lock(&po->bind_lock);
+ rcu_read_lock();
+
+ if (name) {
+ dev = dev_get_by_name_rcu(sock_net(sk), name);
+ if (!dev) {
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+ } else if (ifindex) {
+ dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
+ if (!dev) {
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+ }
+
+ if (dev)
+ dev_hold(dev);
proto_curr = po->prot_hook.type;
dev_curr = po->prot_hook.dev;
@@ -2807,14 +2944,29 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
need_rehook = proto_curr != proto || dev_curr != dev;
if (need_rehook) {
- unregister_prot_hook(sk, true);
+ if (po->running) {
+ rcu_read_unlock();
+ __unregister_prot_hook(sk, true);
+ rcu_read_lock();
+ dev_curr = po->prot_hook.dev;
+ if (dev)
+ unlisted = !dev_get_by_index_rcu(sock_net(sk),
+ dev->ifindex);
+ }
po->num = proto;
po->prot_hook.type = proto;
- po->prot_hook.dev = dev;
- po->ifindex = dev ? dev->ifindex : 0;
- packet_cached_dev_assign(po, dev);
+ if (unlikely(unlisted)) {
+ dev_put(dev);
+ po->prot_hook.dev = NULL;
+ po->ifindex = -1;
+ packet_cached_dev_reset(po);
+ } else {
+ po->prot_hook.dev = dev;
+ po->ifindex = dev ? dev->ifindex : 0;
+ packet_cached_dev_assign(po, dev);
+ }
}
if (dev_curr)
dev_put(dev_curr);
@@ -2822,7 +2974,7 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
if (proto == 0 || !need_rehook)
goto out_unlock;
- if (!dev || (dev->flags & IFF_UP)) {
+ if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
register_prot_hook(sk);
} else {
sk->sk_err = ENETDOWN;
@@ -2831,9 +2983,10 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
}
out_unlock:
+ rcu_read_unlock();
spin_unlock(&po->bind_lock);
release_sock(sk);
- return 0;
+ return ret;
}
/*
@@ -2845,8 +2998,6 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
{
struct sock *sk = sock->sk;
char name[15];
- struct net_device *dev;
- int err = -ENODEV;
/*
* Check legality
@@ -2856,19 +3007,13 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
return -EINVAL;
strlcpy(name, uaddr->sa_data, sizeof(name));
- dev = dev_get_by_name(sock_net(sk), name);
- if (dev)
- err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
- return err;
+ return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
}
static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
struct sock *sk = sock->sk;
- struct net_device *dev = NULL;
- int err;
-
/*
* Check legality
@@ -2879,16 +3024,8 @@ static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len
if (sll->sll_family != AF_PACKET)
return -EINVAL;
- if (sll->sll_ifindex) {
- err = -ENODEV;
- dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
- if (dev == NULL)
- goto out;
- }
- err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
-
-out:
- return err;
+ return packet_do_bind(sk, NULL, sll->sll_ifindex,
+ sll->sll_protocol ? : pkt_sk(sk)->num);
}
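
Both bind flavours now just forward a name or ifindex and let packet_do_bind()
resolve the device itself under rcu_read_lock(), closing the window in which a
device could be unregistered between the caller's lookup and the hook rehook.
Userspace is unaffected; the usual ifindex bind still looks like this (the
interface name is illustrative):

#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>

static int bind_packet_socket(int fd)
{
	struct sockaddr_ll sll;

	memset(&sll, 0, sizeof(sll));
	sll.sll_family   = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_ALL);
	sll.sll_ifindex  = if_nametoindex("eth0");	/* illustrative */

	return bind(fd, (struct sockaddr *)&sll, sizeof(sll));
}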
static struct proto packet_proto = {
@@ -3533,6 +3670,13 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
return fanout_add(sk, val & 0xffff, val >> 16);
}
+ case PACKET_FANOUT_DATA:
+ {
+ if (!po->fanout)
+ return -EINVAL;
+
+ return fanout_set_data(po, optval, optlen);
+ }
case PACKET_TX_HAS_OFF:
{
unsigned int val;
@@ -4045,7 +4189,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
if (closing && (po->tp_version > TPACKET_V2)) {
/* Because we don't support block-based V3 on tx-ring */
if (!tx_ring)
- prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
+ prb_shutdown_retire_blk_timer(po, rb_queue);
}
release_sock(sk);
diff --git a/net/packet/internal.h b/net/packet/internal.h
index e20b3e882..9ee46314b 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -79,7 +79,10 @@ struct packet_fanout {
u16 id;
u8 type;
u8 flags;
- atomic_t rr_cur;
+ union {
+ atomic_t rr_cur;
+ struct bpf_prog __rcu *bpf_prog;
+ };
struct list_head list;
struct sock *arr[PACKET_FANOUT_MAX];
spinlock_t lock;
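
The CBPF/EBPF fanout modes added above are driven entirely from userspace: a
socket first joins a fanout group with PACKET_FANOUT, then installs a program
through the new PACKET_FANOUT_DATA option, which fanout_set_data() dispatches
on the group type (and rejects with -EINVAL if the socket has no fanout yet).
A minimal userspace sketch of the classic-BPF path; the group id 42 and the
deliberately trivial two-instruction filter are illustrative only:

#include <linux/filter.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <arpa/inet.h>
#include <sys/socket.h>

int join_cbpf_fanout(void)
{
	/* A = first packet byte; fanout_demux_bpf() reduces it mod num */
	struct sock_filter insns[] = {
		{ BPF_LD  | BPF_B | BPF_ABS, 0, 0, 0 },
		{ BPF_RET | BPF_A,           0, 0, 0 },
	};
	struct sock_fprog fprog = { .len = 2, .filter = insns };
	int arg = 42 | (PACKET_FANOUT_CBPF << 16);
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0)
		return -1;
	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg)) < 0)
		return -1;
	return setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA,
			  &fprog, sizeof(fprog));
}

Since the demux result is taken modulo the group size, the program only has to
return a stable per-flow value, not a socket index.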
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 896834cd3..a2f28a6d4 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -438,6 +438,14 @@ static const struct proto_ops rds_proto_ops = {
.sendpage = sock_no_sendpage,
};
+static void rds_sock_destruct(struct sock *sk)
+{
+ struct rds_sock *rs = rds_sk_to_rs(sk);
+
+ WARN_ON((&rs->rs_item != rs->rs_item.next ||
+ &rs->rs_item != rs->rs_item.prev));
+}
+
static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
{
struct rds_sock *rs;
@@ -445,6 +453,7 @@ static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
sock_init_data(sock, sk);
sock->ops = &rds_proto_ops;
sk->sk_protocol = protocol;
+ sk->sk_destruct = rds_sock_destruct;
rs = rds_sk_to_rs(sk);
spin_lock_init(&rs->rs_lock);
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 4ebd29c12..dd666fb9b 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -185,7 +185,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
ret = 0;
goto out;
}
- trans = rds_trans_get_preferred(sin->sin_addr.s_addr);
+ trans = rds_trans_get_preferred(sock_net(sock->sk),
+ sin->sin_addr.s_addr);
if (!trans) {
ret = -EADDRNOTAVAIL;
rds_remove_bound(rs);
diff --git a/net/rds/connection.c b/net/rds/connection.c
index da6da57e5..49adeef80 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -70,7 +70,8 @@ static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
} while (0)
/* rcu read lock must be held or the connection spinlock */
-static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
+static struct rds_connection *rds_conn_lookup(struct net *net,
+ struct hlist_head *head,
__be32 laddr, __be32 faddr,
struct rds_transport *trans)
{
@@ -78,7 +79,7 @@ static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
hlist_for_each_entry_rcu(conn, head, c_hash_node) {
if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
- conn->c_trans == trans) {
+ conn->c_trans == trans && net == rds_conn_net(conn)) {
ret = conn;
break;
}
@@ -117,7 +118,8 @@ static void rds_conn_reset(struct rds_connection *conn)
* For now they are not garbage collected once they're created. They
* are torn down as the module is removed, if ever.
*/
-static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
+static struct rds_connection *__rds_conn_create(struct net *net,
+ __be32 laddr, __be32 faddr,
struct rds_transport *trans, gfp_t gfp,
int is_outgoing)
{
@@ -131,7 +133,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
goto new_conn;
rcu_read_lock();
- conn = rds_conn_lookup(head, laddr, faddr, trans);
+ conn = rds_conn_lookup(net, head, laddr, faddr, trans);
if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
laddr == faddr && !is_outgoing) {
/* This is a looped back IB connection, and we're
@@ -157,6 +159,7 @@ new_conn:
conn->c_faddr = faddr;
spin_lock_init(&conn->c_lock);
conn->c_next_tx_seq = 1;
+ rds_conn_net_set(conn, net);
init_waitqueue_head(&conn->c_waitq);
INIT_LIST_HEAD(&conn->c_send_queue);
@@ -174,7 +177,7 @@ new_conn:
* can bind to the destination address then we'd rather the messages
* flow through loopback rather than either transport.
*/
- loop_trans = rds_trans_get_preferred(faddr);
+ loop_trans = rds_trans_get_preferred(net, faddr);
if (loop_trans) {
rds_trans_put(loop_trans);
conn->c_loopback = 1;
@@ -187,6 +190,12 @@ new_conn:
}
}
+ if (trans == NULL) {
+ kmem_cache_free(rds_conn_slab, conn);
+ conn = ERR_PTR(-ENODEV);
+ goto out;
+ }
+
conn->c_trans = trans;
ret = trans->conn_alloc(conn, gfp);
@@ -237,7 +246,7 @@ new_conn:
if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
found = NULL;
else
- found = rds_conn_lookup(head, laddr, faddr, trans);
+ found = rds_conn_lookup(net, head, laddr, faddr, trans);
if (found) {
trans->conn_free(conn->c_transport_data);
kmem_cache_free(rds_conn_slab, conn);
@@ -260,17 +269,19 @@ out:
return conn;
}
-struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
+struct rds_connection *rds_conn_create(struct net *net,
+ __be32 laddr, __be32 faddr,
struct rds_transport *trans, gfp_t gfp)
{
- return __rds_conn_create(laddr, faddr, trans, gfp, 0);
+ return __rds_conn_create(net, laddr, faddr, trans, gfp, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_create);
-struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
+struct rds_connection *rds_conn_create_outgoing(struct net *net,
+ __be32 laddr, __be32 faddr,
struct rds_transport *trans, gfp_t gfp)
{
- return __rds_conn_create(laddr, faddr, trans, gfp, 1);
+ return __rds_conn_create(net, laddr, faddr, trans, gfp, 1);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);
@@ -297,6 +308,8 @@ void rds_conn_shutdown(struct rds_connection *conn)
wait_event(conn->c_waitq,
!test_bit(RDS_IN_XMIT, &conn->c_flags));
+ wait_event(conn->c_waitq,
+ !test_bit(RDS_RECV_REFILL, &conn->c_flags));
conn->c_trans->conn_shutdown(conn);
rds_conn_reset(conn);
diff --git a/net/rds/ib.c b/net/rds/ib.c
index ba2dffeff..2d3f2ab47 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -99,8 +99,6 @@ static void rds_ib_dev_free(struct work_struct *work)
if (rds_ibdev->mr_pool)
rds_ib_destroy_mr_pool(rds_ibdev->mr_pool);
- if (rds_ibdev->mr)
- ib_dereg_mr(rds_ibdev->mr);
if (rds_ibdev->pd)
ib_dealloc_pd(rds_ibdev->pd);
@@ -164,12 +162,6 @@ static void rds_ib_add_one(struct ib_device *device)
goto put_dev;
}
- rds_ibdev->mr = ib_get_dma_mr(rds_ibdev->pd, IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(rds_ibdev->mr)) {
- rds_ibdev->mr = NULL;
- goto put_dev;
- }
-
rds_ibdev->mr_pool = rds_ib_create_mr_pool(rds_ibdev);
if (IS_ERR(rds_ibdev->mr_pool)) {
rds_ibdev->mr_pool = NULL;
@@ -230,11 +222,10 @@ struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device)
*
* This can be called at any time and can be racing with any other RDS path.
*/
-static void rds_ib_remove_one(struct ib_device *device)
+static void rds_ib_remove_one(struct ib_device *device, void *client_data)
{
- struct rds_ib_device *rds_ibdev;
+ struct rds_ib_device *rds_ibdev = client_data;
- rds_ibdev = ib_get_client_data(device, &rds_ib_client);
if (!rds_ibdev)
return;
@@ -317,7 +308,7 @@ static void rds_ib_ic_info(struct socket *sock, unsigned int len,
* allowed to influence which paths have priority. We could call userspace
* asserting this policy "routing".
*/
-static int rds_ib_laddr_check(__be32 addr)
+static int rds_ib_laddr_check(struct net *net, __be32 addr)
{
int ret;
struct rdma_cm_id *cm_id;
@@ -366,6 +357,7 @@ void rds_ib_exit(void)
rds_ib_sysctl_exit();
rds_ib_recv_exit();
rds_trans_unregister(&rds_ib_transport);
+ rds_ib_fmr_exit();
}
struct rds_transport rds_ib_transport = {
@@ -401,10 +393,14 @@ int rds_ib_init(void)
INIT_LIST_HEAD(&rds_ib_devices);
- ret = ib_register_client(&rds_ib_client);
+ ret = rds_ib_fmr_init();
if (ret)
goto out;
+ ret = ib_register_client(&rds_ib_client);
+ if (ret)
+ goto out_fmr_exit;
+
ret = rds_ib_sysctl_init();
if (ret)
goto out_ibreg;
@@ -427,6 +423,8 @@ out_sysctl:
rds_ib_sysctl_exit();
out_ibreg:
rds_ib_unregister_client();
+out_fmr_exit:
+ rds_ib_fmr_exit();
out:
return ret;
}
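
The new out_fmr_exit label keeps rds_ib_init()'s unwind symmetric: every step
that succeeded before the failure point is undone, in reverse order, and
nothing else. A minimal sketch of the pattern with hypothetical step_a()/
step_b()/undo_a() helpers (standing in for rds_ib_fmr_init(),
ib_register_client() and rds_ib_fmr_exit()):

static int step_a(void);
static int step_b(void);
static void undo_a(void);

static int example_init(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto out;

	ret = step_b();
	if (ret)
		goto out_a;	/* undo only what already succeeded */

	return 0;

out_a:
	undo_a();
out:
	return ret;
}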
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 86d88ec5d..aae60fda7 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -100,7 +100,6 @@ struct rds_ib_connection {
/* alphabet soup, IBTA style */
struct rdma_cm_id *i_cm_id;
struct ib_pd *i_pd;
- struct ib_mr *i_mr;
struct ib_cq *i_send_cq;
struct ib_cq *i_recv_cq;
@@ -173,7 +172,6 @@ struct rds_ib_device {
struct list_head conn_list;
struct ib_device *dev;
struct ib_pd *pd;
- struct ib_mr *mr;
struct rds_ib_mr_pool *mr_pool;
unsigned int fmr_max_remaps;
unsigned int max_fmrs;
@@ -313,6 +311,8 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
void rds_ib_sync_mr(void *trans_private, int dir);
void rds_ib_free_mr(void *trans_private, int invalidate);
void rds_ib_flush_mrs(void);
+int rds_ib_fmr_init(void);
+void rds_ib_fmr_exit(void);
/* ib_recv.c */
int rds_ib_recv_init(void);
@@ -320,7 +320,7 @@ void rds_ib_recv_exit(void);
int rds_ib_recv(struct rds_connection *conn);
int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
-void rds_ib_recv_refill(struct rds_connection *conn, int prefill);
+void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 0da2a45b3..9043f5c04 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -135,7 +135,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
rds_ib_recv_init_ring(ic);
/* Post receive buffers - as a side effect, this will update
* the posted credit count. */
- rds_ib_recv_refill(conn, 1);
+ rds_ib_recv_refill(conn, 1, GFP_KERNEL);
/* Tune RNR behavior */
rds_ib_tune_rnr(ic, &qp_attr);
@@ -269,7 +269,6 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
/* Protection domain and memory range */
ic->i_pd = rds_ibdev->pd;
- ic->i_mr = rds_ibdev->mr;
cq_attr.cqe = ic->i_send_ring.w_nr + 1;
ic->i_send_cq = ib_create_cq(dev, rds_ib_send_cq_comp_handler,
@@ -375,7 +374,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
rds_ib_recv_init_ack(ic);
- rdsdebug("conn %p pd %p mr %p cq %p %p\n", conn, ic->i_pd, ic->i_mr,
+ rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
ic->i_send_cq, ic->i_recv_cq);
out:
@@ -448,8 +447,9 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
(unsigned long long)be64_to_cpu(lguid),
(unsigned long long)be64_to_cpu(fguid));
- conn = rds_conn_create(dp->dp_daddr, dp->dp_saddr, &rds_ib_transport,
- GFP_KERNEL);
+ /* RDS/IB is not currently netns aware, thus init_net */
+ conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr,
+ &rds_ib_transport, GFP_KERNEL);
if (IS_ERR(conn)) {
rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
conn = NULL;
@@ -639,6 +639,15 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
(atomic_read(&ic->i_signaled_sends) == 0));
tasklet_kill(&ic->i_recv_tasklet);
+ /* first destroy the ib state that generates callbacks */
+ if (ic->i_cm_id->qp)
+ rdma_destroy_qp(ic->i_cm_id);
+ if (ic->i_send_cq)
+ ib_destroy_cq(ic->i_send_cq);
+ if (ic->i_recv_cq)
+ ib_destroy_cq(ic->i_recv_cq);
+
+ /* then free the resources that ib callbacks use */
if (ic->i_send_hdrs)
ib_dma_free_coherent(dev,
ic->i_send_ring.w_nr *
@@ -662,12 +671,6 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
if (ic->i_recvs)
rds_ib_recv_clear_ring(ic);
- if (ic->i_cm_id->qp)
- rdma_destroy_qp(ic->i_cm_id);
- if (ic->i_send_cq)
- ib_destroy_cq(ic->i_send_cq);
- if (ic->i_recv_cq)
- ib_destroy_cq(ic->i_recv_cq);
rdma_destroy_id(ic->i_cm_id);
/*
@@ -678,7 +681,6 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
ic->i_cm_id = NULL;
ic->i_pd = NULL;
- ic->i_mr = NULL;
ic->i_send_cq = NULL;
ic->i_recv_cq = NULL;
ic->i_send_hdrs = NULL;
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 657ba9f5d..251d1ce0b 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -83,6 +83,25 @@ struct rds_ib_mr_pool {
struct ib_fmr_attr fmr_attr;
};
+struct workqueue_struct *rds_ib_fmr_wq;
+
+int rds_ib_fmr_init(void)
+{
+ rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
+ if (!rds_ib_fmr_wq)
+ return -ENOMEM;
+ return 0;
+}
+
+/* By the time this is called all the IB devices should have been torn down and
+ * had their pools freed. As each pool is freed its work struct is waited on,
+ * so the pool flushing work queue should be idle by the time we get here.
+ */
+void rds_ib_fmr_exit(void)
+{
+ destroy_workqueue(rds_ib_fmr_wq);
+}
+
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);
@@ -151,12 +170,17 @@ int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
struct rds_ib_device *rds_ibdev_old;
rds_ibdev_old = rds_ib_get_device(ipaddr);
- if (rds_ibdev_old) {
+ if (!rds_ibdev_old)
+ return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
+
+ if (rds_ibdev_old != rds_ibdev) {
rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
rds_ib_dev_put(rds_ibdev_old);
+ return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
}
+ rds_ib_dev_put(rds_ibdev_old);
- return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
+ return 0;
}
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
@@ -336,8 +360,6 @@ static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
goto out_no_cigar;
}
- memset(ibmr, 0, sizeof(*ibmr));
-
ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
(IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_READ |
@@ -485,7 +507,7 @@ static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
/* FIXME we need a way to tell a r/w MR
* from a r/o MR */
- BUG_ON(irqs_disabled());
+ WARN_ON(!page->mapping && irqs_disabled());
set_page_dirty(page);
put_page(page);
}
@@ -523,11 +545,13 @@ static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int fr
/*
* given an llist of mrs, put them all into the list_head for more processing
*/
-static void llist_append_to_list(struct llist_head *llist, struct list_head *list)
+static unsigned int llist_append_to_list(struct llist_head *llist,
+ struct list_head *list)
{
struct rds_ib_mr *ibmr;
struct llist_node *node;
struct llist_node *next;
+ unsigned int count = 0;
node = llist_del_all(llist);
while (node) {
@@ -535,7 +559,9 @@ static void llist_append_to_list(struct llist_head *llist, struct list_head *lis
ibmr = llist_entry(node, struct rds_ib_mr, llnode);
list_add_tail(&ibmr->unmap_list, list);
node = next;
+ count++;
}
+ return count;
}
/*
@@ -576,7 +602,7 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
LIST_HEAD(unmap_list);
LIST_HEAD(fmr_list);
unsigned long unpinned = 0;
- unsigned int nfreed = 0, ncleaned = 0, free_goal;
+ unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;
int ret = 0;
rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);
@@ -618,8 +644,8 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
/* Get the list of all MRs to be dropped. Ordering matters -
* we want to put drop_list ahead of free_list.
*/
- llist_append_to_list(&pool->drop_list, &unmap_list);
- llist_append_to_list(&pool->free_list, &unmap_list);
+ dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
+ dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
if (free_all)
llist_append_to_list(&pool->clean_list, &unmap_list);
@@ -647,7 +673,6 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
kfree(ibmr);
nfreed++;
}
- ncleaned++;
}
if (!list_empty(&unmap_list)) {
@@ -673,7 +698,7 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
}
atomic_sub(unpinned, &pool->free_pinned);
- atomic_sub(ncleaned, &pool->dirty_count);
+ atomic_sub(dirty_to_clean, &pool->dirty_count);
atomic_sub(nfreed, &pool->item_count);
out:
@@ -710,16 +735,18 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
/* If we've pinned too many pages, request a flush */
if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
- atomic_read(&pool->dirty_count) >= pool->max_items / 10)
- schedule_delayed_work(&pool->flush_worker, 10);
+ atomic_read(&pool->dirty_count) >= pool->max_items / 5)
+ queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
if (invalidate) {
if (likely(!in_interrupt())) {
rds_ib_flush_mr_pool(pool, 0, NULL);
} else {
/* We get here if the user created a MR marked
- * as use_once and invalidate at the same time. */
- schedule_delayed_work(&pool->flush_worker, 10);
+ * as use_once and invalidate at the same time.
+ */
+ queue_delayed_work(rds_ib_fmr_wq,
+ &pool->flush_worker, 10);
}
}
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index cac5b4506..f43831e41 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -62,12 +62,12 @@ void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
sge = &recv->r_sge[0];
sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
sge->length = sizeof(struct rds_header);
- sge->lkey = ic->i_mr->lkey;
+ sge->lkey = ic->i_pd->local_dma_lkey;
sge = &recv->r_sge[1];
sge->addr = 0;
sge->length = RDS_FRAG_SIZE;
- sge->lkey = ic->i_mr->lkey;
+ sge->lkey = ic->i_pd->local_dma_lkey;
}
}
@@ -297,7 +297,7 @@ static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic
}
static int rds_ib_recv_refill_one(struct rds_connection *conn,
- struct rds_ib_recv_work *recv, int prefill)
+ struct rds_ib_recv_work *recv, gfp_t gfp)
{
struct rds_ib_connection *ic = conn->c_transport_data;
struct ib_sge *sge;
@@ -305,7 +305,7 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn,
gfp_t slab_mask = GFP_NOWAIT;
gfp_t page_mask = GFP_NOWAIT;
- if (prefill) {
+ if (gfp & __GFP_WAIT) {
slab_mask = GFP_KERNEL;
page_mask = GFP_HIGHUSER;
}
@@ -347,6 +347,24 @@ out:
return ret;
}
+static int acquire_refill(struct rds_connection *conn)
+{
+ return test_and_set_bit(RDS_RECV_REFILL, &conn->c_flags) == 0;
+}
+
+static void release_refill(struct rds_connection *conn)
+{
+ clear_bit(RDS_RECV_REFILL, &conn->c_flags);
+
+ /* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
+ * hot path and finding waiters is very rare. We don't want to walk
+ * the system-wide hashed waitqueue buckets in the fast path only to
+ * almost never find waiters.
+ */
+ if (waitqueue_active(&conn->c_waitq))
+ wake_up_all(&conn->c_waitq);
+}
+
/*
* This tries to allocate and post unused work requests after making sure that
* they have all the allocations they need to queue received fragments into
@@ -354,15 +372,23 @@ out:
*
* -1 is returned if posting fails due to temporary resource exhaustion.
*/
-void rds_ib_recv_refill(struct rds_connection *conn, int prefill)
+void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
{
struct rds_ib_connection *ic = conn->c_transport_data;
struct rds_ib_recv_work *recv;
struct ib_recv_wr *failed_wr;
unsigned int posted = 0;
int ret = 0;
+ bool can_wait = !!(gfp & __GFP_WAIT);
u32 pos;
+ /* the goal here is to just make sure that someone, somewhere
+ * is posting buffers. If we can't get the refill lock,
+ * let them do their thing
+ */
+ if (!acquire_refill(conn))
+ return;
+
while ((prefill || rds_conn_up(conn)) &&
rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
if (pos >= ic->i_recv_ring.w_nr) {
@@ -372,7 +398,7 @@ void rds_ib_recv_refill(struct rds_connection *conn, int prefill)
}
recv = &ic->i_recvs[pos];
- ret = rds_ib_recv_refill_one(conn, recv, prefill);
+ ret = rds_ib_recv_refill_one(conn, recv, gfp);
if (ret) {
break;
}
@@ -402,6 +428,24 @@ void rds_ib_recv_refill(struct rds_connection *conn, int prefill)
if (ret)
rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
+
+ release_refill(conn);
+
+ /* if we're called from the softirq handler, we'll be GFP_NOWAIT.
+ * in this case the ring being low is going to lead to more interrupts
+ * and we can safely let the softirq code take care of it unless the
+ * ring is completely empty.
+ *
+ * if we're called from krdsd, we'll be GFP_KERNEL. In this case
+ * we might have raced with the softirq code while we had the refill
+ * lock held. Use rds_ib_ring_low() instead of ring_empty to decide
+ * if we should requeue.
+ */
+ if (rds_conn_up(conn) &&
+ ((can_wait && rds_ib_ring_low(&ic->i_recv_ring)) ||
+ rds_ib_ring_empty(&ic->i_recv_ring))) {
+ queue_delayed_work(rds_wq, &conn->c_recv_w, 1);
+ }
}
/*
@@ -520,7 +564,7 @@ void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
sge->addr = ic->i_ack_dma;
sge->length = sizeof(struct rds_header);
- sge->lkey = ic->i_mr->lkey;
+ sge->lkey = ic->i_pd->local_dma_lkey;
wr->sg_list = sge;
wr->num_sge = 1;
@@ -982,10 +1026,17 @@ static inline void rds_poll_cq(struct rds_ib_connection *ic,
}
/*
- * It's very important that we only free this ring entry if we've truly
- * freed the resources allocated to the entry. The refilling path can
- * leak if we don't.
+ * rds_ib_process_recv() doesn't always consume the frag, and
+ * we might not have called it at all if the wc didn't indicate
+ * success. We already unmapped the frag's pages, though, and
+ * the following rds_ib_ring_free() call tells the refill path
+ * that it will not find an allocated frag here. Make sure we
+ * keep that promise by freeing a frag that's still on the ring.
*/
+ if (recv->r_frag) {
+ rds_ib_frag_free(ic, recv->r_frag);
+ recv->r_frag = NULL;
+ }
rds_ib_ring_free(&ic->i_recv_ring, 1);
}
}
@@ -1016,7 +1067,7 @@ void rds_ib_recv_tasklet_fn(unsigned long data)
rds_ib_stats_inc(s_ib_rx_ring_empty);
if (rds_ib_ring_low(&ic->i_recv_ring))
- rds_ib_recv_refill(conn, 0);
+ rds_ib_recv_refill(conn, 0, GFP_NOWAIT);
}
int rds_ib_recv(struct rds_connection *conn)
@@ -1025,8 +1076,10 @@ int rds_ib_recv(struct rds_connection *conn)
int ret = 0;
rdsdebug("conn %p\n", conn);
- if (rds_conn_up(conn))
+ if (rds_conn_up(conn)) {
rds_ib_attempt_ack(ic);
+ rds_ib_recv_refill(conn, 0, GFP_KERNEL);
+ }
return ret;
}
@@ -1049,9 +1102,10 @@ int rds_ib_recv_init(void)
rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
sizeof(struct rds_page_frag),
0, SLAB_HWCACHE_ALIGN, NULL);
- if (!rds_ib_frag_slab)
+ if (!rds_ib_frag_slab) {
kmem_cache_destroy(rds_ib_incoming_slab);
- else
+ rds_ib_incoming_slab = NULL;
+ } else
ret = 0;
out:
return ret;
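
The acquire_refill()/release_refill() pair above is a one-bit lock:
test_and_set_bit() admits exactly one refiller per connection, and the new
wait_event() in rds_conn_shutdown() sleeps on c_waitq until the bit drops.
A userspace C11 analogue of the same hand-off (names invented for
illustration; the kernel version also wakes c_waitq on release):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag refill_busy = ATOMIC_FLAG_INIT;

static bool acquire_refill(void)
{
	/* the winner sees the flag clear and sets it; losers back off */
	return !atomic_flag_test_and_set(&refill_busy);
}

static void release_refill(void)
{
	atomic_flag_clear(&refill_busy);
}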
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 5d0a704fa..4e8804708 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -202,9 +202,9 @@ void rds_ib_send_init_ring(struct rds_ib_connection *ic)
sge = &send->s_sge[0];
sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
sge->length = sizeof(struct rds_header);
- sge->lkey = ic->i_mr->lkey;
+ sge->lkey = ic->i_pd->local_dma_lkey;
- send->s_sge[1].lkey = ic->i_mr->lkey;
+ send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;
}
}
@@ -709,6 +709,11 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
if (scat == &rm->data.op_sg[rm->data.op_count]) {
prev->s_op = ic->i_data_op;
prev->s_wr.send_flags |= IB_SEND_SOLICITED;
+ if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED)) {
+ ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
+ prev->s_wr.send_flags |= IB_SEND_SIGNALED;
+ nr_sig++;
+ }
ic->i_data_op = NULL;
}
@@ -813,7 +818,7 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
/* Convert our struct scatterlist to struct ib_sge */
send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg);
send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg);
- send->s_sge[0].lkey = ic->i_mr->lkey;
+ send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;
rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
send->s_sge[0].addr, send->s_sge[0].length);
@@ -927,7 +932,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
send->s_sge[j].addr =
ib_sg_dma_address(ic->i_cm_id->device, scat);
send->s_sge[j].length = len;
- send->s_sge[j].lkey = ic->i_mr->lkey;
+ send->s_sge[j].lkey = ic->i_pd->local_dma_lkey;
sent += len;
rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);
diff --git a/net/rds/iw.c b/net/rds/iw.c
index 589935661..3df0295c6 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -125,12 +125,11 @@ free_attr:
kfree(dev_attr);
}
-static void rds_iw_remove_one(struct ib_device *device)
+static void rds_iw_remove_one(struct ib_device *device, void *client_data)
{
- struct rds_iw_device *rds_iwdev;
+ struct rds_iw_device *rds_iwdev = client_data;
struct rds_iw_cm_id *i_cm_id, *next;
- rds_iwdev = ib_get_client_data(device, &rds_iw_client);
if (!rds_iwdev)
return;
@@ -149,10 +148,7 @@ static void rds_iw_remove_one(struct ib_device *device)
if (rds_iwdev->mr)
ib_dereg_mr(rds_iwdev->mr);
- while (ib_dealloc_pd(rds_iwdev->pd)) {
- rdsdebug("Failed to dealloc pd %p\n", rds_iwdev->pd);
- msleep(1);
- }
+ ib_dealloc_pd(rds_iwdev->pd);
list_del(&rds_iwdev->list);
kfree(rds_iwdev);
@@ -218,7 +214,7 @@ static void rds_iw_ic_info(struct socket *sock, unsigned int len,
* allowed to influence which paths have priority. We could call userspace
* asserting this policy "routing".
*/
-static int rds_iw_laddr_check(__be32 addr)
+static int rds_iw_laddr_check(struct net *net, __be32 addr)
{
int ret;
struct rdma_cm_id *cm_id;
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index 8f486fa32..a6553a6fb 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -398,8 +398,9 @@ int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
&dp->dp_saddr, &dp->dp_daddr,
RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version));
- conn = rds_conn_create(dp->dp_daddr, dp->dp_saddr, &rds_iw_transport,
- GFP_KERNEL);
+ /* RDS/IW is not currently netns aware, thus init_net */
+ conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr,
+ &rds_iw_transport, GFP_KERNEL);
if (IS_ERR(conn)) {
rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
conn = NULL;
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index dba8d0864..6a8fbd6e6 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -667,11 +667,12 @@ static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool,
struct ib_mr *mr;
int err;
- mr = ib_alloc_fast_reg_mr(rds_iwdev->pd, pool->max_message_size);
+ mr = ib_alloc_mr(rds_iwdev->pd, IB_MR_TYPE_MEM_REG,
+ pool->max_message_size);
if (IS_ERR(mr)) {
err = PTR_ERR(mr);
- printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_mr failed (err=%d)\n", err);
+ printk(KERN_WARNING "RDS/IW: ib_alloc_mr failed (err=%d)\n", err);
return err;
}
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 334fe98c5..86152ec3b 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -153,9 +153,10 @@ void rds_iw_send_init_ring(struct rds_iw_connection *ic)
sge->length = sizeof(struct rds_header);
sge->lkey = 0;
- send->s_mr = ib_alloc_fast_reg_mr(ic->i_pd, fastreg_message_size);
+ send->s_mr = ib_alloc_mr(ic->i_pd, IB_MR_TYPE_MEM_REG,
+ fastreg_message_size);
if (IS_ERR(send->s_mr)) {
- printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_mr failed\n");
+ printk(KERN_WARNING "RDS/IW: ib_alloc_mr failed\n");
break;
}
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 40084d843..4c93badea 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -435,9 +435,10 @@ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
/* If the MR was marked as invalidate, this will
* trigger an async flush. */
- if (zot_me)
+ if (zot_me) {
rds_destroy_mr(mr);
- rds_mr_put(mr);
+ rds_mr_put(mr);
+ }
}
void rds_rdma_free_op(struct rm_rdma_op *ro)
@@ -451,7 +452,7 @@ void rds_rdma_free_op(struct rm_rdma_op *ro)
* is the case for a RDMA_READ which copies from remote
* to local memory */
if (!ro->op_write) {
- BUG_ON(irqs_disabled());
+ WARN_ON(!page->mapping && irqs_disabled());
set_page_dirty(page);
}
put_page(page);
@@ -658,6 +659,8 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
if (ret < 0)
goto out;
+ else
+ ret = 0;
rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
nr_bytes, nr, iov->bytes, iov->addr);
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 208240836..b9b40af53 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -34,6 +34,7 @@
#include <rdma/rdma_cm.h>
#include "rdma_transport.h"
+#include "ib.h"
static struct rdma_cm_id *rds_rdma_listen_id;
@@ -82,8 +83,18 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
break;
case RDMA_CM_EVENT_ROUTE_RESOLVED:
- /* XXX worry about racing with listen acceptance */
- ret = trans->cm_initiate_connect(cm_id);
+ /* Connection could have been dropped so make sure the
+ * cm_id is valid before proceeding
+ */
+ if (conn) {
+ struct rds_ib_connection *ibic;
+
+ ibic = conn->c_transport_data;
+ if (ibic && ibic->i_cm_id == cm_id)
+ ret = trans->cm_initiate_connect(cm_id);
+ else
+ rds_conn_drop(conn);
+ }
break;
case RDMA_CM_EVENT_ESTABLISHED:
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 2260c1e43..afb4048d0 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -80,6 +80,7 @@ enum {
#define RDS_LL_SEND_FULL 0
#define RDS_RECONNECT_PENDING 1
#define RDS_IN_XMIT 2
+#define RDS_RECV_REFILL 3
struct rds_connection {
struct hlist_node c_hash_node;
@@ -128,8 +129,21 @@ struct rds_connection {
/* Protocol version */
unsigned int c_version;
+ possible_net_t c_net;
};
+static inline
+struct net *rds_conn_net(struct rds_connection *conn)
+{
+ return read_pnet(&conn->c_net);
+}
+
+static inline
+void rds_conn_net_set(struct rds_connection *conn, struct net *net)
+{
+ write_pnet(&conn->c_net, net);
+}
+
#define RDS_FLAG_CONG_BITMAP 0x01
#define RDS_FLAG_ACK_REQUIRED 0x02
#define RDS_FLAG_RETRANSMITTED 0x04
@@ -417,7 +431,7 @@ struct rds_transport {
unsigned int t_prefer_loopback:1;
unsigned int t_type;
- int (*laddr_check)(__be32 addr);
+ int (*laddr_check)(struct net *net, __be32 addr);
int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
void (*conn_free)(void *data);
int (*conn_connect)(struct rds_connection *conn);
@@ -608,9 +622,11 @@ struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);
/* conn.c */
int rds_conn_init(void);
void rds_conn_exit(void);
-struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
+struct rds_connection *rds_conn_create(struct net *net,
+ __be32 laddr, __be32 faddr,
struct rds_transport *trans, gfp_t gfp);
-struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
+struct rds_connection *rds_conn_create_outgoing(struct net *net,
+ __be32 laddr, __be32 faddr,
struct rds_transport *trans, gfp_t gfp);
void rds_conn_shutdown(struct rds_connection *conn);
void rds_conn_destroy(struct rds_connection *conn);
@@ -795,7 +811,7 @@ void rds_connect_complete(struct rds_connection *conn);
/* transport.c */
int rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
-struct rds_transport *rds_trans_get_preferred(__be32 addr);
+struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
unsigned int avail);
diff --git a/net/rds/send.c b/net/rds/send.c
index e9430f537..4df61a515 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -282,26 +282,34 @@ restart:
/* The transport either sends the whole rdma or none of it */
if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
rm->m_final_op = &rm->rdma;
+ /* The transport owns the mapped memory for now.
+ * You can't unmap it while it's on the send queue
+ */
+ set_bit(RDS_MSG_MAPPED, &rm->m_flags);
ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
- if (ret)
+ if (ret) {
+ clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
+ wake_up_interruptible(&rm->m_flush_wait);
break;
+ }
conn->c_xmit_rdma_sent = 1;
- /* The transport owns the mapped memory for now.
- * You can't unmap it while it's on the send queue */
- set_bit(RDS_MSG_MAPPED, &rm->m_flags);
}
if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
rm->m_final_op = &rm->atomic;
+ /* The transport owns the mapped memory for now.
+ * You can't unmap it while it's on the send queue
+ */
+ set_bit(RDS_MSG_MAPPED, &rm->m_flags);
ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
- if (ret)
+ if (ret) {
+ clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
+ wake_up_interruptible(&rm->m_flush_wait);
break;
+ }
conn->c_xmit_atomic_sent = 1;
- /* The transport owns the mapped memory for now.
- * You can't unmap it while it's on the send queue */
- set_bit(RDS_MSG_MAPPED, &rm->m_flags);
}
/*
@@ -411,7 +419,8 @@ over_batch:
*/
if (ret == 0) {
smp_mb();
- if (!list_empty(&conn->c_send_queue) &&
+ if ((test_bit(0, &conn->c_map_queued) ||
+ !list_empty(&conn->c_send_queue)) &&
send_gen == conn->c_send_gen) {
rds_stats_inc(s_send_lock_queue_raced);
goto restart;
@@ -769,8 +778,22 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
while (!list_empty(&list)) {
rm = list_entry(list.next, struct rds_message, m_sock_item);
list_del_init(&rm->m_sock_item);
-
rds_message_wait(rm);
+
+ /* just in case the code above skipped this message
+ * because RDS_MSG_ON_CONN wasn't set, run it again here.
+ * Taking m_rs_lock is the only thing that keeps us
+ * from racing with ack processing.
+ */
+ spin_lock_irqsave(&rm->m_rs_lock, flags);
+
+ spin_lock(&rs->rs_lock);
+ __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
+ spin_unlock(&rs->rs_lock);
+
+ rm->m_rs = NULL;
+ spin_unlock_irqrestore(&rm->m_rs_lock, flags);
+
rds_message_put(rm);
}
}
@@ -992,6 +1015,11 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
goto out;
}
+ if (payload_len > rds_sk_sndbuf(rs)) {
+ ret = -EMSGSIZE;
+ goto out;
+ }
+
/* size of rm including all sgs */
ret = rds_rm_size(msg, payload_len);
if (ret < 0)
@@ -1023,7 +1051,8 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
conn = rs->rs_conn;
else {
- conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
+ conn = rds_conn_create_outgoing(sock_net(sock->sk),
+ rs->rs_bound_addr, daddr,
rs->rs_transport,
sock->sk->sk_allocation);
if (IS_ERR(conn)) {
@@ -1063,11 +1092,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
dport, &queued)) {
rds_stats_inc(s_send_queue_full);
- /* XXX make sure this is reasonable */
- if (payload_len > rds_sk_sndbuf(rs)) {
- ret = -EMSGSIZE;
- goto out;
- }
+
if (nonblock) {
ret = -EAGAIN;
goto out;
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index edac9ef2b..c42b60bf4 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -35,6 +35,9 @@
#include <linux/in.h>
#include <linux/module.h>
#include <net/tcp.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#include "rds.h"
#include "tcp.h"
@@ -189,9 +192,9 @@ out:
spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
}
-static int rds_tcp_laddr_check(__be32 addr)
+static int rds_tcp_laddr_check(struct net *net, __be32 addr)
{
- if (inet_addr_type(&init_net, addr) == RTN_LOCAL)
+ if (inet_addr_type(net, addr) == RTN_LOCAL)
return 0;
return -EADDRNOTAVAIL;
}
@@ -250,16 +253,7 @@ static void rds_tcp_destroy_conns(void)
}
}
-static void rds_tcp_exit(void)
-{
- rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
- rds_tcp_listen_stop();
- rds_tcp_destroy_conns();
- rds_trans_unregister(&rds_tcp_transport);
- rds_tcp_recv_exit();
- kmem_cache_destroy(rds_tcp_conn_slab);
-}
-module_exit(rds_tcp_exit);
+static void rds_tcp_exit(void);
struct rds_transport rds_tcp_transport = {
.laddr_check = rds_tcp_laddr_check,
@@ -281,6 +275,136 @@ struct rds_transport rds_tcp_transport = {
.t_prefer_loopback = 1,
};
+static int rds_tcp_netid;
+
+/* per-network namespace private data for this module */
+struct rds_tcp_net {
+ struct socket *rds_tcp_listen_sock;
+ struct work_struct rds_tcp_accept_w;
+};
+
+static void rds_tcp_accept_worker(struct work_struct *work)
+{
+ struct rds_tcp_net *rtn = container_of(work,
+ struct rds_tcp_net,
+ rds_tcp_accept_w);
+
+ while (rds_tcp_accept_one(rtn->rds_tcp_listen_sock) == 0)
+ cond_resched();
+}
+
+void rds_tcp_accept_work(struct sock *sk)
+{
+ struct net *net = sock_net(sk);
+ struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+
+ queue_work(rds_wq, &rtn->rds_tcp_accept_w);
+}
+
+static __net_init int rds_tcp_init_net(struct net *net)
+{
+ struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+
+ rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net);
+ if (!rtn->rds_tcp_listen_sock) {
+ pr_warn("could not set up listen sock\n");
+ return -EAFNOSUPPORT;
+ }
+ INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker);
+ return 0;
+}
+
+static void __net_exit rds_tcp_exit_net(struct net *net)
+{
+ struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+
+ /* If rds_tcp_exit_net() is called as a result of netns deletion,
+ * the rds_tcp_kill_sock() device notifier would already have cleaned
+ * up the listen socket, thus there is no work to do in this function.
+ *
+ * If rds_tcp_exit_net() is called as a result of module unload,
+ * i.e., due to rds_tcp_exit() -> unregister_pernet_subsys(), then
+ * we do need to clean up the listen socket here.
+ */
+ if (rtn->rds_tcp_listen_sock) {
+ rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
+ rtn->rds_tcp_listen_sock = NULL;
+ flush_work(&rtn->rds_tcp_accept_w);
+ }
+}
+
+static struct pernet_operations rds_tcp_net_ops = {
+ .init = rds_tcp_init_net,
+ .exit = rds_tcp_exit_net,
+ .id = &rds_tcp_netid,
+ .size = sizeof(struct rds_tcp_net),
+};
+
+static void rds_tcp_kill_sock(struct net *net)
+{
+ struct rds_tcp_connection *tc, *_tc;
+ struct sock *sk;
+ LIST_HEAD(tmp_list);
+ struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+
+ rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
+ rtn->rds_tcp_listen_sock = NULL;
+ flush_work(&rtn->rds_tcp_accept_w);
+ spin_lock_irq(&rds_tcp_conn_lock);
+ list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
+ struct net *c_net = read_pnet(&tc->conn->c_net);
+
+ if (net != c_net || !tc->t_sock)
+ continue;
+ list_move_tail(&tc->t_tcp_node, &tmp_list);
+ }
+ spin_unlock_irq(&rds_tcp_conn_lock);
+ list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
+ sk = tc->t_sock->sk;
+ sk->sk_prot->disconnect(sk, 0);
+ tcp_done(sk);
+ if (tc->conn->c_passive)
+ rds_conn_destroy(tc->conn->c_passive);
+ rds_conn_destroy(tc->conn);
+ }
+}
+
+static int rds_tcp_dev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ /* rds-tcp registers as a pernet subsys, so the ->exit will only
+ * get invoked after network activity has quiesced. We need to
+ * clean up all sockets to quiesce network activity, and use
+ * the unregistration of the per-net loopback device as a trigger
+ * to start that cleanup.
+ */
+ if (event == NETDEV_UNREGISTER_FINAL &&
+ dev->ifindex == LOOPBACK_IFINDEX)
+ rds_tcp_kill_sock(dev_net(dev));
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block rds_tcp_dev_notifier = {
+ .notifier_call = rds_tcp_dev_event,
+ .priority = -10, /* must be called after other network notifiers */
+};
+
+static void rds_tcp_exit(void)
+{
+ rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
+ unregister_pernet_subsys(&rds_tcp_net_ops);
+ if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
+ pr_warn("could not unregister rds_tcp_dev_notifier\n");
+ rds_tcp_destroy_conns();
+ rds_trans_unregister(&rds_tcp_transport);
+ rds_tcp_recv_exit();
+ kmem_cache_destroy(rds_tcp_conn_slab);
+}
+module_exit(rds_tcp_exit);
+
static int rds_tcp_init(void)
{
int ret;
@@ -293,6 +417,16 @@ static int rds_tcp_init(void)
goto out;
}
+ ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
+ if (ret) {
+ pr_warn("could not register rds_tcp_dev_notifier\n");
+ goto out;
+ }
+
+ ret = register_pernet_subsys(&rds_tcp_net_ops);
+ if (ret)
+ goto out_slab;
+
ret = rds_tcp_recv_init();
if (ret)
goto out_slab;
@@ -301,19 +435,14 @@ static int rds_tcp_init(void)
if (ret)
goto out_recv;
- ret = rds_tcp_listen_init();
- if (ret)
- goto out_register;
-
rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
goto out;
-out_register:
- rds_trans_unregister(&rds_tcp_transport);
out_recv:
rds_tcp_recv_exit();
out_slab:
+ unregister_pernet_subsys(&rds_tcp_net_ops);
kmem_cache_destroy(rds_tcp_conn_slab);
out:
return ret;
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 0dbdd3716..64f873c0c 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -52,6 +52,7 @@ u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc);
u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
extern struct rds_transport rds_tcp_transport;
+void rds_tcp_accept_work(struct sock *sk);
/* tcp_connect.c */
int rds_tcp_conn_connect(struct rds_connection *conn);
@@ -59,9 +60,11 @@ void rds_tcp_conn_shutdown(struct rds_connection *conn);
void rds_tcp_state_change(struct sock *sk);
/* tcp_listen.c */
-int rds_tcp_listen_init(void);
-void rds_tcp_listen_stop(void);
+struct socket *rds_tcp_listen_init(struct net *);
+void rds_tcp_listen_stop(struct socket *);
void rds_tcp_listen_data_ready(struct sock *sk);
+int rds_tcp_accept_one(struct socket *sock);
+int rds_tcp_keepalive(struct socket *sock);
/* tcp_recv.c */
int rds_tcp_recv_init(void);
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index 973109c7b..5cb16875c 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -79,7 +79,8 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
struct sockaddr_in src, dest;
int ret;
- ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
+ ret = sock_create_kern(rds_conn_net(conn), PF_INET,
+ SOCK_STREAM, IPPROTO_TCP, &sock);
if (ret < 0)
goto out;
@@ -111,10 +112,12 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
rdsdebug("connect to address %pI4 returned %d\n", &conn->c_faddr, ret);
if (ret == -EINPROGRESS)
ret = 0;
- if (ret == 0)
+ if (ret == 0) {
+ rds_tcp_keepalive(sock);
sock = NULL;
- else
+ } else {
rds_tcp_restore_callbacks(sock, conn->c_transport_data);
+ }
out:
if (sock)
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 0da49e344..444d78d0b 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -38,14 +38,7 @@
#include "rds.h"
#include "tcp.h"
-/*
- * cheesy, but simple..
- */
-static void rds_tcp_accept_worker(struct work_struct *work);
-static DECLARE_WORK(rds_tcp_listen_work, rds_tcp_accept_worker);
-static struct socket *rds_tcp_listen_sock;
-
-static int rds_tcp_keepalive(struct socket *sock)
+int rds_tcp_keepalive(struct socket *sock)
{
/* values below based on xs_udp_default_timeout */
int keepidle = 5; /* send a probe 'keepidle' secs after last data */
@@ -77,7 +70,7 @@ bail:
return ret;
}
-static int rds_tcp_accept_one(struct socket *sock)
+int rds_tcp_accept_one(struct socket *sock)
{
struct socket *new_sock = NULL;
struct rds_connection *conn;
@@ -85,8 +78,9 @@ static int rds_tcp_accept_one(struct socket *sock)
struct inet_sock *inet;
struct rds_tcp_connection *rs_tcp;
- ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
- sock->sk->sk_protocol, &new_sock);
+ ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
+ sock->sk->sk_type, sock->sk->sk_protocol,
+ &new_sock);
if (ret)
goto out;
@@ -108,7 +102,8 @@ static int rds_tcp_accept_one(struct socket *sock)
&inet->inet_saddr, ntohs(inet->inet_sport),
&inet->inet_daddr, ntohs(inet->inet_dport));
- conn = rds_conn_create(inet->inet_saddr, inet->inet_daddr,
+ conn = rds_conn_create(sock_net(sock->sk),
+ inet->inet_saddr, inet->inet_daddr,
&rds_tcp_transport, GFP_KERNEL);
if (IS_ERR(conn)) {
ret = PTR_ERR(conn);
@@ -148,12 +143,6 @@ out:
return ret;
}
-static void rds_tcp_accept_worker(struct work_struct *work)
-{
- while (rds_tcp_accept_one(rds_tcp_listen_sock) == 0)
- cond_resched();
-}
-
void rds_tcp_listen_data_ready(struct sock *sk)
{
void (*ready)(struct sock *sk);
@@ -174,20 +163,20 @@ void rds_tcp_listen_data_ready(struct sock *sk)
* socket
*/
if (sk->sk_state == TCP_LISTEN)
- queue_work(rds_wq, &rds_tcp_listen_work);
+ rds_tcp_accept_work(sk);
out:
read_unlock(&sk->sk_callback_lock);
ready(sk);
}
-int rds_tcp_listen_init(void)
+struct socket *rds_tcp_listen_init(struct net *net)
{
struct sockaddr_in sin;
struct socket *sock = NULL;
int ret;
- ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
+ ret = sock_create_kern(net, PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
if (ret < 0)
goto out;
@@ -211,17 +200,15 @@ int rds_tcp_listen_init(void)
if (ret < 0)
goto out;
- rds_tcp_listen_sock = sock;
- sock = NULL;
+ return sock;
out:
if (sock)
sock_release(sock);
- return ret;
+ return NULL;
}
-void rds_tcp_listen_stop(void)
+void rds_tcp_listen_stop(struct socket *sock)
{
- struct socket *sock = rds_tcp_listen_sock;
struct sock *sk;
if (!sock)
@@ -242,5 +229,4 @@ void rds_tcp_listen_stop(void)
/* wait for accepts to stop and close the socket */
flush_workqueue(rds_wq);
sock_release(sock);
- rds_tcp_listen_sock = NULL;
}
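
rds_tcp_keepalive(), now exported so the active-connect path in tcp_connect.c
can call it too, tunes the same knobs a userspace TCP application would. A
hedged userspace equivalent using the 5-second idle value visible in the hunk
(the probe interval here is illustrative):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int enable_keepalive(int fd)
{
	int on = 1, keepidle = 5, keepintvl = 5;

	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) < 0)
		return -1;
	/* probe 5s after the last data, then every 5s */
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE,
		       &keepidle, sizeof(keepidle)) < 0)
		return -1;
	return setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL,
			  &keepintvl, sizeof(keepintvl));
}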
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index fbc5ef88b..27a992154 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -214,8 +214,15 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
}
to_copy = min(tc->t_tinc_data_rem, left);
- pskb_pull(clone, offset);
- pskb_trim(clone, to_copy);
+ if (!pskb_pull(clone, offset) ||
+ pskb_trim(clone, to_copy)) {
+ pr_warn("rds_tcp_data_recv: pull/trim failed "
+ "left %zu data_rem %zu skb_len %d\n",
+ left, tc->t_tinc_data_rem, skb->len);
+ kfree_skb(clone);
+ desc->error = -ENOMEM;
+ goto out;
+ }
skb_queue_tail(&tinc->ti_skb_list, clone);
rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
diff --git a/net/rds/transport.c b/net/rds/transport.c
index 83498e1c7..f3afd1d60 100644
--- a/net/rds/transport.c
+++ b/net/rds/transport.c
@@ -77,7 +77,7 @@ void rds_trans_put(struct rds_transport *trans)
module_put(trans->t_owner);
}
-struct rds_transport *rds_trans_get_preferred(__be32 addr)
+struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr)
{
struct rds_transport *ret = NULL;
struct rds_transport *trans;
@@ -90,7 +90,7 @@ struct rds_transport *rds_trans_get_preferred(__be32 addr)
for (i = 0; i < RDS_TRANS_COUNT; i++) {
trans = transports[i];
- if (trans && (trans->laddr_check(addr) == 0) &&
+ if (trans && (trans->laddr_check(net, addr) == 0) &&
(!trans->t_owner || try_module_get(trans->t_owner))) {
ret = trans;
break;
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index 4c10e7e6c..598d374f6 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -36,7 +36,8 @@ config RFKILL_REGULATOR
config RFKILL_GPIO
tristate "GPIO RFKILL driver"
- depends on RFKILL && GPIOLIB
+ depends on RFKILL
+ depends on GPIOLIB || COMPILE_TEST
default n
help
If you say yes here you get support for a generic gpio RFKILL
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index f12149a29..b41e9ea2f 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -341,7 +341,15 @@ static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
struct rfkill *rfkill;
- rfkill_global_states[type].cur = blocked;
+ if (type == RFKILL_TYPE_ALL) {
+ int i;
+
+ for (i = 0; i < NUM_RFKILL_TYPES; i++)
+ rfkill_global_states[i].cur = blocked;
+ } else {
+ rfkill_global_states[type].cur = blocked;
+ }
+
list_for_each_entry(rfkill, &rfkill_list, node) {
if (rfkill->type != type && type != RFKILL_TYPE_ALL)
continue;
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index d5d58d919..93127220c 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -164,7 +164,6 @@ static int rfkill_gpio_remove(struct platform_device *pdev)
#ifdef CONFIG_ACPI
static const struct acpi_device_id rfkill_acpi_match[] = {
{ "BCM2E1A", RFKILL_TYPE_BLUETOOTH },
- { "BCM2E39", RFKILL_TYPE_BLUETOOTH },
{ "BCM2E3D", RFKILL_TYPE_BLUETOOTH },
{ "BCM2E40", RFKILL_TYPE_BLUETOOTH },
{ "BCM2E64", RFKILL_TYPE_BLUETOOTH },
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 43ec92680..06e7c4a37 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -27,7 +27,16 @@
#include <net/act_api.h>
#include <net/netlink.h>
-void tcf_hash_destroy(struct tc_action *a)
+static void free_tcf(struct rcu_head *head)
+{
+ struct tcf_common *p = container_of(head, struct tcf_common, tcfc_rcu);
+
+ free_percpu(p->cpu_bstats);
+ free_percpu(p->cpu_qstats);
+ kfree(p);
+}
+
+static void tcf_hash_destroy(struct tc_action *a)
{
struct tcf_common *p = a->priv;
struct tcf_hashinfo *hinfo = a->ops->hinfo;
@@ -41,9 +50,8 @@ void tcf_hash_destroy(struct tc_action *a)
* gen_estimator est_timer() might access p->tcfc_lock
* or bstats, wait a RCU grace period before freeing p
*/
- kfree_rcu(p, tcfc_rcu);
+ call_rcu(&p->tcfc_rcu, free_tcf);
}
-EXPORT_SYMBOL(tcf_hash_destroy);
int __tcf_hash_release(struct tc_action *a, bool bind, bool strict)
{
@@ -231,15 +239,16 @@ void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est)
if (est)
gen_kill_estimator(&pc->tcfc_bstats,
&pc->tcfc_rate_est);
- kfree_rcu(pc, tcfc_rcu);
+ call_rcu(&pc->tcfc_rcu, free_tcf);
}
EXPORT_SYMBOL(tcf_hash_cleanup);
int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
- int size, int bind)
+ int size, int bind, bool cpustats)
{
struct tcf_hashinfo *hinfo = a->ops->hinfo;
struct tcf_common *p = kzalloc(size, GFP_KERNEL);
+ int err = -ENOMEM;
if (unlikely(!p))
return -ENOMEM;
@@ -247,18 +256,32 @@ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
if (bind)
p->tcfc_bindcnt = 1;
+ if (cpustats) {
+ p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
+ if (!p->cpu_bstats) {
+err1:
+ kfree(p);
+ return err;
+ }
+ p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
+ if (!p->cpu_qstats) {
+err2:
+ free_percpu(p->cpu_bstats);
+ goto err1;
+ }
+ }
spin_lock_init(&p->tcfc_lock);
INIT_HLIST_NODE(&p->tcfc_head);
p->tcfc_index = index ? index : tcf_hash_new_index(hinfo);
p->tcfc_tm.install = jiffies;
p->tcfc_tm.lastuse = jiffies;
if (est) {
- int err = gen_new_estimator(&p->tcfc_bstats, NULL,
- &p->tcfc_rate_est,
- &p->tcfc_lock, est);
+ err = gen_new_estimator(&p->tcfc_bstats, p->cpu_bstats,
+ &p->tcfc_rate_est,
+ &p->tcfc_lock, est);
if (err) {
- kfree(p);
- return err;
+ free_percpu(p->cpu_qstats);
+ goto err2;
}
}
@@ -616,10 +639,10 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
if (err < 0)
goto errout;
- if (gnet_stats_copy_basic(&d, NULL, &p->tcfc_bstats) < 0 ||
+ if (gnet_stats_copy_basic(&d, p->cpu_bstats, &p->tcfc_bstats) < 0 ||
gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
&p->tcfc_rate_est) < 0 ||
- gnet_stats_copy_queue(&d, NULL,
+ gnet_stats_copy_queue(&d, p->cpu_qstats,
&p->tcfc_qstats,
p->tcfc_qstats.qlen) < 0)
goto errout;
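
With cpustats set, tcf_hash_create() backs the action with lock-free per-CPU
counters instead of funnelling every packet through tcfc_lock. The full life
cycle, condensed from the hunks above into a sketch (error handling and the
surrounding structures trimmed):

struct gnet_stats_basic_cpu __percpu *bstats;
struct gnet_stats_queue __percpu *qstats;

bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
qstats = alloc_percpu(struct gnet_stats_queue);

/* datapath, softirq context, no lock taken: */
bstats_cpu_update(this_cpu_ptr(bstats), skb);
qstats_drop_inc(this_cpu_ptr(qstats));

/* teardown, after an RCU grace period (see free_tcf() above): */
free_percpu(bstats);
free_percpu(qstats);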
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index d0edeb7a1..559bfa011 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -37,25 +37,24 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
struct tcf_result *res)
{
struct tcf_bpf *prog = act->priv;
+ struct bpf_prog *filter;
int action, filter_res;
bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS;
if (unlikely(!skb_mac_header_was_set(skb)))
return TC_ACT_UNSPEC;
- spin_lock(&prog->tcf_lock);
-
- prog->tcf_tm.lastuse = jiffies;
- bstats_update(&prog->tcf_bstats, skb);
+ tcf_lastuse_update(&prog->tcf_tm);
+ bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);
- /* Needed here for accessing maps. */
rcu_read_lock();
+ filter = rcu_dereference(prog->filter);
if (at_ingress) {
__skb_push(skb, skb->mac_len);
- filter_res = BPF_PROG_RUN(prog->filter, skb);
+ filter_res = BPF_PROG_RUN(filter, skb);
__skb_pull(skb, skb->mac_len);
} else {
- filter_res = BPF_PROG_RUN(prog->filter, skb);
+ filter_res = BPF_PROG_RUN(filter, skb);
}
rcu_read_unlock();
@@ -77,7 +76,7 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
break;
case TC_ACT_SHOT:
action = filter_res;
- prog->tcf_qstats.drops++;
+ qstats_drop_inc(this_cpu_ptr(prog->common.cpu_qstats));
break;
case TC_ACT_UNSPEC:
action = prog->tcf_action;
@@ -87,7 +86,6 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
break;
}
- spin_unlock(&prog->tcf_lock);
return action;
}
@@ -263,7 +261,10 @@ static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
struct tcf_bpf_cfg *cfg)
{
cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
- cfg->filter = prog->filter;
+ /* Updates to prog->filter are prevented, since this is called either
+ * with the rtnl lock held or during final cleanup in the rcu callback.
+ */
+ cfg->filter = rcu_dereference_protected(prog->filter, 1);
cfg->bpf_ops = prog->bpf_ops;
cfg->bpf_name = prog->bpf_name;
@@ -278,7 +279,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
struct tc_act_bpf *parm;
struct tcf_bpf *prog;
bool is_bpf, is_ebpf;
- int ret;
+ int ret, res = 0;
if (!nla)
return -EINVAL;
@@ -287,45 +288,47 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
if (ret < 0)
return ret;
- is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
- is_ebpf = tb[TCA_ACT_BPF_FD];
-
- if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf) ||
- !tb[TCA_ACT_BPF_PARMS])
+ if (!tb[TCA_ACT_BPF_PARMS])
return -EINVAL;
parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
- memset(&cfg, 0, sizeof(cfg));
-
- ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
- tcf_bpf_init_from_efd(tb, &cfg);
- if (ret < 0)
- return ret;
-
if (!tcf_hash_check(parm->index, act, bind)) {
ret = tcf_hash_create(parm->index, est, act,
- sizeof(*prog), bind);
+ sizeof(*prog), bind, true);
if (ret < 0)
- goto destroy_fp;
+ return ret;
- ret = ACT_P_CREATED;
+ res = ACT_P_CREATED;
} else {
/* Don't override defaults. */
if (bind)
- goto destroy_fp;
+ return 0;
tcf_hash_release(act, bind);
- if (!replace) {
- ret = -EEXIST;
- goto destroy_fp;
- }
+ if (!replace)
+ return -EEXIST;
}
+ is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
+ is_ebpf = tb[TCA_ACT_BPF_FD];
+
+ if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memset(&cfg, 0, sizeof(cfg));
+
+ ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
+ tcf_bpf_init_from_efd(tb, &cfg);
+ if (ret < 0)
+ goto out;
+
prog = to_bpf(act);
- spin_lock_bh(&prog->tcf_lock);
+ ASSERT_RTNL();
- if (ret != ACT_P_CREATED)
+ if (res != ACT_P_CREATED)
tcf_bpf_prog_fill_cfg(prog, &old);
prog->bpf_ops = cfg.bpf_ops;
@@ -337,19 +340,21 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
prog->bpf_fd = cfg.bpf_fd;
prog->tcf_action = parm->action;
- prog->filter = cfg.filter;
+ rcu_assign_pointer(prog->filter, cfg.filter);
- spin_unlock_bh(&prog->tcf_lock);
-
- if (ret == ACT_P_CREATED)
+ if (res == ACT_P_CREATED) {
tcf_hash_insert(act);
- else
+ } else {
+ /* make sure the program being replaced is no longer executing */
+ synchronize_rcu();
tcf_bpf_cfg_cleanup(&old);
+ }
- return ret;
+ return res;
+out:
+ if (res == ACT_P_CREATED)
+ tcf_hash_cleanup(act, est);
-destroy_fp:
- tcf_bpf_cfg_cleanup(&cfg);
return ret;
}
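The act_bpf.c conversion drops the per-action spinlock in favor of an RCU-managed filter pointer: the init path publishes the new program with rcu_assign_pointer() and waits one grace period before destroying the old one. A sketch of that replace-then-wait pattern under illustrative names (in the real code the writer side is serialized by RTNL, which is what justifies rcu_dereference_protected()):

#include <linux/rcupdate.h>

struct example_prog;            /* opaque payload */

struct example_action {
    struct example_prog __rcu *filter;
};

static void example_replace(struct example_action *a,
                            struct example_prog *new_prog,
                            void (*destroy)(struct example_prog *))
{
    struct example_prog *old;

    /* writer side assumed serialized by an outer lock (RTNL here) */
    old = rcu_dereference_protected(a->filter, 1);
    rcu_assign_pointer(a->filter, new_prog);
    if (old) {
        synchronize_rcu();      /* no reader still executes 'old' */
        destroy(old);
    }
}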
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 295d14bd6..5019a47b9 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -37,6 +37,7 @@ static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
struct nf_conntrack_tuple tuple;
enum ip_conntrack_info ctinfo;
struct tcf_connmark_info *ca = a->priv;
+ struct nf_conntrack_zone zone;
struct nf_conn *c;
int proto;
@@ -70,7 +71,10 @@ static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
proto, &tuple))
goto out;
- thash = nf_conntrack_find_get(dev_net(skb->dev), ca->zone, &tuple);
+ zone.id = ca->zone;
+ zone.dir = NF_CT_DEFAULT_ZONE_DIR;
+
+ thash = nf_conntrack_find_get(dev_net(skb->dev), &zone, &tuple);
if (!thash)
goto out;
@@ -108,7 +112,8 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
parm = nla_data(tb[TCA_CONNMARK_PARMS]);
if (!tcf_hash_check(parm->index, a, bind)) {
- ret = tcf_hash_create(parm->index, est, a, sizeof(*ci), bind);
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*ci),
+ bind, false);
if (ret)
return ret;
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 4cd5cf1ae..b07c535ba 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -62,7 +62,8 @@ static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
parm = nla_data(tb[TCA_CSUM_PARMS]);
if (!tcf_hash_check(parm->index, a, bind)) {
- ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
+ bind, false);
if (ret)
return ret;
ret = ACT_P_CREATED;
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 7fffc2272..5c1b05170 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -28,14 +28,18 @@
#ifdef CONFIG_GACT_PROB
static int gact_net_rand(struct tcf_gact *gact)
{
- if (!gact->tcfg_pval || prandom_u32() % gact->tcfg_pval)
+ smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
+ if (prandom_u32() % gact->tcfg_pval)
return gact->tcf_action;
return gact->tcfg_paction;
}
static int gact_determ(struct tcf_gact *gact)
{
- if (!gact->tcfg_pval || gact->tcf_bstats.packets % gact->tcfg_pval)
+ u32 pack = atomic_inc_return(&gact->packets);
+
+ smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
+ if (pack % gact->tcfg_pval)
return gact->tcf_action;
return gact->tcfg_paction;
}
@@ -85,7 +89,8 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
#endif
if (!tcf_hash_check(parm->index, a, bind)) {
- ret = tcf_hash_create(parm->index, est, a, sizeof(*gact), bind);
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*gact),
+ bind, true);
if (ret)
return ret;
ret = ACT_P_CREATED;
@@ -99,16 +104,19 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
gact = to_gact(a);
- spin_lock_bh(&gact->tcf_lock);
+ ASSERT_RTNL();
gact->tcf_action = parm->action;
#ifdef CONFIG_GACT_PROB
if (p_parm) {
gact->tcfg_paction = p_parm->paction;
- gact->tcfg_pval = p_parm->pval;
+ gact->tcfg_pval = max_t(u16, 1, p_parm->pval);
+ /* Make sure tcfg_pval is written before tcfg_ptype
+ * coupled with smp_rmb() in gact_net_rand() & gact_determ()
+ */
+ smp_wmb();
gact->tcfg_ptype = p_parm->ptype;
}
#endif
- spin_unlock_bh(&gact->tcf_lock);
if (ret == ACT_P_CREATED)
tcf_hash_insert(a);
return ret;
@@ -118,23 +126,21 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
struct tcf_gact *gact = a->priv;
- int action = TC_ACT_SHOT;
+ int action = READ_ONCE(gact->tcf_action);
- spin_lock(&gact->tcf_lock);
#ifdef CONFIG_GACT_PROB
- if (gact->tcfg_ptype)
- action = gact_rand[gact->tcfg_ptype](gact);
- else
- action = gact->tcf_action;
-#else
- action = gact->tcf_action;
+ {
+ u32 ptype = READ_ONCE(gact->tcfg_ptype);
+
+ if (ptype)
+ action = gact_rand[ptype](gact);
+ }
#endif
- gact->tcf_bstats.bytes += qdisc_pkt_len(skb);
- gact->tcf_bstats.packets++;
+ bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), skb);
if (action == TC_ACT_SHOT)
- gact->tcf_qstats.drops++;
- gact->tcf_tm.lastuse = jiffies;
- spin_unlock(&gact->tcf_lock);
+ qstats_drop_inc(this_cpu_ptr(gact->common.cpu_qstats));
+
+ tcf_lastuse_update(&gact->tcf_tm);
return action;
}
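gact's lockless readers depend on tcfg_pval being visible before tcfg_ptype, which is what the smp_wmb()/smp_rmb() pair above enforces; the max_t() additionally guarantees the divisor is never zero. A sketch of that publish/consume ordering, with illustrative names:

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

struct example_gact {
    u32 pval;
    u32 ptype;                  /* nonzero => pval is meaningful */
};

static void example_publish(struct example_gact *g, u32 pval, u32 ptype)
{
    g->pval = pval ? pval : 1;  /* never 0: readers compute '% pval' */
    smp_wmb();                  /* order pval before ptype */
    g->ptype = ptype;
}

static u32 example_read_pval(struct example_gact *g)
{
    u32 ptype = READ_ONCE(g->ptype);

    if (!ptype)
        return 0;
    smp_rmb();                  /* pairs with smp_wmb() above */
    return g->pval;             /* sees the value written before ptype */
}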
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index cbc8dd7dd..99c9cc1c7 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -114,7 +114,7 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
index = nla_get_u32(tb[TCA_IPT_INDEX]);
if (!tcf_hash_check(index, a, bind) ) {
- ret = tcf_hash_create(index, est, a, sizeof(*ipt), bind);
+ ret = tcf_hash_create(index, est, a, sizeof(*ipt), bind, false);
if (ret)
return ret;
ret = ACT_P_CREATED;
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index b1768198a..32fcdecdb 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -31,13 +31,19 @@
#define MIRRED_TAB_MASK 7
static LIST_HEAD(mirred_list);
+static DEFINE_SPINLOCK(mirred_list_lock);
static void tcf_mirred_release(struct tc_action *a, int bind)
{
struct tcf_mirred *m = to_mirred(a);
+ struct net_device *dev = rcu_dereference_protected(m->tcfm_dev, 1);
+
+ /* We could be called either in an RCU callback or with the RTNL lock held. */
+ spin_lock_bh(&mirred_list_lock);
list_del(&m->tcfm_list);
- if (m->tcfm_dev)
- dev_put(m->tcfm_dev);
+ spin_unlock_bh(&mirred_list_lock);
+ if (dev)
+ dev_put(dev);
}
static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
@@ -93,34 +99,37 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
if (!tcf_hash_check(parm->index, a, bind)) {
if (dev == NULL)
return -EINVAL;
- ret = tcf_hash_create(parm->index, est, a, sizeof(*m), bind);
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*m),
+ bind, true);
if (ret)
return ret;
ret = ACT_P_CREATED;
} else {
if (bind)
return 0;
- if (!ovr) {
- tcf_hash_release(a, bind);
+
+ tcf_hash_release(a, bind);
+ if (!ovr)
return -EEXIST;
- }
}
m = to_mirred(a);
- spin_lock_bh(&m->tcf_lock);
+ ASSERT_RTNL();
m->tcf_action = parm->action;
m->tcfm_eaction = parm->eaction;
if (dev != NULL) {
m->tcfm_ifindex = parm->ifindex;
if (ret != ACT_P_CREATED)
- dev_put(m->tcfm_dev);
+ dev_put(rcu_dereference_protected(m->tcfm_dev, 1));
dev_hold(dev);
- m->tcfm_dev = dev;
+ rcu_assign_pointer(m->tcfm_dev, dev);
m->tcfm_ok_push = ok_push;
}
- spin_unlock_bh(&m->tcf_lock);
+
if (ret == ACT_P_CREATED) {
+ spin_lock_bh(&mirred_list_lock);
list_add(&m->tcfm_list, &mirred_list);
+ spin_unlock_bh(&mirred_list_lock);
tcf_hash_insert(a);
}
@@ -133,20 +142,22 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
struct tcf_mirred *m = a->priv;
struct net_device *dev;
struct sk_buff *skb2;
+ int retval, err;
u32 at;
- int retval, err = 1;
- spin_lock(&m->tcf_lock);
- m->tcf_tm.lastuse = jiffies;
- bstats_update(&m->tcf_bstats, skb);
+ tcf_lastuse_update(&m->tcf_tm);
+
+ bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
- dev = m->tcfm_dev;
- if (!dev) {
- printk_once(KERN_NOTICE "tc mirred: target device is gone\n");
+ rcu_read_lock();
+ retval = READ_ONCE(m->tcf_action);
+ dev = rcu_dereference(m->tcfm_dev);
+ if (unlikely(!dev)) {
+ pr_notice_once("tc mirred: target device is gone\n");
goto out;
}
- if (!(dev->flags & IFF_UP)) {
+ if (unlikely(!(dev->flags & IFF_UP))) {
net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
dev->name);
goto out;
@@ -154,7 +165,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
at = G_TC_AT(skb->tc_verd);
skb2 = skb_clone(skb, GFP_ATOMIC);
- if (skb2 == NULL)
+ if (!skb2)
goto out;
if (!(at & AT_EGRESS)) {
@@ -171,16 +182,13 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
skb_sender_cpu_clear(skb2);
err = dev_queue_xmit(skb2);
-out:
if (err) {
- m->tcf_qstats.overlimits++;
+out:
+ qstats_overlimit_inc(this_cpu_ptr(m->common.cpu_qstats));
if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
retval = TC_ACT_SHOT;
- else
- retval = m->tcf_action;
- } else
- retval = m->tcf_action;
- spin_unlock(&m->tcf_lock);
+ }
+ rcu_read_unlock();
return retval;
}
@@ -219,15 +227,20 @@ static int mirred_device_event(struct notifier_block *unused,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct tcf_mirred *m;
- if (event == NETDEV_UNREGISTER)
+ ASSERT_RTNL();
+ if (event == NETDEV_UNREGISTER) {
+ spin_lock_bh(&mirred_list_lock);
list_for_each_entry(m, &mirred_list, tcfm_list) {
- spin_lock_bh(&m->tcf_lock);
- if (m->tcfm_dev == dev) {
+ if (rcu_access_pointer(m->tcfm_dev) == dev) {
dev_put(dev);
- m->tcfm_dev = NULL;
+ /* Note: no rcu grace period is necessary here, as
+ * net_device objects are already rcu protected.
+ */
+ RCU_INIT_POINTER(m->tcfm_dev, NULL);
}
- spin_unlock_bh(&m->tcf_lock);
}
+ spin_unlock_bh(&mirred_list_lock);
+ }
return NOTIFY_DONE;
}
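mirred's fast path now runs entirely under rcu_read_lock(): it samples the RCU-protected device pointer once and must tolerate NULL, since the netdev notifier can clear it concurrently. A sketch of that lockless dispatch shape — not the real tcf_mirred(), just the pattern:

#include <linux/rcupdate.h>
#include <linux/netdevice.h>

struct example_mirror {
    struct net_device __rcu *dev;
};

static int example_fast_path(struct example_mirror *m, struct sk_buff *skb)
{
    struct net_device *dev;
    int ret = -ENODEV;

    rcu_read_lock();
    dev = rcu_dereference(m->dev);      /* may be cleared by a notifier */
    if (dev && (dev->flags & IFF_UP)) {
        /* ... clone skb and hand the clone to dev ... */
        ret = 0;
    }
    rcu_read_unlock();                  /* dev must not be used past here */
    return ret;
}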
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 270a030d5..b7c4ead8b 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -55,7 +55,8 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
parm = nla_data(tb[TCA_NAT_PARMS]);
if (!tcf_hash_check(parm->index, a, bind)) {
- ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
+ bind, false);
if (ret)
return ret;
ret = ACT_P_CREATED;
@@ -161,7 +162,8 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
goto drop;
tcph = (void *)(skb_network_header(skb) + ihl);
- inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, 1);
+ inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr,
+ true);
break;
}
case IPPROTO_UDP:
@@ -177,7 +179,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
udph = (void *)(skb_network_header(skb) + ihl);
if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
inet_proto_csum_replace4(&udph->check, skb, addr,
- new_addr, 1);
+ new_addr, true);
if (!udph->check)
udph->check = CSUM_MANGLED_0;
}
@@ -230,7 +232,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
iph->saddr = new_addr;
inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
- 0);
+ false);
break;
}
default:
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index ff8b466a7..e38a7701f 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -57,7 +57,8 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
if (!tcf_hash_check(parm->index, a, bind)) {
if (!parm->nkeys)
return -EINVAL;
- ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
+ bind, false);
if (ret)
return ret;
p = to_pedit(a);
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 6a8d94886..d6b708d6a 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -103,7 +103,8 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
defdata = nla_data(tb[TCA_DEF_DATA]);
if (!tcf_hash_check(parm->index, a, bind)) {
- ret = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*d),
+ bind, false);
if (ret)
return ret;
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index fcfeeaf83..6751b5f8c 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -99,7 +99,8 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
if (!tcf_hash_check(parm->index, a, bind)) {
- ret = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*d),
+ bind, false);
if (ret)
return ret;
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index d735ecf0b..796785e0b 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -116,7 +116,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
action = parm->v_action;
if (!tcf_hash_check(parm->index, a, bind)) {
- ret = tcf_hash_create(parm->index, est, a, sizeof(*v), bind);
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*v),
+ bind, false);
if (ret)
return ret;
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index ea611b216..4c85bd3a7 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -30,35 +30,16 @@ static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res)
{
struct cls_cgroup_head *head = rcu_dereference_bh(tp->root);
- u32 classid;
-
- classid = task_cls_state(current)->classid;
-
- /*
- * Due to the nature of the classifier it is required to ignore all
- * packets originating from softirq context as accessing `current'
- * would lead to false results.
- *
- * This test assumes that all callers of dev_queue_xmit() explicitely
- * disable bh. Knowing this, it is possible to detect softirq based
- * calls by looking at the number of nested bh disable calls because
- * softirqs always disables bh.
- */
- if (in_serving_softirq()) {
- /* If there is an sk_classid we'll use that. */
- if (!skb->sk)
- return -1;
- classid = skb->sk->sk_classid;
- }
+ u32 classid = task_get_classid(skb);
if (!classid)
return -1;
-
if (!tcf_em_tree_match(skb, &head->ematches, NULL))
return -1;
res->classid = classid;
res->class = 0;
+
return tcf_exts_exec(skb, &head->exts, res);
}
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index bb2a0f529..536838b65 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -301,7 +301,7 @@ static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
keymask = f->keymask;
if (keymask & FLOW_KEYS_NEEDED)
- skb_flow_dissect_flow_keys(skb, &flow_keys);
+ skb_flow_dissect_flow_keys(skb, &flow_keys, 0);
for (n = 0; n < f->nkeys; n++) {
key = ffs(keymask) - 1;
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 2f3d03f99..57692947e 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -129,7 +129,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
* so do it rather here.
*/
skb_key.basic.n_proto = skb->protocol;
- skb_flow_dissect(skb, &head->dissector, &skb_key);
+ skb_flow_dissect(skb, &head->dissector, &skb_key, 0);
fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 02fa82792..f9c9fc075 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -283,12 +283,22 @@ static int rsvp_init(struct tcf_proto *tp)
return -ENOBUFS;
}
-static void
-rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
+static void rsvp_delete_filter_rcu(struct rcu_head *head)
{
- tcf_unbind_filter(tp, &f->res);
+ struct rsvp_filter *f = container_of(head, struct rsvp_filter, rcu);
+
tcf_exts_destroy(&f->exts);
- kfree_rcu(f, rcu);
+ kfree(f);
+}
+
+static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
+{
+ tcf_unbind_filter(tp, &f->res);
+ /* All classifiers are required to call tcf_exts_destroy() after an rcu
+ * grace period, since converted-to-rcu actions rely on that in their
+ * cleanup() callbacks.
+ */
+ call_rcu(&f->rcu, rsvp_delete_filter_rcu);
}
static bool rsvp_destroy(struct tcf_proto *tp, bool force)
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index a557dbaf5..944c8ff45 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -27,6 +27,7 @@
struct tcindex_filter_result {
struct tcf_exts exts;
struct tcf_result res;
+ struct rcu_head rcu;
};
struct tcindex_filter {
@@ -133,8 +134,23 @@ static int tcindex_init(struct tcf_proto *tp)
return 0;
}
-static int
-tcindex_delete(struct tcf_proto *tp, unsigned long arg)
+static void tcindex_destroy_rexts(struct rcu_head *head)
+{
+ struct tcindex_filter_result *r;
+
+ r = container_of(head, struct tcindex_filter_result, rcu);
+ tcf_exts_destroy(&r->exts);
+}
+
+static void tcindex_destroy_fexts(struct rcu_head *head)
+{
+ struct tcindex_filter *f = container_of(head, struct tcindex_filter, rcu);
+
+ tcf_exts_destroy(&f->result.exts);
+ kfree(f);
+}
+
+static int tcindex_delete(struct tcf_proto *tp, unsigned long arg)
{
struct tcindex_data *p = rtnl_dereference(tp->root);
struct tcindex_filter_result *r = (struct tcindex_filter_result *) arg;
@@ -162,9 +178,14 @@ found:
rcu_assign_pointer(*walk, rtnl_dereference(f->next));
}
tcf_unbind_filter(tp, &r->res);
- tcf_exts_destroy(&r->exts);
+ /* All classifiers are required to call tcf_exts_destroy() after an rcu
+ * grace period, since converted-to-rcu actions rely on that in their
+ * cleanup() callbacks.
+ */
if (f)
- kfree_rcu(f, rcu);
+ call_rcu(&f->rcu, tcindex_destroy_fexts);
+ else
+ call_rcu(&r->rcu, tcindex_destroy_rexts);
return 0;
}
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index f06aa01d6..f43c8f33f 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1806,51 +1806,45 @@ done:
* to this qdisc, (optionally) tests for protocol and asks
* specific classifiers.
*/
-int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
- struct tcf_result *res)
+int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ struct tcf_result *res, bool compat_mode)
{
__be16 protocol = tc_skb_protocol(skb);
- int err;
+#ifdef CONFIG_NET_CLS_ACT
+ const struct tcf_proto *old_tp = tp;
+ int limit = 0;
+reclassify:
+#endif
for (; tp; tp = rcu_dereference_bh(tp->next)) {
+ int err;
+
if (tp->protocol != protocol &&
tp->protocol != htons(ETH_P_ALL))
continue;
- err = tp->classify(skb, tp, res);
+ err = tp->classify(skb, tp, res);
+#ifdef CONFIG_NET_CLS_ACT
+ if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode))
+ goto reset;
+#endif
if (err >= 0)
return err;
}
- return -1;
-}
-EXPORT_SYMBOL(tc_classify_compat);
-int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
- struct tcf_result *res)
-{
- int err = 0;
-#ifdef CONFIG_NET_CLS_ACT
- const struct tcf_proto *otp = tp;
- int limit = 0;
-reclassify:
-#endif
-
- err = tc_classify_compat(skb, tp, res);
+ return -1;
#ifdef CONFIG_NET_CLS_ACT
- if (err == TC_ACT_RECLASSIFY) {
- tp = otp;
-
- if (unlikely(limit++ >= MAX_REC_LOOP)) {
- net_notice_ratelimited("%s: packet reclassify loop rule prio %u protocol %02x\n",
- tp->q->ops->id,
- tp->prio & 0xffff,
- ntohs(tp->protocol));
- return TC_ACT_SHOT;
- }
- goto reclassify;
+reset:
+ if (unlikely(limit++ >= MAX_REC_LOOP)) {
+ net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
+ tp->q->ops->id, tp->prio & 0xffff,
+ ntohs(tp->protocol));
+ return TC_ACT_SHOT;
}
+
+ tp = old_tp;
+ goto reclassify;
#endif
- return err;
}
EXPORT_SYMBOL(tc_classify);
@@ -1947,6 +1941,7 @@ static int __init pktsched_init(void)
register_qdisc(&bfifo_qdisc_ops);
register_qdisc(&pfifo_head_drop_qdisc_ops);
register_qdisc(&mq_qdisc_ops);
+ register_qdisc(&noqueue_qdisc_ops);
rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
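The folded tc_classify() keeps the old safety valve: TC_ACT_RECLASSIFY restarts classification from the head of the chain, but only MAX_REC_LOOP times before the packet is shot. A generic sketch of that bounded-restart guard (constants and error codes here are illustrative, not the tc values):

#include <linux/errno.h>

#define EXAMPLE_MAX_LOOPS 4             /* plays the role of MAX_REC_LOOP */

static int example_classify(int (*step)(void *ctx), void *ctx)
{
    int limit = 0;
    int err;

again:
    err = step(ctx);
    if (err == -EAGAIN) {               /* stands in for TC_ACT_RECLASSIFY */
        if (++limit >= EXAMPLE_MAX_LOOPS)
            return -ELOOP;              /* give up instead of livelocking */
        goto again;                     /* restart from the chain head */
    }
    return err;
}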
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index e3e2cc5fd..1911af3ca 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -375,7 +375,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
list_for_each_entry(flow, &p->flows, list) {
fl = rcu_dereference_bh(flow->filter_list);
if (fl) {
- result = tc_classify_compat(skb, fl, &res);
+ result = tc_classify(skb, fl, &res, true);
if (result < 0)
continue;
flow = (struct atm_flow_data *)res.class;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index beeb75f80..c538d9e4a 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -240,7 +240,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
/*
* Step 2+n. Apply classifier.
*/
- result = tc_classify_compat(skb, fl, &res);
+ result = tc_classify(skb, fl, &res, true);
if (!fl || result < 0)
goto fallback;
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 6a783afe4..02bfd3d1c 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -170,13 +170,13 @@ static bool choke_match_flow(struct sk_buff *skb1,
if (!choke_skb_cb(skb1)->keys_valid) {
choke_skb_cb(skb1)->keys_valid = 1;
- skb_flow_dissect_flow_keys(skb1, &temp);
+ skb_flow_dissect_flow_keys(skb1, &temp, 0);
make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);
}
if (!choke_skb_cb(skb2)->keys_valid) {
choke_skb_cb(skb2)->keys_valid = 1;
- skb_flow_dissect_flow_keys(skb2, &temp);
+ skb_flow_dissect_flow_keys(skb2, &temp, 0);
make_flow_keys_digest(&choke_skb_cb(skb2)->keys, &temp);
}
@@ -201,7 +201,7 @@ static bool choke_classify(struct sk_buff *skb,
int result;
fl = rcu_dereference_bh(q->filter_list);
- result = tc_classify(skb, fl, &res);
+ result = tc_classify(skb, fl, &res, false);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 338706092..f26bdea87 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -331,7 +331,7 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
fl = rcu_dereference_bh(q->filter_list);
- result = tc_classify(skb, fl, &res);
+ result = tc_classify(skb, fl, &res, false);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 66700a611..c4d45fd8c 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -230,7 +230,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
else {
struct tcf_result res;
struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
- int result = tc_classify(skb, fl, &res);
+ int result = tc_classify(skb, fl, &res, false);
pr_debug("result %d class 0x%04x\n", result, res.classid);
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 2e2398cfc..2177eac0a 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -54,7 +54,7 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
if (opt == NULL) {
- u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;
+ u32 limit = qdisc_dev(sch)->tx_queue_len;
if (is_bfifo)
limit *= psched_mtu(qdisc_dev(sch));
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index a9ba03043..4c834e93d 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -92,7 +92,7 @@ static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
return fq_codel_hash(q, skb) + 1;
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- result = tc_classify(skb, filter, &res);
+ result = tc_classify(skb, filter, &res, false);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 6efca3089..cb5d4ad32 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -416,33 +416,25 @@ struct Qdisc noop_qdisc = {
};
EXPORT_SYMBOL(noop_qdisc);
-static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
+static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt)
+{
+ /* register_qdisc() assigns a default of noop_enqueue if unset,
+ * but __dev_queue_xmit() only treats a qdisc as noqueue
+ * if this is NULL - so clear it here. */
+ qdisc->enqueue = NULL;
+ return 0;
+}
+
+struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
.id = "noqueue",
.priv_size = 0,
+ .init = noqueue_init,
.enqueue = noop_enqueue,
.dequeue = noop_dequeue,
.peek = noop_dequeue,
.owner = THIS_MODULE,
};
-static struct Qdisc noqueue_qdisc;
-static struct netdev_queue noqueue_netdev_queue = {
- .qdisc = &noqueue_qdisc,
- .qdisc_sleeping = &noqueue_qdisc,
-};
-
-static struct Qdisc noqueue_qdisc = {
- .enqueue = NULL,
- .dequeue = noop_dequeue,
- .flags = TCQ_F_BUILTIN,
- .ops = &noqueue_qdisc_ops,
- .list = LIST_HEAD_INIT(noqueue_qdisc.list),
- .q.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
- .dev_queue = &noqueue_netdev_queue,
- .busylock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.busylock),
-};
-
-
static const u8 prio2band[TC_PRIO_MAX + 1] = {
1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1
};
@@ -733,18 +725,19 @@ static void attach_one_default_qdisc(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_unused)
{
- struct Qdisc *qdisc = &noqueue_qdisc;
+ struct Qdisc *qdisc;
+ const struct Qdisc_ops *ops = default_qdisc_ops;
- if (dev->tx_queue_len) {
- qdisc = qdisc_create_dflt(dev_queue,
- default_qdisc_ops, TC_H_ROOT);
- if (!qdisc) {
- netdev_info(dev, "activation failed\n");
- return;
- }
- if (!netif_is_multiqueue(dev))
- qdisc->flags |= TCQ_F_ONETXQUEUE;
+ if (dev->priv_flags & IFF_NO_QUEUE)
+ ops = &noqueue_qdisc_ops;
+
+ qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT);
+ if (!qdisc) {
+ netdev_info(dev, "activation failed\n");
+ return;
}
+ if (!netif_is_multiqueue(dev))
+ qdisc->flags |= TCQ_F_ONETXQUEUE;
dev_queue->qdisc_sleeping = qdisc;
}
@@ -755,7 +748,8 @@ static void attach_default_qdiscs(struct net_device *dev)
txq = netdev_get_tx_queue(dev, 0);
- if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
+ if (!netif_is_multiqueue(dev) ||
+ dev->priv_flags & IFF_NO_QUEUE) {
netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
dev->qdisc = txq->qdisc_sleeping;
atomic_inc(&dev->qdisc->refcnt);
@@ -779,7 +773,7 @@ static void transition_one_qdisc(struct net_device *dev,
clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
- if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {
+ if (need_watchdog_p) {
dev_queue->trans_start = 0;
*need_watchdog_p = 1;
}
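The static noqueue_qdisc singleton can go away because the transmit path identifies noqueue purely by a NULL ->enqueue, which noqueue_init() arranges. A sketch of how such a dispatch keys off the NULL pointer — this is the shape of the check, not the real __dev_queue_xmit():

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/sch_generic.h>

/* stand-in for handing the skb to the driver's ndo_start_xmit */
static int example_direct_xmit(struct sk_buff *skb, struct net_device *dev)
{
    consume_skb(skb);           /* pretend it was transmitted */
    return 0;
}

static int example_xmit(struct Qdisc *q, struct sk_buff *skb,
                        struct net_device *dev)
{
    if (q->enqueue)             /* normal qdisc: queue the packet */
        return q->enqueue(skb, q);

    /* noqueue: ->enqueue is NULL, bypass queueing entirely */
    return example_direct_xmit(skb, dev);
}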
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index abb9f2fec..80105109f 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -512,11 +512,9 @@ static int gred_init(struct Qdisc *sch, struct nlattr *opt)
if (tb[TCA_GRED_LIMIT])
sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
- else {
- u32 qlen = qdisc_dev(sch)->tx_queue_len ? : 1;
-
- sch->limit = qlen * psched_mtu(qdisc_dev(sch));
- }
+ else
+ sch->limit = qdisc_dev(sch)->tx_queue_len
+ * psched_mtu(qdisc_dev(sch));
return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
}
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index e6c7416d0..b7ebe2c87 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1165,7 +1165,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
head = &q->root;
tcf = rcu_dereference_bh(q->root.filter_list);
- while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
+ while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_QUEUED:
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 9d15cb6b8..86b04e31e 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -368,6 +368,15 @@ static unsigned int hhf_drop(struct Qdisc *sch)
return bucket - q->buckets;
}
+static unsigned int hhf_qdisc_drop(struct Qdisc *sch)
+{
+ unsigned int prev_backlog;
+
+ prev_backlog = sch->qstats.backlog;
+ hhf_drop(sch);
+ return prev_backlog - sch->qstats.backlog;
+}
+
static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct hhf_sched_data *q = qdisc_priv(sch);
@@ -696,7 +705,7 @@ static struct Qdisc_ops hhf_qdisc_ops __read_mostly = {
.enqueue = hhf_enqueue,
.dequeue = hhf_dequeue,
.peek = qdisc_peek_dequeued,
- .drop = hhf_drop,
+ .drop = hhf_qdisc_drop,
.init = hhf_init,
.reset = hhf_reset,
.destroy = hhf_destroy,
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index f1acb0f60..15ccd7f8f 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -229,7 +229,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
}
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
+ while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_QUEUED:
@@ -1048,11 +1048,9 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
if (tb[TCA_HTB_DIRECT_QLEN])
q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
- else {
+ else
q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
- if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
- q->direct_qlen = 2;
- }
+
if ((q->rate2quantum = gopt->rate2quantum) < 1)
q->rate2quantum = 1;
q->defcls = gopt->defcls;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 42dd21887..4e904ca0a 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -46,7 +46,7 @@ multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
int err;
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- err = tc_classify(skb, fl, &res);
+ err = tc_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
switch (err) {
case TC_ACT_STOLEN:
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index ade9445a5..5abfe4467 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -130,12 +130,8 @@ static int plug_init(struct Qdisc *sch, struct nlattr *opt)
q->unplug_indefinite = false;
if (opt == NULL) {
- /* We will set a default limit of 100 pkts (~150kB)
- * in case tx_queue_len is not available. The
- * default value is completely arbitrary.
- */
- u32 pkt_limit = qdisc_dev(sch)->tx_queue_len ? : 100;
- q->limit = pkt_limit * psched_mtu(qdisc_dev(sch));
+ q->limit = qdisc_dev(sch)->tx_queue_len
+ * psched_mtu(qdisc_dev(sch));
} else {
struct tc_plug_qopt *ctl = nla_data(opt);
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 8e5cd34aa..ba6487f27 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -42,7 +42,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
if (TC_H_MAJ(skb->priority) != sch->handle) {
fl = rcu_dereference_bh(q->filter_list);
- err = tc_classify(skb, fl, &res);
+ err = tc_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
switch (err) {
case TC_ACT_STOLEN:
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index b8d73bca6..3dc3a6e56 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -186,7 +186,6 @@ struct qfq_sched {
u64 oldV, V; /* Precise virtual times. */
struct qfq_aggregate *in_serv_agg; /* Aggregate being served. */
- u32 num_active_agg; /* Num. of active aggregates */
u32 wsum; /* weight sum */
u32 iwsum; /* inverse weight sum */
@@ -718,7 +717,7 @@ static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
fl = rcu_dereference_bh(q->filter_list);
- result = tc_classify(skb, fl, &res);
+ result = tc_classify(skb, fl, &res, false);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 4b8151933..5bbb6332e 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -258,7 +258,7 @@ static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
struct tcf_result res;
int result;
- result = tc_classify(skb, fl, &res);
+ result = tc_classify(skb, fl, &res, false);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
@@ -502,7 +502,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
limit = ctl->limit;
if (limit == 0)
- limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);
+ limit = qdisc_dev(sch)->tx_queue_len;
child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
if (IS_ERR(child))
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 52f75a547..3abab534e 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -179,7 +179,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
return sfq_hash(q, skb) + 1;
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- result = tc_classify(skb, fl, &res);
+ result = tc_classify(skb, fl, &res, false);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 197c3f59e..b00f1f961 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1208,20 +1208,22 @@ void sctp_assoc_update(struct sctp_association *asoc,
* within this document.
*
* Our basic strategy is to round-robin transports in priorities
- * according to sctp_state_prio_map[] e.g., if no such
+ * according to sctp_trans_score() e.g., if no such
* transport with state SCTP_ACTIVE exists, round-robin through
* SCTP_UNKNOWN, etc. You get the picture.
*/
-static const u8 sctp_trans_state_to_prio_map[] = {
- [SCTP_ACTIVE] = 3, /* best case */
- [SCTP_UNKNOWN] = 2,
- [SCTP_PF] = 1,
- [SCTP_INACTIVE] = 0, /* worst case */
-};
-
static u8 sctp_trans_score(const struct sctp_transport *trans)
{
- return sctp_trans_state_to_prio_map[trans->state];
+ switch (trans->state) {
+ case SCTP_ACTIVE:
+ return 3; /* best case */
+ case SCTP_UNKNOWN:
+ return 2;
+ case SCTP_PF:
+ return 1;
+ default: /* case SCTP_INACTIVE */
+ return 0; /* worst case */
+ }
}
static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 3ac604f96..3d9ea9a48 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -487,23 +487,43 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
*/
rcu_read_lock();
list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+ struct net_device *odev;
+
if (!laddr->valid)
continue;
- if ((laddr->state == SCTP_ADDR_SRC) &&
- (AF_INET == laddr->a.sa.sa_family)) {
- fl4->fl4_sport = laddr->a.v4.sin_port;
- flowi4_update_output(fl4,
- asoc->base.sk->sk_bound_dev_if,
- RT_CONN_FLAGS(asoc->base.sk),
- daddr->v4.sin_addr.s_addr,
- laddr->a.v4.sin_addr.s_addr);
-
- rt = ip_route_output_key(sock_net(sk), fl4);
- if (!IS_ERR(rt)) {
- dst = &rt->dst;
- goto out_unlock;
- }
+ if (laddr->state != SCTP_ADDR_SRC ||
+ AF_INET != laddr->a.sa.sa_family)
+ continue;
+
+ fl4->fl4_sport = laddr->a.v4.sin_port;
+ flowi4_update_output(fl4,
+ asoc->base.sk->sk_bound_dev_if,
+ RT_CONN_FLAGS(asoc->base.sk),
+ daddr->v4.sin_addr.s_addr,
+ laddr->a.v4.sin_addr.s_addr);
+
+ rt = ip_route_output_key(sock_net(sk), fl4);
+ if (IS_ERR(rt))
+ continue;
+
+ if (!dst)
+ dst = &rt->dst;
+
+ /* Ensure the src address belongs to the output
+ * interface.
+ */
+ odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
+ false);
+ if (!odev || odev->ifindex != fl4->flowi4_oif) {
+ if (&rt->dst != dst)
+ dst_release(&rt->dst);
+ continue;
}
+
+ if (dst != &rt->dst)
+ dst_release(dst);
+ dst = &rt->dst;
+ break;
}
out_unlock:
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index a655ddc3f..7954c52e1 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -3090,8 +3090,19 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
sctp_assoc_set_primary(asoc, asconf->transport);
sctp_assoc_del_nonprimary_peers(asoc,
asconf->transport);
- } else
- sctp_assoc_del_peer(asoc, &addr);
+ return SCTP_ERROR_NO_ERROR;
+ }
+
+ /* If the address is not part of the association, an
+ * ASCONF-ACK with an Error Cause Indication Parameter
+ * including the cause Unresolvable Address should
+ * be sent.
+ */
+ peer = sctp_assoc_lookup_paddr(asoc, &addr);
+ if (!peer)
+ return SCTP_ERROR_DNS_FAILED;
+
+ sctp_assoc_rm_peer(asoc, peer);
break;
case SCTP_PARAM_SET_PRIMARY:
/* ADDIP Section 4.2.4
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 85e6f03ae..6098d4c42 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -244,12 +244,13 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
int error;
struct sctp_transport *transport = (struct sctp_transport *) peer;
struct sctp_association *asoc = transport->asoc;
- struct net *net = sock_net(asoc->base.sk);
+ struct sock *sk = asoc->base.sk;
+ struct net *net = sock_net(sk);
/* Check whether a task is in the sock. */
- bh_lock_sock(asoc->base.sk);
- if (sock_owned_by_user(asoc->base.sk)) {
+ bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
pr_debug("%s: sock is busy\n", __func__);
/* Try again later. */
@@ -272,10 +273,10 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
transport, GFP_ATOMIC);
if (error)
- asoc->base.sk->sk_err = -error;
+ sk->sk_err = -error;
out_unlock:
- bh_unlock_sock(asoc->base.sk);
+ bh_unlock_sock(sk);
sctp_transport_put(transport);
}
@@ -285,11 +286,12 @@ out_unlock:
static void sctp_generate_timeout_event(struct sctp_association *asoc,
sctp_event_timeout_t timeout_type)
{
- struct net *net = sock_net(asoc->base.sk);
+ struct sock *sk = asoc->base.sk;
+ struct net *net = sock_net(sk);
int error = 0;
- bh_lock_sock(asoc->base.sk);
- if (sock_owned_by_user(asoc->base.sk)) {
+ bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
pr_debug("%s: sock is busy: timer %d\n", __func__,
timeout_type);
@@ -312,10 +314,10 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
(void *)timeout_type, GFP_ATOMIC);
if (error)
- asoc->base.sk->sk_err = -error;
+ sk->sk_err = -error;
out_unlock:
- bh_unlock_sock(asoc->base.sk);
+ bh_unlock_sock(sk);
sctp_association_put(asoc);
}
@@ -365,10 +367,11 @@ void sctp_generate_heartbeat_event(unsigned long data)
int error = 0;
struct sctp_transport *transport = (struct sctp_transport *) data;
struct sctp_association *asoc = transport->asoc;
- struct net *net = sock_net(asoc->base.sk);
+ struct sock *sk = asoc->base.sk;
+ struct net *net = sock_net(sk);
- bh_lock_sock(asoc->base.sk);
- if (sock_owned_by_user(asoc->base.sk)) {
+ bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
pr_debug("%s: sock is busy\n", __func__);
/* Try again later. */
@@ -388,11 +391,11 @@ void sctp_generate_heartbeat_event(unsigned long data)
asoc->state, asoc->ep, asoc,
transport, GFP_ATOMIC);
- if (error)
- asoc->base.sk->sk_err = -error;
+ if (error)
+ sk->sk_err = -error;
out_unlock:
- bh_unlock_sock(asoc->base.sk);
+ bh_unlock_sock(sk);
sctp_transport_put(transport);
}
@@ -403,10 +406,11 @@ void sctp_generate_proto_unreach_event(unsigned long data)
{
struct sctp_transport *transport = (struct sctp_transport *) data;
struct sctp_association *asoc = transport->asoc;
- struct net *net = sock_net(asoc->base.sk);
+ struct sock *sk = asoc->base.sk;
+ struct net *net = sock_net(sk);
- bh_lock_sock(asoc->base.sk);
- if (sock_owned_by_user(asoc->base.sk)) {
+ bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
pr_debug("%s: sock is busy\n", __func__);
/* Try again later. */
@@ -427,7 +431,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
out_unlock:
- bh_unlock_sock(asoc->base.sk);
+ bh_unlock_sock(sk);
sctp_association_put(asoc);
}
@@ -954,7 +958,7 @@ static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
t = list_entry(pos, struct sctp_transport, transports);
if (!sctp_cmp_addr_exact(&t->ipaddr,
&asoc->peer.primary_addr)) {
- sctp_assoc_del_peer(asoc, &t->ipaddr);
+ sctp_assoc_rm_peer(asoc, t);
}
}
}
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 3ee27b770..d7eaa7354 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -853,7 +853,7 @@ nomem:
/*
* Respond to a normal COOKIE ACK chunk.
- * We are the side that is being asked for an association.
+ * We are the side that is asking for an association.
*
* RFC 2960 5.1 Normal Establishment of an Association
*
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 4feda2d0a..548240dd1 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -23,7 +23,7 @@ struct unx_cred {
};
#define uc_uid uc_base.cr_uid
-#define UNX_WRITESLACK (21 + (UNX_MAXNODENAME >> 2))
+#define UNX_WRITESLACK (21 + XDR_QUADLEN(UNX_MAXNODENAME))
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_AUTH
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 2928afffb..4a2340a54 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -44,7 +44,7 @@ static void cache_revisit_request(struct cache_head *item);
static void cache_init(struct cache_head *h)
{
time_t now = seconds_since_boot();
- h->next = NULL;
+ INIT_HLIST_NODE(&h->cache_list);
h->flags = 0;
kref_init(&h->ref);
h->expiry_time = now + CACHE_NEW_EXPIRY;
@@ -54,15 +54,14 @@ static void cache_init(struct cache_head *h)
struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
struct cache_head *key, int hash)
{
- struct cache_head **head, **hp;
- struct cache_head *new = NULL, *freeme = NULL;
+ struct cache_head *new = NULL, *freeme = NULL, *tmp = NULL;
+ struct hlist_head *head;
head = &detail->hash_table[hash];
read_lock(&detail->hash_lock);
- for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
- struct cache_head *tmp = *hp;
+ hlist_for_each_entry(tmp, head, cache_list) {
if (detail->match(tmp, key)) {
if (cache_is_expired(detail, tmp))
/* This entry is expired, we will discard it. */
@@ -88,12 +87,10 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
write_lock(&detail->hash_lock);
/* check if entry appeared while we slept */
- for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
- struct cache_head *tmp = *hp;
+ hlist_for_each_entry(tmp, head, cache_list) {
if (detail->match(tmp, key)) {
if (cache_is_expired(detail, tmp)) {
- *hp = tmp->next;
- tmp->next = NULL;
+ hlist_del_init(&tmp->cache_list);
detail->entries --;
freeme = tmp;
break;
@@ -104,8 +101,8 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
return tmp;
}
}
- new->next = *head;
- *head = new;
+
+ hlist_add_head(&new->cache_list, head);
detail->entries++;
cache_get(new);
write_unlock(&detail->hash_lock);
@@ -143,7 +140,6 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
* If 'old' is not VALID, we update it directly,
* otherwise we need to replace it
*/
- struct cache_head **head;
struct cache_head *tmp;
if (!test_bit(CACHE_VALID, &old->flags)) {
@@ -168,15 +164,13 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
}
cache_init(tmp);
detail->init(tmp, old);
- head = &detail->hash_table[hash];
write_lock(&detail->hash_lock);
if (test_bit(CACHE_NEGATIVE, &new->flags))
set_bit(CACHE_NEGATIVE, &tmp->flags);
else
detail->update(tmp, new);
- tmp->next = *head;
- *head = tmp;
+ hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
detail->entries++;
cache_get(tmp);
cache_fresh_locked(tmp, new->expiry_time);
@@ -416,28 +410,29 @@ static int cache_clean(void)
/* find a non-empty bucket in the table */
while (current_detail &&
current_index < current_detail->hash_size &&
- current_detail->hash_table[current_index] == NULL)
+ hlist_empty(&current_detail->hash_table[current_index]))
current_index++;
/* find a cleanable entry in the bucket and clean it, or set to next bucket */
if (current_detail && current_index < current_detail->hash_size) {
- struct cache_head *ch, **cp;
+ struct cache_head *ch = NULL;
struct cache_detail *d;
+ struct hlist_head *head;
+ struct hlist_node *tmp;
write_lock(&current_detail->hash_lock);
/* Ok, now to clean this strand */
- cp = & current_detail->hash_table[current_index];
- for (ch = *cp ; ch ; cp = & ch->next, ch = *cp) {
+ head = &current_detail->hash_table[current_index];
+ hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
if (current_detail->nextcheck > ch->expiry_time)
current_detail->nextcheck = ch->expiry_time+1;
if (!cache_is_expired(current_detail, ch))
continue;
- *cp = ch->next;
- ch->next = NULL;
+ hlist_del_init(&ch->cache_list);
current_detail->entries--;
rv = 1;
break;
@@ -1270,18 +1265,13 @@ EXPORT_SYMBOL_GPL(qword_get);
* get a header, then pass each real item in the cache
*/
-struct handle {
- struct cache_detail *cd;
-};
-
-static void *c_start(struct seq_file *m, loff_t *pos)
+void *cache_seq_start(struct seq_file *m, loff_t *pos)
__acquires(cd->hash_lock)
{
loff_t n = *pos;
unsigned int hash, entry;
struct cache_head *ch;
- struct cache_detail *cd = ((struct handle*)m->private)->cd;
-
+ struct cache_detail *cd = m->private;
read_lock(&cd->hash_lock);
if (!n--)
@@ -1289,7 +1279,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
hash = n >> 32;
entry = n & ((1LL<<32) - 1);
- for (ch=cd->hash_table[hash]; ch; ch=ch->next)
+ hlist_for_each_entry(ch, &cd->hash_table[hash], cache_list)
if (!entry--)
return ch;
n &= ~((1LL<<32) - 1);
@@ -1297,51 +1287,57 @@ static void *c_start(struct seq_file *m, loff_t *pos)
hash++;
n += 1LL<<32;
} while(hash < cd->hash_size &&
- cd->hash_table[hash]==NULL);
+ hlist_empty(&cd->hash_table[hash]));
if (hash >= cd->hash_size)
return NULL;
*pos = n+1;
- return cd->hash_table[hash];
+ return hlist_entry_safe(cd->hash_table[hash].first,
+ struct cache_head, cache_list);
}
+EXPORT_SYMBOL_GPL(cache_seq_start);
-static void *c_next(struct seq_file *m, void *p, loff_t *pos)
+void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
struct cache_head *ch = p;
int hash = (*pos >> 32);
- struct cache_detail *cd = ((struct handle*)m->private)->cd;
+ struct cache_detail *cd = m->private;
if (p == SEQ_START_TOKEN)
hash = 0;
- else if (ch->next == NULL) {
+ else if (ch->cache_list.next == NULL) {
hash++;
*pos += 1LL<<32;
} else {
++*pos;
- return ch->next;
+ return hlist_entry_safe(ch->cache_list.next,
+ struct cache_head, cache_list);
}
*pos &= ~((1LL<<32) - 1);
while (hash < cd->hash_size &&
- cd->hash_table[hash] == NULL) {
+ hlist_empty(&cd->hash_table[hash])) {
hash++;
*pos += 1LL<<32;
}
if (hash >= cd->hash_size)
return NULL;
++*pos;
- return cd->hash_table[hash];
+ return hlist_entry_safe(cd->hash_table[hash].first,
+ struct cache_head, cache_list);
}
+EXPORT_SYMBOL_GPL(cache_seq_next);
-static void c_stop(struct seq_file *m, void *p)
+void cache_seq_stop(struct seq_file *m, void *p)
__releases(cd->hash_lock)
{
- struct cache_detail *cd = ((struct handle*)m->private)->cd;
+ struct cache_detail *cd = m->private;
read_unlock(&cd->hash_lock);
}
+EXPORT_SYMBOL_GPL(cache_seq_stop);
static int c_show(struct seq_file *m, void *p)
{
struct cache_head *cp = p;
- struct cache_detail *cd = ((struct handle*)m->private)->cd;
+ struct cache_detail *cd = m->private;
if (p == SEQ_START_TOKEN)
return cd->cache_show(m, cd, NULL);
@@ -1364,33 +1360,36 @@ static int c_show(struct seq_file *m, void *p)
}
static const struct seq_operations cache_content_op = {
- .start = c_start,
- .next = c_next,
- .stop = c_stop,
+ .start = cache_seq_start,
+ .next = cache_seq_next,
+ .stop = cache_seq_stop,
.show = c_show,
};
static int content_open(struct inode *inode, struct file *file,
struct cache_detail *cd)
{
- struct handle *han;
+ struct seq_file *seq;
+ int err;
if (!cd || !try_module_get(cd->owner))
return -EACCES;
- han = __seq_open_private(file, &cache_content_op, sizeof(*han));
- if (han == NULL) {
+
+ err = seq_open(file, &cache_content_op);
+ if (err) {
module_put(cd->owner);
- return -ENOMEM;
+ return err;
}
- han->cd = cd;
+ seq = file->private_data;
+ seq->private = cd;
return 0;
}
static int content_release(struct inode *inode, struct file *file,
struct cache_detail *cd)
{
- int ret = seq_release_private(inode, file);
+ int ret = seq_release(inode, file);
module_put(cd->owner);
return ret;
}
@@ -1665,17 +1664,21 @@ EXPORT_SYMBOL_GPL(cache_unregister_net);
struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net)
{
struct cache_detail *cd;
+ int i;
cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
if (cd == NULL)
return ERR_PTR(-ENOMEM);
- cd->hash_table = kzalloc(cd->hash_size * sizeof(struct cache_head *),
+ cd->hash_table = kzalloc(cd->hash_size * sizeof(struct hlist_head),
GFP_KERNEL);
if (cd->hash_table == NULL) {
kfree(cd);
return ERR_PTR(-ENOMEM);
}
+
+ for (i = 0; i < cd->hash_size; i++)
+ INIT_HLIST_HEAD(&cd->hash_table[i]);
cd->net = net;
return cd;
}
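The cache.c conversion swaps the open-coded ->next chains for standard hlist buckets, so insertion, lookup and expiry all use stock helpers — including the _safe iterator where entries are unlinked mid-walk. A sketch of the resulting hash-table shape, with illustrative types:

#include <linux/types.h>
#include <linux/list.h>
#include <linux/slab.h>

#define EXAMPLE_HASH_SIZE 16

struct example_entry {
    int key;
    struct hlist_node link;
};

static struct hlist_head example_table[EXAMPLE_HASH_SIZE];

static void example_init(void)
{
    int i;

    for (i = 0; i < EXAMPLE_HASH_SIZE; i++)
        INIT_HLIST_HEAD(&example_table[i]);
}

static struct example_entry *example_lookup(int key)
{
    struct example_entry *e;

    hlist_for_each_entry(e, &example_table[key % EXAMPLE_HASH_SIZE], link)
        if (e->key == key)
            return e;
    return NULL;
}

static void example_reap(int bucket, bool (*expired)(struct example_entry *))
{
    struct example_entry *e;
    struct hlist_node *tmp;

    /* _safe variant: the current node may be unlinked while iterating */
    hlist_for_each_entry_safe(e, tmp, &example_table[bucket], link) {
        if (expired(e)) {
            hlist_del_init(&e->link);
            kfree(e);
        }
    }
}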
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 337ca851a..f14f24ee9 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -1092,14 +1092,10 @@ void
rpc_destroy_mempool(void)
{
rpciod_stop();
- if (rpc_buffer_mempool)
- mempool_destroy(rpc_buffer_mempool);
- if (rpc_task_mempool)
- mempool_destroy(rpc_task_mempool);
- if (rpc_task_slabp)
- kmem_cache_destroy(rpc_task_slabp);
- if (rpc_buffer_slabp)
- kmem_cache_destroy(rpc_buffer_slabp);
+ mempool_destroy(rpc_buffer_mempool);
+ mempool_destroy(rpc_task_mempool);
+ kmem_cache_destroy(rpc_task_slabp);
+ kmem_cache_destroy(rpc_buffer_slabp);
rpc_destroy_wait_queue(&delay_queue);
}
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 5a16d8d8c..a8f579df1 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -34,36 +34,19 @@
static void svc_unregister(const struct svc_serv *serv, struct net *net);
-#define svc_serv_is_pooled(serv) ((serv)->sv_function)
+#define svc_serv_is_pooled(serv) ((serv)->sv_ops->svo_function)
-/*
- * Mode for mapping cpus to pools.
- */
-enum {
- SVC_POOL_AUTO = -1, /* choose one of the others */
- SVC_POOL_GLOBAL, /* no mapping, just a single global pool
- * (legacy & UP mode) */
- SVC_POOL_PERCPU, /* one pool per cpu */
- SVC_POOL_PERNODE /* one pool per numa node */
-};
#define SVC_POOL_DEFAULT SVC_POOL_GLOBAL
/*
* Structure for mapping cpus to pools and vice versa.
* Setup once during sunrpc initialisation.
*/
-static struct svc_pool_map {
- int count; /* How many svc_servs use us */
- int mode; /* Note: int not enum to avoid
- * warnings about "enumeration value
- * not handled in switch" */
- unsigned int npools;
- unsigned int *pool_to; /* maps pool id to cpu or node */
- unsigned int *to_pool; /* maps cpu or node to pool id */
-} svc_pool_map = {
- .count = 0,
+struct svc_pool_map svc_pool_map = {
.mode = SVC_POOL_DEFAULT
};
+EXPORT_SYMBOL_GPL(svc_pool_map);
+
static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */
static int
@@ -236,7 +219,7 @@ svc_pool_map_init_pernode(struct svc_pool_map *m)
* vice versa). Initialise the map if we're the first user.
* Returns the number of pools.
*/
-static unsigned int
+unsigned int
svc_pool_map_get(void)
{
struct svc_pool_map *m = &svc_pool_map;
@@ -271,7 +254,7 @@ svc_pool_map_get(void)
mutex_unlock(&svc_pool_map_mutex);
return m->npools;
}
-
+EXPORT_SYMBOL_GPL(svc_pool_map_get);
/*
* Drop a reference to the global map of cpus to pools.
@@ -280,7 +263,7 @@ svc_pool_map_get(void)
* mode using the pool_mode module option without
* rebooting or re-loading sunrpc.ko.
*/
-static void
+void
svc_pool_map_put(void)
{
struct svc_pool_map *m = &svc_pool_map;
@@ -297,7 +280,7 @@ svc_pool_map_put(void)
mutex_unlock(&svc_pool_map_mutex);
}
-
+EXPORT_SYMBOL_GPL(svc_pool_map_put);
static int svc_pool_map_get_node(unsigned int pidx)
{
@@ -423,7 +406,7 @@ EXPORT_SYMBOL_GPL(svc_bind);
*/
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
- void (*shutdown)(struct svc_serv *serv, struct net *net))
+ struct svc_serv_ops *ops)
{
struct svc_serv *serv;
unsigned int vers;
@@ -440,7 +423,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
bufsize = RPCSVC_MAXPAYLOAD;
serv->sv_max_payload = bufsize? bufsize : 4096;
serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
- serv->sv_shutdown = shutdown;
+ serv->sv_ops = ops;
xdrsize = 0;
while (prog) {
prog->pg_lovers = prog->pg_nvers-1;
@@ -486,26 +469,22 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
- void (*shutdown)(struct svc_serv *serv, struct net *net))
+ struct svc_serv_ops *ops)
{
- return __svc_create(prog, bufsize, /*npools*/1, shutdown);
+ return __svc_create(prog, bufsize, /*npools*/1, ops);
}
EXPORT_SYMBOL_GPL(svc_create);
struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
- void (*shutdown)(struct svc_serv *serv, struct net *net),
- svc_thread_fn func, struct module *mod)
+ struct svc_serv_ops *ops)
{
struct svc_serv *serv;
unsigned int npools = svc_pool_map_get();
- serv = __svc_create(prog, bufsize, npools, shutdown);
+ serv = __svc_create(prog, bufsize, npools, ops);
if (!serv)
goto out_err;
-
- serv->sv_function = func;
- serv->sv_module = mod;
return serv;
out_err:
svc_pool_map_put();
@@ -517,8 +496,8 @@ void svc_shutdown_net(struct svc_serv *serv, struct net *net)
{
svc_close_net(serv, net);
- if (serv->sv_shutdown)
- serv->sv_shutdown(serv, net);
+ if (serv->sv_ops->svo_shutdown)
+ serv->sv_ops->svo_shutdown(serv, net);
}
EXPORT_SYMBOL_GPL(svc_shutdown_net);
@@ -604,40 +583,52 @@ svc_release_buffer(struct svc_rqst *rqstp)
}
struct svc_rqst *
-svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
+svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
{
struct svc_rqst *rqstp;
rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
if (!rqstp)
- goto out_enomem;
+ return rqstp;
- serv->sv_nrthreads++;
__set_bit(RQ_BUSY, &rqstp->rq_flags);
spin_lock_init(&rqstp->rq_lock);
rqstp->rq_server = serv;
rqstp->rq_pool = pool;
- spin_lock_bh(&pool->sp_lock);
- pool->sp_nrthreads++;
- list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
- spin_unlock_bh(&pool->sp_lock);
rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
if (!rqstp->rq_argp)
- goto out_thread;
+ goto out_enomem;
rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
if (!rqstp->rq_resp)
- goto out_thread;
+ goto out_enomem;
if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
- goto out_thread;
+ goto out_enomem;
return rqstp;
-out_thread:
- svc_exit_thread(rqstp);
out_enomem:
- return ERR_PTR(-ENOMEM);
+ svc_rqst_free(rqstp);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(svc_rqst_alloc);
+
+struct svc_rqst *
+svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
+{
+ struct svc_rqst *rqstp;
+
+ rqstp = svc_rqst_alloc(serv, pool, node);
+ if (!rqstp)
+ return ERR_PTR(-ENOMEM);
+
+ serv->sv_nrthreads++;
+ spin_lock_bh(&pool->sp_lock);
+ pool->sp_nrthreads++;
+ list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
+ spin_unlock_bh(&pool->sp_lock);
+ return rqstp;
}
EXPORT_SYMBOL_GPL(svc_prepare_thread);
@@ -739,12 +730,12 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
break;
}
- __module_get(serv->sv_module);
- task = kthread_create_on_node(serv->sv_function, rqstp,
+ __module_get(serv->sv_ops->svo_module);
+ task = kthread_create_on_node(serv->sv_ops->svo_function, rqstp,
node, "%s", serv->sv_name);
if (IS_ERR(task)) {
error = PTR_ERR(task);
- module_put(serv->sv_module);
+ module_put(serv->sv_ops->svo_module);
svc_exit_thread(rqstp);
break;
}
@@ -772,15 +763,21 @@ EXPORT_SYMBOL_GPL(svc_set_num_threads);
* mutex" for the service.
*/
void
-svc_exit_thread(struct svc_rqst *rqstp)
+svc_rqst_free(struct svc_rqst *rqstp)
{
- struct svc_serv *serv = rqstp->rq_server;
- struct svc_pool *pool = rqstp->rq_pool;
-
svc_release_buffer(rqstp);
kfree(rqstp->rq_resp);
kfree(rqstp->rq_argp);
kfree(rqstp->rq_auth_data);
+ kfree_rcu(rqstp, rq_rcu_head);
+}
+EXPORT_SYMBOL_GPL(svc_rqst_free);
+
+void
+svc_exit_thread(struct svc_rqst *rqstp)
+{
+ struct svc_serv *serv = rqstp->rq_server;
+ struct svc_pool *pool = rqstp->rq_pool;
spin_lock_bh(&pool->sp_lock);
pool->sp_nrthreads--;
@@ -788,7 +785,7 @@ svc_exit_thread(struct svc_rqst *rqstp)
list_del_rcu(&rqstp->rq_all);
spin_unlock_bh(&pool->sp_lock);
- kfree_rcu(rqstp, rq_rcu_head);
+ svc_rqst_free(rqstp);
/* Release the server */
if (serv)
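
Exporting svc_pool_map_get()/svc_pool_map_put() hands other modules the classic mutex-protected refcount idiom: the first get lazily initialises the cpu-to-pool map, and the last put tears it down so the pool_mode option can change without reloading sunrpc.ko. A minimal userspace sketch of that idiom, with pthreads standing in for the kernel mutex and a fixed pool count standing in for the per-cpu/per-node probe:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t map_mutex = PTHREAD_MUTEX_INITIALIZER;
static int map_count;   /* how many users currently hold a reference */
static int map_npools;  /* lazily computed on first get */

static unsigned int pool_map_get(void)
{
	pthread_mutex_lock(&map_mutex);
	if (map_count++ == 0)
		map_npools = 4;  /* stand-in for probing cpus/NUMA nodes */
	pthread_mutex_unlock(&map_mutex);
	return map_npools;
}

static void pool_map_put(void)
{
	pthread_mutex_lock(&map_mutex);
	if (--map_count == 0)
		map_npools = 0;  /* last user gone: forget the mapping */
	pthread_mutex_unlock(&map_mutex);
}

int main(void)
{
	printf("npools=%u\n", pool_map_get());
	pool_map_put();
	return 0;
}
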
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 163ac45c3..a6cbb2104 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -24,7 +24,6 @@ static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
static void svc_age_temp_xprts(unsigned long closure);
static void svc_delete_xprt(struct svc_xprt *xprt);
-static void svc_xprt_do_enqueue(struct svc_xprt *xprt);
/* apparently the "standard" is that clients close
* idle connections after 5 minutes, servers after
@@ -225,12 +224,12 @@ static void svc_xprt_received(struct svc_xprt *xprt)
}
/* As soon as we clear busy, the xprt could be closed and
- * 'put', so we need a reference to call svc_xprt_do_enqueue with:
+ * 'put', so we need a reference to call svc_enqueue_xprt with:
*/
svc_xprt_get(xprt);
smp_mb__before_atomic();
clear_bit(XPT_BUSY, &xprt->xpt_flags);
- svc_xprt_do_enqueue(xprt);
+ xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
svc_xprt_put(xprt);
}
@@ -320,7 +319,7 @@ static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
return false;
}
-static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
+void svc_xprt_do_enqueue(struct svc_xprt *xprt)
{
struct svc_pool *pool;
struct svc_rqst *rqstp = NULL;
@@ -402,6 +401,7 @@ redo_search:
out:
trace_svc_xprt_do_enqueue(xprt, rqstp);
}
+EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue);
/*
* Queue up a transport with data pending. If there are idle nfsd
@@ -412,7 +412,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
{
if (test_bit(XPT_BUSY, &xprt->xpt_flags))
return;
- svc_xprt_do_enqueue(xprt);
+ xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);
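
With svo_enqueue_xprt in the per-server ops table, both svc_xprt_received() and svc_xprt_enqueue() dispatch indirectly instead of calling svc_xprt_do_enqueue() directly, which becomes the exported default implementation a server can override. A hedged sketch of that vtable dispatch, with invented type and field names:

#include <stdio.h>

struct xprt;

struct serv_ops {
	void (*enqueue_xprt)(struct xprt *xp);  /* overridable hook */
};

struct xprt {
	const struct serv_ops *ops;
	const char *name;
};

/* Exported default, analogous to svc_xprt_do_enqueue(). */
static void default_enqueue(struct xprt *xp)
{
	printf("default enqueue of %s\n", xp->name);
}

static const struct serv_ops default_ops = {
	.enqueue_xprt = default_enqueue,
};

int main(void)
{
	struct xprt xp = { .ops = &default_ops, .name = "tcp0" };

	xp.ops->enqueue_xprt(&xp);  /* indirect call, as in the patch */
	return 0;
}
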
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 04ea91420..5318951b3 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -117,7 +117,7 @@ __frwr_recovery_worker(struct work_struct *work)
if (ib_dereg_mr(r->r.frmr.fr_mr))
goto out_fail;
- r->r.frmr.fr_mr = ib_alloc_fast_reg_mr(pd, depth);
+ r->r.frmr.fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
if (IS_ERR(r->r.frmr.fr_mr))
goto out_fail;
@@ -148,7 +148,7 @@ __frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device,
struct rpcrdma_frmr *f = &r->r.frmr;
int rc;
- f->fr_mr = ib_alloc_fast_reg_mr(pd, depth);
+ f->fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
if (IS_ERR(f->fr_mr))
goto out_mr_err;
f->fr_pgl = ib_alloc_fast_reg_page_list(device, depth);
@@ -158,7 +158,7 @@ __frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device,
out_mr_err:
rc = PTR_ERR(f->fr_mr);
- dprintk("RPC: %s: ib_alloc_fast_reg_mr status %i\n",
+ dprintk("RPC: %s: ib_alloc_mr status %i\n",
__func__, rc);
return rc;
diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c
index 41985d07f..617b76f22 100644
--- a/net/sunrpc/xprtrdma/physical_ops.c
+++ b/net/sunrpc/xprtrdma/physical_ops.c
@@ -23,6 +23,21 @@ static int
physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
struct rpcrdma_create_data_internal *cdata)
{
+ struct ib_mr *mr;
+
+ /* Obtain an rkey to use for RPC data payloads.
+ */
+ mr = ib_get_dma_mr(ia->ri_pd,
+ IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ);
+ if (IS_ERR(mr)) {
+ pr_err("%s: ib_get_dma_mr for failed with %lX\n",
+ __func__, PTR_ERR(mr));
+ return -ENOMEM;
+ }
+
+ ia->ri_dma_mr = mr;
return 0;
}
@@ -51,7 +66,7 @@ physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
rpcrdma_map_one(ia->ri_device, seg, rpcrdma_data_dir(writing));
- seg->mr_rkey = ia->ri_bind_mem->rkey;
+ seg->mr_rkey = ia->ri_dma_mr->rkey;
seg->mr_base = seg->mr_dma;
seg->mr_nsegs = 1;
return 1;
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 84ea37dae..bc8bd6577 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -71,6 +71,67 @@ static const char transfertypes[][12] = {
};
#endif
+/* The client can send a request inline as long as the RPCRDMA header
+ * plus the RPC call fit under the transport's inline limit. If the
+ * combined call message size exceeds that limit, the client must use
+ * the read chunk list for this operation.
+ */
+static bool rpcrdma_args_inline(struct rpc_rqst *rqst)
+{
+ unsigned int callsize = RPCRDMA_HDRLEN_MIN + rqst->rq_snd_buf.len;
+
+ return callsize <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst);
+}
+
+/* The client can't know how large the actual reply will be. Thus it
+ * plans for the largest possible reply for that particular ULP
+ * operation. If the maximum combined reply message size exceeds that
+ * limit, the client must provide a write list or a reply chunk for
+ * this request.
+ */
+static bool rpcrdma_results_inline(struct rpc_rqst *rqst)
+{
+ unsigned int repsize = RPCRDMA_HDRLEN_MIN + rqst->rq_rcv_buf.buflen;
+
+ return repsize <= RPCRDMA_INLINE_READ_THRESHOLD(rqst);
+}
+
+static int
+rpcrdma_tail_pullup(struct xdr_buf *buf)
+{
+ size_t tlen = buf->tail[0].iov_len;
+ size_t skip = tlen & 3;
+
+ /* Do not include the tail if it is only an XDR pad */
+ if (tlen < 4)
+ return 0;
+
+ /* xdr_write_pages() adds a pad at the beginning of the tail
+ * if the content in "buf->pages" is unaligned. Force the
+ * tail's actual content to land at the next XDR position
+ * after the head instead.
+ */
+ if (skip) {
+ unsigned char *src, *dst;
+ unsigned int count;
+
+ src = buf->tail[0].iov_base;
+ dst = buf->head[0].iov_base;
+ dst += buf->head[0].iov_len;
+
+ src += skip;
+ tlen -= skip;
+
+ dprintk("RPC: %s: skip=%zu, memmove(%p, %p, %zu)\n",
+ __func__, skip, dst, src, tlen);
+
+ for (count = tlen; count; count--)
+ *dst++ = *src++;
+ }
+
+ return tlen;
+}
+
/*
* Chunk assembly from upper layer xdr_buf.
*
@@ -122,6 +183,10 @@ rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
if (len && n == nsegs)
return -EIO;
+ /* When encoding the read list, the tail is always sent inline */
+ if (type == rpcrdma_readch)
+ return n;
+
if (xdrbuf->tail[0].iov_len) {
/* the rpcrdma protocol allows us to omit any trailing
* xdr pad bytes, saving the server an RDMA operation. */
@@ -297,8 +362,7 @@ out:
* pre-registered memory buffer for this request. For small amounts
* of data, this is efficient. The cutoff value is tunable.
*/
-static int
-rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
+static void rpcrdma_inline_pullup(struct rpc_rqst *rqst)
{
int i, npages, curlen;
int copy_len;
@@ -310,16 +374,9 @@ rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
destp = rqst->rq_svec[0].iov_base;
curlen = rqst->rq_svec[0].iov_len;
destp += curlen;
- /*
- * Do optional padding where it makes sense. Alignment of write
- * payload can help the server, if our setting is accurate.
- */
- pad -= (curlen + 36/*sizeof(struct rpcrdma_msg_padded)*/);
- if (pad < 0 || rqst->rq_slen - curlen < RPCRDMA_INLINE_PAD_THRESH)
- pad = 0; /* don't pad this request */
- dprintk("RPC: %s: pad %d destp 0x%p len %d hdrlen %d\n",
- __func__, pad, destp, rqst->rq_slen, curlen);
+ dprintk("RPC: %s: destp 0x%p len %d hdrlen %d\n",
+ __func__, destp, rqst->rq_slen, curlen);
copy_len = rqst->rq_snd_buf.page_len;
@@ -355,7 +412,6 @@ rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
page_base = 0;
}
/* header now contains entire send message */
- return pad;
}
/*
@@ -380,7 +436,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
char *base;
- size_t rpclen, padlen;
+ size_t rpclen;
ssize_t hdrlen;
enum rpcrdma_chunktype rtype, wtype;
struct rpcrdma_msg *headerp;
@@ -402,28 +458,15 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
/*
* Chunks needed for results?
*
+ * o Read ops return data as write chunk(s), header as inline.
* o If the expected result is under the inline threshold, all ops
- * return as inline (but see later).
+ * return as inline.
* o Large non-read ops return as a single reply chunk.
- * o Large read ops return data as write chunk(s), header as inline.
- *
- * Note: the NFS code sending down multiple result segments implies
- * the op is one of read, readdir[plus], readlink or NFSv4 getacl.
- */
-
- /*
- * This code can handle read chunks, write chunks OR reply
- * chunks -- only one type. If the request is too big to fit
- * inline, then we will choose read chunks. If the request is
- * a READ, then use write chunks to separate the file data
- * into pages; otherwise use reply chunks.
*/
- if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
- wtype = rpcrdma_noch;
- else if (rqst->rq_rcv_buf.page_len == 0)
- wtype = rpcrdma_replych;
- else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
+ if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
wtype = rpcrdma_writech;
+ else if (rpcrdma_results_inline(rqst))
+ wtype = rpcrdma_noch;
else
wtype = rpcrdma_replych;
@@ -432,21 +475,25 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
*
* o If the total request is under the inline threshold, all ops
* are sent as inline.
- * o Large non-write ops are sent with the entire message as a
- * single read chunk (protocol 0-position special case).
* o Large write ops transmit data as read chunk(s), header as
* inline.
+ * o Large non-write ops are sent with the entire message as a
+ * single read chunk (protocol 0-position special case).
*
- * Note: the NFS code sending down multiple argument segments
- * implies the op is a write.
- * TBD check NFSv4 setacl
+ * This assumes that the upper layer does not present a request
+ * that both has a data payload, and whose non-data arguments
+ * by themselves are larger than the inline threshold.
*/
- if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
+ if (rpcrdma_args_inline(rqst)) {
rtype = rpcrdma_noch;
- else if (rqst->rq_snd_buf.page_len == 0)
- rtype = rpcrdma_areadch;
- else
+ } else if (rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
rtype = rpcrdma_readch;
+ } else {
+ r_xprt->rx_stats.nomsg_call_count++;
+ headerp->rm_type = htonl(RDMA_NOMSG);
+ rtype = rpcrdma_areadch;
+ rpclen = 0;
+ }
/* The following simplification is not true forever */
if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
@@ -458,7 +505,6 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
}
hdrlen = RPCRDMA_HDRLEN_MIN;
- padlen = 0;
/*
* Pull up any extra send data into the preregistered buffer.
@@ -467,45 +513,15 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
*/
if (rtype == rpcrdma_noch) {
- padlen = rpcrdma_inline_pullup(rqst,
- RPCRDMA_INLINE_PAD_VALUE(rqst));
-
- if (padlen) {
- headerp->rm_type = rdma_msgp;
- headerp->rm_body.rm_padded.rm_align =
- cpu_to_be32(RPCRDMA_INLINE_PAD_VALUE(rqst));
- headerp->rm_body.rm_padded.rm_thresh =
- cpu_to_be32(RPCRDMA_INLINE_PAD_THRESH);
- headerp->rm_body.rm_padded.rm_pempty[0] = xdr_zero;
- headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
- headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
- hdrlen += 2 * sizeof(u32); /* extra words in padhdr */
- if (wtype != rpcrdma_noch) {
- dprintk("RPC: %s: invalid chunk list\n",
- __func__);
- return -EIO;
- }
- } else {
- headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
- headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
- headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
- /* new length after pullup */
- rpclen = rqst->rq_svec[0].iov_len;
- /*
- * Currently we try to not actually use read inline.
- * Reply chunks have the desirable property that
- * they land, packed, directly in the target buffers
- * without headers, so they require no fixup. The
- * additional RDMA Write op sends the same amount
- * of data, streams on-the-wire and adds no overhead
- * on receive. Therefore, we request a reply chunk
- * for non-writes wherever feasible and efficient.
- */
- if (wtype == rpcrdma_noch)
- wtype = rpcrdma_replych;
- }
- }
+ rpcrdma_inline_pullup(rqst);
+ headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
+ headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
+ headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
+ /* new length after pullup */
+ rpclen = rqst->rq_svec[0].iov_len;
+ } else if (rtype == rpcrdma_readch)
+ rpclen += rpcrdma_tail_pullup(&rqst->rq_snd_buf);
if (rtype != rpcrdma_noch) {
hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
headerp, rtype);
@@ -518,9 +534,9 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
if (hdrlen < 0)
return hdrlen;
- dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd padlen %zd"
+ dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd"
" headerp 0x%p base 0x%p lkey 0x%x\n",
- __func__, transfertypes[wtype], hdrlen, rpclen, padlen,
+ __func__, transfertypes[wtype], hdrlen, rpclen,
headerp, base, rdmab_lkey(req->rl_rdmabuf));
/*
@@ -534,26 +550,15 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
req->rl_send_iov[0].length = hdrlen;
req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);
+ req->rl_niovs = 1;
+ if (rtype == rpcrdma_areadch)
+ return 0;
+
req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
req->rl_send_iov[1].length = rpclen;
req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);
req->rl_niovs = 2;
-
- if (padlen) {
- struct rpcrdma_ep *ep = &r_xprt->rx_ep;
-
- req->rl_send_iov[2].addr = rdmab_addr(ep->rep_padbuf);
- req->rl_send_iov[2].length = padlen;
- req->rl_send_iov[2].lkey = rdmab_lkey(ep->rep_padbuf);
-
- req->rl_send_iov[3].addr = req->rl_send_iov[1].addr + rpclen;
- req->rl_send_iov[3].length = rqst->rq_slen - rpclen;
- req->rl_send_iov[3].lkey = rdmab_lkey(req->rl_sendbuf);
-
- req->rl_niovs = 4;
- }
-
return 0;
}
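
The rewritten marshaling code reduces to a short decision tree: replies use a write chunk when the receive buffer is flagged XDRBUF_READ, go inline when the worst-case reply fits, and otherwise use a reply chunk; calls go inline when they fit, use read chunks when flagged XDRBUF_WRITE, and otherwise fall back to RDMA_NOMSG with a position-zero read chunk. A standalone sketch of that tree, where the threshold values and field names are invented rather than the transport's real ones:

#include <stdbool.h>
#include <stdio.h>

enum chunktype { NOCH, READCH, AREADCH, WRITECH, REPLYCH };

/* Invented stand-ins for the transport's inline thresholds. */
#define INLINE_WRITE_THRESHOLD	1024
#define INLINE_READ_THRESHOLD	1024

struct rqst {
	unsigned int send_len, recv_buflen;
	bool snd_is_write, rcv_is_read;	/* XDRBUF_WRITE / XDRBUF_READ */
};

static enum chunktype pick_wtype(const struct rqst *r)
{
	if (r->rcv_is_read)
		return WRITECH;			/* data lands in write chunks */
	if (r->recv_buflen <= INLINE_READ_THRESHOLD)
		return NOCH;			/* whole reply fits inline */
	return REPLYCH;				/* large non-read reply */
}

static enum chunktype pick_rtype(const struct rqst *r)
{
	if (r->send_len <= INLINE_WRITE_THRESHOLD)
		return NOCH;			/* whole call fits inline */
	if (r->snd_is_write)
		return READCH;			/* payload via read chunks */
	return AREADCH;				/* RDMA_NOMSG, position-zero chunk */
}

int main(void)
{
	struct rqst r = { .send_len = 4096, .snd_is_write = true,
			  .recv_buflen = 512, .rcv_is_read = false };

	printf("rtype=%d wtype=%d\n", pick_rtype(&r), pick_wtype(&r));
	return 0;
}
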
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 96d886a86..f0c3ff67c 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -115,15 +115,6 @@ static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
rqstp->rq_arg.tail[0].iov_len = 0;
}
-static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count)
-{
- if (!rdma_cap_read_multi_sge(xprt->sc_cm_id->device,
- xprt->sc_cm_id->port_num))
- return 1;
- else
- return min_t(int, sge_count, xprt->sc_max_sge);
-}
-
/* Issue an RDMA_READ using the local lkey to map the data sink */
int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
struct svc_rqst *rqstp,
@@ -144,8 +135,7 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
ctxt->direction = DMA_FROM_DEVICE;
ctxt->read_hdr = head;
- pages_needed =
- min_t(int, pages_needed, rdma_read_max_sge(xprt, pages_needed));
+ pages_needed = min_t(int, pages_needed, xprt->sc_max_sge_rd);
read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
rs_length);
@@ -543,7 +533,7 @@ static int rdma_read_complete(struct svc_rqst *rqstp,
rqstp->rq_arg.page_base = head->arg.page_base;
/* rq_respages starts after the last arg page */
- rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
+ rqstp->rq_respages = &rqstp->rq_pages[page_no];
rqstp->rq_next_page = rqstp->rq_respages + 1;
/* Rebuild rq_arg head and tail. */
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 95412abc9..1dfae8317 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -136,6 +136,79 @@ static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
return dma_addr;
}
+/* Returns the address of the first read chunk or <nul> if no read chunk
+ * is present
+ */
+struct rpcrdma_read_chunk *
+svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
+{
+ struct rpcrdma_read_chunk *ch =
+ (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
+
+ if (ch->rc_discrim == xdr_zero)
+ return NULL;
+ return ch;
+}
+
+/* Returns the address of the first write array element or <nul>
+ * if no write array list is present
+ */
+static struct rpcrdma_write_array *
+svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp)
+{
+ if (rmsgp->rm_body.rm_chunks[0] != xdr_zero ||
+ rmsgp->rm_body.rm_chunks[1] == xdr_zero)
+ return NULL;
+ return (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[1];
+}
+
+/* Returns the address of the first reply array element or <nul> if no
+ * reply array is present
+ */
+static struct rpcrdma_write_array *
+svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp)
+{
+ struct rpcrdma_read_chunk *rch;
+ struct rpcrdma_write_array *wr_ary;
+ struct rpcrdma_write_array *rp_ary;
+
+ /* XXX: Need to fix when reply chunk may occur with read list
+ * and/or write list.
+ */
+ if (rmsgp->rm_body.rm_chunks[0] != xdr_zero ||
+ rmsgp->rm_body.rm_chunks[1] != xdr_zero)
+ return NULL;
+
+ rch = svc_rdma_get_read_chunk(rmsgp);
+ if (rch) {
+ while (rch->rc_discrim != xdr_zero)
+ rch++;
+
+ /* The reply chunk follows an empty write array located
+ * at 'rc_position' here. The reply array is at rc_target.
+ */
+ rp_ary = (struct rpcrdma_write_array *)&rch->rc_target;
+ goto found_it;
+ }
+
+ wr_ary = svc_rdma_get_write_array(rmsgp);
+ if (wr_ary) {
+ int chunk = be32_to_cpu(wr_ary->wc_nchunks);
+
+ rp_ary = (struct rpcrdma_write_array *)
+ &wr_ary->wc_array[chunk].wc_target.rs_length;
+ goto found_it;
+ }
+
+ /* No read list, no write list */
+ rp_ary = (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[2];
+
+ found_it:
+ if (rp_ary->wc_discrim == xdr_zero)
+ return NULL;
+ return rp_ary;
+}
+
/* Assumptions:
* - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
*/
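
svc_rdma_get_reply_array() has to walk past the read list and the write array before it can test the reply-chunk discriminator, because the three lists appear in fixed order and each is introduced by an XDR discriminator word. A deliberately simplified model of that walk (real entries carry handle/offset/length triples; here a present list is just a count plus flattened words):

#include <stdint.h>
#include <stdio.h>

/* Simplified RPC-over-RDMA header body: read, write, then reply
 * list, each introduced by a one-word discriminator where 0 means
 * "list absent". */
static const uint32_t *skip_list(const uint32_t *p)
{
	if (*p++ == 0)
		return p;		/* discriminator 0: nothing follows */
	uint32_t n = *p++;		/* entry count */
	return p + n;			/* skip the (flattened) entries */
}

int main(void)
{
	/* read list absent, write list with 2 entries, reply absent */
	const uint32_t hdr[] = { 0, 1, 2, 101, 102, 0 };
	const uint32_t *p = hdr;

	p = skip_list(p);		/* past the read list */
	p = skip_list(p);		/* past the write array */
	printf("reply list %s\n", *p ? "present" : "absent");
	return 0;
}
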
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 48f6de912..fcc3eb80c 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -659,6 +659,7 @@ static int rdma_cma_handler(struct rdma_cm_id *cma_id,
if (xprt) {
set_bit(XPT_CLOSE, &xprt->xpt_flags);
svc_xprt_enqueue(xprt);
+ svc_xprt_put(xprt);
}
break;
default:
@@ -733,17 +734,19 @@ static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
struct ib_mr *mr;
struct ib_fast_reg_page_list *pl;
struct svc_rdma_fastreg_mr *frmr;
+ u32 num_sg;
frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
if (!frmr)
goto err;
- mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES);
+ num_sg = min_t(u32, RPCSVC_MAXPAGES, xprt->sc_frmr_pg_list_len);
+ mr = ib_alloc_mr(xprt->sc_pd, IB_MR_TYPE_MEM_REG, num_sg);
if (IS_ERR(mr))
goto err_free_frmr;
pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device,
- RPCSVC_MAXPAGES);
+ num_sg);
if (IS_ERR(pl))
goto err_free_mr;
@@ -872,6 +875,8 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
* capabilities of this particular device */
newxprt->sc_max_sge = min((size_t)devattr.max_sge,
(size_t)RPCSVC_MAXPAGES);
+ newxprt->sc_max_sge_rd = min_t(size_t, devattr.max_sge_rd,
+ RPCSVC_MAXPAGES);
newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
(size_t)svcrdma_max_requests);
newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;
@@ -1046,6 +1051,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
" remote_ip : %pI4\n"
" remote_port : %d\n"
" max_sge : %d\n"
+ " max_sge_rd : %d\n"
" sq_depth : %d\n"
" max_requests : %d\n"
" ord : %d\n",
@@ -1059,6 +1065,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
route.addr.dst_addr)->sin_port),
newxprt->sc_max_sge,
+ newxprt->sc_max_sge_rd,
newxprt->sc_sq_depth,
newxprt->sc_max_requests,
newxprt->sc_ord);
@@ -1201,40 +1208,6 @@ static int svc_rdma_secure_port(struct svc_rqst *rqstp)
return 1;
}
-/*
- * Attempt to register the kvec representing the RPC memory with the
- * device.
- *
- * Returns:
- * NULL : The device does not support fastreg or there were no more
- * fastreg mr.
- * frmr : The kvec register request was successfully posted.
- * <0 : An error was encountered attempting to register the kvec.
- */
-int svc_rdma_fastreg(struct svcxprt_rdma *xprt,
- struct svc_rdma_fastreg_mr *frmr)
-{
- struct ib_send_wr fastreg_wr;
- u8 key;
-
- /* Bump the key */
- key = (u8)(frmr->mr->lkey & 0x000000FF);
- ib_update_fast_reg_key(frmr->mr, ++key);
-
- /* Prepare FASTREG WR */
- memset(&fastreg_wr, 0, sizeof fastreg_wr);
- fastreg_wr.opcode = IB_WR_FAST_REG_MR;
- fastreg_wr.send_flags = IB_SEND_SIGNALED;
- fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva;
- fastreg_wr.wr.fast_reg.page_list = frmr->page_list;
- fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len;
- fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
- fastreg_wr.wr.fast_reg.length = frmr->map_len;
- fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags;
- fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey;
- return svc_rdma_send(xprt, &fastreg_wr);
-}
-
int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
struct ib_send_wr *bad_wr, *n_wr;
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 680f888a9..41e452bc5 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -175,10 +175,8 @@ xprt_rdma_format_addresses6(struct rpc_xprt *xprt, struct sockaddr *sap)
}
static void
-xprt_rdma_format_addresses(struct rpc_xprt *xprt)
+xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap)
{
- struct sockaddr *sap = (struct sockaddr *)
- &rpcx_to_rdmad(xprt).addr;
char buf[128];
switch (sap->sa_family) {
@@ -272,8 +270,8 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
xprt_clear_connected(xprt);
- rpcrdma_buffer_destroy(&r_xprt->rx_buf);
rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
+ rpcrdma_buffer_destroy(&r_xprt->rx_buf);
rpcrdma_ia_close(&r_xprt->rx_ia);
xprt_rdma_free_addresses(xprt);
@@ -302,7 +300,7 @@ xprt_setup_rdma(struct xprt_create *args)
struct rpc_xprt *xprt;
struct rpcrdma_xprt *new_xprt;
struct rpcrdma_ep *new_ep;
- struct sockaddr_in *sin;
+ struct sockaddr *sap;
int rc;
if (args->addrlen > sizeof(xprt->addr)) {
@@ -333,26 +331,20 @@ xprt_setup_rdma(struct xprt_create *args)
* Set up RDMA-specific connect data.
*/
- /* Put server RDMA address in local cdata */
- memcpy(&cdata.addr, args->dstaddr, args->addrlen);
+ sap = (struct sockaddr *)&cdata.addr;
+ memcpy(sap, args->dstaddr, args->addrlen);
/* Ensure xprt->addr holds valid server TCP (not RDMA)
* address, for any side protocols which peek at it */
xprt->prot = IPPROTO_TCP;
xprt->addrlen = args->addrlen;
- memcpy(&xprt->addr, &cdata.addr, xprt->addrlen);
+ memcpy(&xprt->addr, sap, xprt->addrlen);
- sin = (struct sockaddr_in *)&cdata.addr;
- if (ntohs(sin->sin_port) != 0)
+ if (rpc_get_port(sap))
xprt_set_bound(xprt);
- dprintk("RPC: %s: %pI4:%u\n",
- __func__, &sin->sin_addr.s_addr, ntohs(sin->sin_port));
-
- /* Set max requests */
cdata.max_requests = xprt->max_reqs;
- /* Set some length limits */
cdata.rsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA write max */
cdata.wsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA read max */
@@ -375,8 +367,7 @@ xprt_setup_rdma(struct xprt_create *args)
new_xprt = rpcx_to_rdmax(xprt);
- rc = rpcrdma_ia_open(new_xprt, (struct sockaddr *) &cdata.addr,
- xprt_rdma_memreg_strategy);
+ rc = rpcrdma_ia_open(new_xprt, sap, xprt_rdma_memreg_strategy);
if (rc)
goto out1;
@@ -409,7 +400,7 @@ xprt_setup_rdma(struct xprt_create *args)
INIT_DELAYED_WORK(&new_xprt->rx_connect_worker,
xprt_rdma_connect_worker);
- xprt_rdma_format_addresses(xprt);
+ xprt_rdma_format_addresses(xprt, sap);
xprt->max_payload = new_xprt->rx_ia.ri_ops->ro_maxpages(new_xprt);
if (xprt->max_payload == 0)
goto out4;
@@ -420,6 +411,9 @@ xprt_setup_rdma(struct xprt_create *args)
if (!try_module_get(THIS_MODULE))
goto out4;
+ dprintk("RPC: %s: %s:%s\n", __func__,
+ xprt->address_strings[RPC_DISPLAY_ADDR],
+ xprt->address_strings[RPC_DISPLAY_PORT]);
return xprt;
out4:
@@ -653,31 +647,30 @@ static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
if (xprt_connected(xprt))
idle_time = (long)(jiffies - xprt->last_used) / HZ;
- seq_printf(seq,
- "\txprt:\trdma %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu "
- "%lu %lu %lu %Lu %Lu %Lu %Lu %lu %lu %lu\n",
-
- 0, /* need a local port? */
- xprt->stat.bind_count,
- xprt->stat.connect_count,
- xprt->stat.connect_time,
- idle_time,
- xprt->stat.sends,
- xprt->stat.recvs,
- xprt->stat.bad_xids,
- xprt->stat.req_u,
- xprt->stat.bklog_u,
-
- r_xprt->rx_stats.read_chunk_count,
- r_xprt->rx_stats.write_chunk_count,
- r_xprt->rx_stats.reply_chunk_count,
- r_xprt->rx_stats.total_rdma_request,
- r_xprt->rx_stats.total_rdma_reply,
- r_xprt->rx_stats.pullup_copy_count,
- r_xprt->rx_stats.fixup_copy_count,
- r_xprt->rx_stats.hardway_register_count,
- r_xprt->rx_stats.failed_marshal_count,
- r_xprt->rx_stats.bad_reply_count);
+ seq_puts(seq, "\txprt:\trdma ");
+ seq_printf(seq, "%u %lu %lu %lu %ld %lu %lu %lu %llu %llu ",
+ 0, /* need a local port? */
+ xprt->stat.bind_count,
+ xprt->stat.connect_count,
+ xprt->stat.connect_time,
+ idle_time,
+ xprt->stat.sends,
+ xprt->stat.recvs,
+ xprt->stat.bad_xids,
+ xprt->stat.req_u,
+ xprt->stat.bklog_u);
+ seq_printf(seq, "%lu %lu %lu %llu %llu %llu %llu %lu %lu %lu %lu\n",
+ r_xprt->rx_stats.read_chunk_count,
+ r_xprt->rx_stats.write_chunk_count,
+ r_xprt->rx_stats.reply_chunk_count,
+ r_xprt->rx_stats.total_rdma_request,
+ r_xprt->rx_stats.total_rdma_reply,
+ r_xprt->rx_stats.pullup_copy_count,
+ r_xprt->rx_stats.fixup_copy_count,
+ r_xprt->rx_stats.hardway_register_count,
+ r_xprt->rx_stats.failed_marshal_count,
+ r_xprt->rx_stats.bad_reply_count,
+ r_xprt->rx_stats.nomsg_call_count);
}
static int
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 891c4ede2..5502d4dad 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -52,6 +52,7 @@
#include <linux/prefetch.h>
#include <linux/sunrpc/addr.h>
#include <asm/bitops.h>
+#include <linux/module.h> /* try_module_get()/module_put() */
#include "xprt_rdma.h"
@@ -414,6 +415,14 @@ connected:
return 0;
}
+static void rpcrdma_destroy_id(struct rdma_cm_id *id)
+{
+ if (id) {
+ module_put(id->device->owner);
+ rdma_destroy_id(id);
+ }
+}
+
static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt,
struct rpcrdma_ia *ia, struct sockaddr *addr)
@@ -440,6 +449,17 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt,
}
wait_for_completion_interruptible_timeout(&ia->ri_done,
msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
+
+ /* FIXME:
+ * Until xprtrdma supports DEVICE_REMOVAL, the provider must
+ * be pinned while there are active NFS/RDMA mounts to prevent
+ * hangs and crashes at umount time.
+ */
+ if (!ia->ri_async_rc && !try_module_get(id->device->owner)) {
+ dprintk("RPC: %s: Failed to get device module\n",
+ __func__);
+ ia->ri_async_rc = -ENODEV;
+ }
rc = ia->ri_async_rc;
if (rc)
goto out;
@@ -449,16 +469,17 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt,
if (rc) {
dprintk("RPC: %s: rdma_resolve_route() failed %i\n",
__func__, rc);
- goto out;
+ goto put;
}
wait_for_completion_interruptible_timeout(&ia->ri_done,
msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
rc = ia->ri_async_rc;
if (rc)
- goto out;
+ goto put;
return id;
-
+put:
+ module_put(id->device->owner);
out:
rdma_destroy_id(id);
return ERR_PTR(rc);
@@ -493,9 +514,11 @@ rpcrdma_clean_cq(struct ib_cq *cq)
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
{
- int rc, mem_priv;
struct rpcrdma_ia *ia = &xprt->rx_ia;
struct ib_device_attr *devattr = &ia->ri_devattr;
+ int rc;
+
+ ia->ri_dma_mr = NULL;
ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
if (IS_ERR(ia->ri_id)) {
@@ -519,17 +542,9 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
goto out3;
}
- if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
- ia->ri_have_dma_lkey = 1;
- ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
- }
-
if (memreg == RPCRDMA_FRMR) {
- /* Requires both frmr reg and local dma lkey */
- if (((devattr->device_cap_flags &
- (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) !=
- (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) ||
- (devattr->max_fast_reg_page_list_len == 0)) {
+ if (!(devattr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) ||
+ (devattr->max_fast_reg_page_list_len == 0)) {
dprintk("RPC: %s: FRMR registration "
"not supported by HCA\n", __func__);
memreg = RPCRDMA_MTHCAFMR;
@@ -539,42 +554,20 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
if (!ia->ri_device->alloc_fmr) {
dprintk("RPC: %s: MTHCAFMR registration "
"not supported by HCA\n", __func__);
- memreg = RPCRDMA_ALLPHYSICAL;
+ rc = -EINVAL;
+ goto out3;
}
}
- /*
- * Optionally obtain an underlying physical identity mapping in
- * order to do a memory window-based bind. This base registration
- * is protected from remote access - that is enabled only by binding
- * for the specific bytes targeted during each RPC operation, and
- * revoked after the corresponding completion similar to a storage
- * adapter.
- */
switch (memreg) {
case RPCRDMA_FRMR:
ia->ri_ops = &rpcrdma_frwr_memreg_ops;
break;
case RPCRDMA_ALLPHYSICAL:
ia->ri_ops = &rpcrdma_physical_memreg_ops;
- mem_priv = IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ;
- goto register_setup;
+ break;
case RPCRDMA_MTHCAFMR:
ia->ri_ops = &rpcrdma_fmr_memreg_ops;
- if (ia->ri_have_dma_lkey)
- break;
- mem_priv = IB_ACCESS_LOCAL_WRITE;
- register_setup:
- ia->ri_bind_mem = ib_get_dma_mr(ia->ri_pd, mem_priv);
- if (IS_ERR(ia->ri_bind_mem)) {
- printk(KERN_ALERT "%s: ib_get_dma_mr for "
- "phys register failed with %lX\n",
- __func__, PTR_ERR(ia->ri_bind_mem));
- rc = -ENOMEM;
- goto out3;
- }
break;
default:
printk(KERN_ERR "RPC: Unsupported memory "
@@ -592,7 +585,7 @@ out3:
ib_dealloc_pd(ia->ri_pd);
ia->ri_pd = NULL;
out2:
- rdma_destroy_id(ia->ri_id);
+ rpcrdma_destroy_id(ia->ri_id);
ia->ri_id = NULL;
out1:
return rc;
@@ -606,25 +599,17 @@ out1:
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
- int rc;
-
dprintk("RPC: %s: entering\n", __func__);
- if (ia->ri_bind_mem != NULL) {
- rc = ib_dereg_mr(ia->ri_bind_mem);
- dprintk("RPC: %s: ib_dereg_mr returned %i\n",
- __func__, rc);
- }
-
if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
if (ia->ri_id->qp)
rdma_destroy_qp(ia->ri_id);
- rdma_destroy_id(ia->ri_id);
+ rpcrdma_destroy_id(ia->ri_id);
ia->ri_id = NULL;
}
/* If the pd is still busy, xprtrdma missed freeing a resource */
if (ia->ri_pd && !IS_ERR(ia->ri_pd))
- WARN_ON(ib_dealloc_pd(ia->ri_pd));
+ ib_dealloc_pd(ia->ri_pd);
}
/*
@@ -639,6 +624,12 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
struct ib_cq_init_attr cq_attr = {};
int rc, err;
+ if (devattr->max_sge < RPCRDMA_MAX_IOVS) {
+ dprintk("RPC: %s: insufficient sge's available\n",
+ __func__);
+ return -ENOMEM;
+ }
+
/* check provider's send/recv wr limits */
if (cdata->max_requests > devattr->max_qp_wr)
cdata->max_requests = devattr->max_qp_wr;
@@ -651,21 +642,13 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
if (rc)
return rc;
ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
- ep->rep_attr.cap.max_send_sge = (cdata->padding ? 4 : 2);
+ ep->rep_attr.cap.max_send_sge = RPCRDMA_MAX_IOVS;
ep->rep_attr.cap.max_recv_sge = 1;
ep->rep_attr.cap.max_inline_data = 0;
ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
ep->rep_attr.qp_type = IB_QPT_RC;
ep->rep_attr.port_num = ~0;
- if (cdata->padding) {
- ep->rep_padbuf = rpcrdma_alloc_regbuf(ia, cdata->padding,
- GFP_KERNEL);
- if (IS_ERR(ep->rep_padbuf))
- return PTR_ERR(ep->rep_padbuf);
- } else
- ep->rep_padbuf = NULL;
-
dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
"iovs: send %d recv %d\n",
__func__,
@@ -748,7 +731,8 @@ out2:
dprintk("RPC: %s: ib_destroy_cq returned %i\n",
__func__, err);
out1:
- rpcrdma_free_regbuf(ia, ep->rep_padbuf);
+ if (ia->ri_dma_mr)
+ ib_dereg_mr(ia->ri_dma_mr);
return rc;
}
@@ -769,25 +753,32 @@ rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
cancel_delayed_work_sync(&ep->rep_connect_worker);
- if (ia->ri_id->qp) {
+ if (ia->ri_id->qp)
rpcrdma_ep_disconnect(ep, ia);
+
+ rpcrdma_clean_cq(ep->rep_attr.recv_cq);
+ rpcrdma_clean_cq(ep->rep_attr.send_cq);
+
+ if (ia->ri_id->qp) {
rdma_destroy_qp(ia->ri_id);
ia->ri_id->qp = NULL;
}
- rpcrdma_free_regbuf(ia, ep->rep_padbuf);
-
- rpcrdma_clean_cq(ep->rep_attr.recv_cq);
rc = ib_destroy_cq(ep->rep_attr.recv_cq);
if (rc)
dprintk("RPC: %s: ib_destroy_cq returned %i\n",
__func__, rc);
- rpcrdma_clean_cq(ep->rep_attr.send_cq);
rc = ib_destroy_cq(ep->rep_attr.send_cq);
if (rc)
dprintk("RPC: %s: ib_destroy_cq returned %i\n",
__func__, rc);
+
+ if (ia->ri_dma_mr) {
+ rc = ib_dereg_mr(ia->ri_dma_mr);
+ dprintk("RPC: %s: ib_dereg_mr returned %i\n",
+ __func__, rc);
+ }
}
/*
@@ -825,7 +816,7 @@ retry:
if (ia->ri_device != id->device) {
printk("RPC: %s: can't reconnect on "
"different device!\n", __func__);
- rdma_destroy_id(id);
+ rpcrdma_destroy_id(id);
rc = -ENETUNREACH;
goto out;
}
@@ -834,7 +825,7 @@ retry:
if (rc) {
dprintk("RPC: %s: rdma_create_qp failed %i\n",
__func__, rc);
- rdma_destroy_id(id);
+ rpcrdma_destroy_id(id);
rc = -ENETUNREACH;
goto out;
}
@@ -845,7 +836,7 @@ retry:
write_unlock(&ia->ri_qplock);
rdma_destroy_qp(old);
- rdma_destroy_id(old);
+ rpcrdma_destroy_id(old);
} else {
dprintk("RPC: %s: connecting...\n", __func__);
rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
@@ -1229,75 +1220,6 @@ rpcrdma_mapping_error(struct rpcrdma_mr_seg *seg)
(unsigned long long)seg->mr_dma, seg->mr_dmalen);
}
-static int
-rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len,
- struct ib_mr **mrp, struct ib_sge *iov)
-{
- struct ib_phys_buf ipb;
- struct ib_mr *mr;
- int rc;
-
- /*
- * All memory passed here was kmalloc'ed, therefore phys-contiguous.
- */
- iov->addr = ib_dma_map_single(ia->ri_device,
- va, len, DMA_BIDIRECTIONAL);
- if (ib_dma_mapping_error(ia->ri_device, iov->addr))
- return -ENOMEM;
-
- iov->length = len;
-
- if (ia->ri_have_dma_lkey) {
- *mrp = NULL;
- iov->lkey = ia->ri_dma_lkey;
- return 0;
- } else if (ia->ri_bind_mem != NULL) {
- *mrp = NULL;
- iov->lkey = ia->ri_bind_mem->lkey;
- return 0;
- }
-
- ipb.addr = iov->addr;
- ipb.size = iov->length;
- mr = ib_reg_phys_mr(ia->ri_pd, &ipb, 1,
- IB_ACCESS_LOCAL_WRITE, &iov->addr);
-
- dprintk("RPC: %s: phys convert: 0x%llx "
- "registered 0x%llx length %d\n",
- __func__, (unsigned long long)ipb.addr,
- (unsigned long long)iov->addr, len);
-
- if (IS_ERR(mr)) {
- *mrp = NULL;
- rc = PTR_ERR(mr);
- dprintk("RPC: %s: failed with %i\n", __func__, rc);
- } else {
- *mrp = mr;
- iov->lkey = mr->lkey;
- rc = 0;
- }
-
- return rc;
-}
-
-static int
-rpcrdma_deregister_internal(struct rpcrdma_ia *ia,
- struct ib_mr *mr, struct ib_sge *iov)
-{
- int rc;
-
- ib_dma_unmap_single(ia->ri_device,
- iov->addr, iov->length, DMA_BIDIRECTIONAL);
-
- if (NULL == mr)
- return 0;
-
- rc = ib_dereg_mr(mr);
- if (rc)
- dprintk("RPC: %s: ib_dereg_mr failed %i\n", __func__, rc);
- return rc;
-}
-
/**
* rpcrdma_alloc_regbuf - kmalloc and register memory for SEND/RECV buffers
* @ia: controlling rpcrdma_ia
@@ -1317,26 +1239,29 @@ struct rpcrdma_regbuf *
rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags)
{
struct rpcrdma_regbuf *rb;
- int rc;
+ struct ib_sge *iov;
- rc = -ENOMEM;
rb = kmalloc(sizeof(*rb) + size, flags);
if (rb == NULL)
goto out;
- rb->rg_size = size;
- rb->rg_owner = NULL;
- rc = rpcrdma_register_internal(ia, rb->rg_base, size,
- &rb->rg_mr, &rb->rg_iov);
- if (rc)
+ iov = &rb->rg_iov;
+ iov->addr = ib_dma_map_single(ia->ri_device,
+ (void *)rb->rg_base, size,
+ DMA_BIDIRECTIONAL);
+ if (ib_dma_mapping_error(ia->ri_device, iov->addr))
goto out_free;
+ iov->length = size;
+ iov->lkey = ia->ri_pd->local_dma_lkey;
+ rb->rg_size = size;
+ rb->rg_owner = NULL;
return rb;
out_free:
kfree(rb);
out:
- return ERR_PTR(rc);
+ return ERR_PTR(-ENOMEM);
}
/**
@@ -1347,10 +1272,15 @@ out:
void
rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
- if (rb) {
- rpcrdma_deregister_internal(ia, rb->rg_mr, &rb->rg_iov);
- kfree(rb);
- }
+ struct ib_sge *iov;
+
+ if (!rb)
+ return;
+
+ iov = &rb->rg_iov;
+ ib_dma_unmap_single(ia->ri_device,
+ iov->addr, iov->length, DMA_BIDIRECTIONAL);
+ kfree(rb);
}
/*
@@ -1363,9 +1293,11 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
struct rpcrdma_ep *ep,
struct rpcrdma_req *req)
{
+ struct ib_device *device = ia->ri_device;
struct ib_send_wr send_wr, *send_wr_fail;
struct rpcrdma_rep *rep = req->rl_reply;
- int rc;
+ struct ib_sge *iov = req->rl_send_iov;
+ int i, rc;
if (rep) {
rc = rpcrdma_ep_post_recv(ia, ep, rep);
@@ -1376,22 +1308,15 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
send_wr.next = NULL;
send_wr.wr_id = RPCRDMA_IGNORE_COMPLETION;
- send_wr.sg_list = req->rl_send_iov;
+ send_wr.sg_list = iov;
send_wr.num_sge = req->rl_niovs;
send_wr.opcode = IB_WR_SEND;
- if (send_wr.num_sge == 4) /* no need to sync any pad (constant) */
- ib_dma_sync_single_for_device(ia->ri_device,
- req->rl_send_iov[3].addr,
- req->rl_send_iov[3].length,
- DMA_TO_DEVICE);
- ib_dma_sync_single_for_device(ia->ri_device,
- req->rl_send_iov[1].addr,
- req->rl_send_iov[1].length,
- DMA_TO_DEVICE);
- ib_dma_sync_single_for_device(ia->ri_device,
- req->rl_send_iov[0].addr,
- req->rl_send_iov[0].length,
- DMA_TO_DEVICE);
+
+ for (i = 0; i < send_wr.num_sge; i++)
+ ib_dma_sync_single_for_device(device, iov[i].addr,
+ iov[i].length, DMA_TO_DEVICE);
+ dprintk("RPC: %s: posting %d s/g entries\n",
+ __func__, send_wr.num_sge);
if (DECR_CQCOUNT(ep) > 0)
send_wr.send_flags = 0;
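
Per the FIXME in rpcrdma_create_id(), the device's module is pinned with try_module_get() while the cm_id is live, and the new rpcrdma_destroy_id() wrapper is the single place the pin is dropped, so no unwind path can leak it. A toy version of that pairing, with stand-in types replacing the RDMA CM structures:

#include <stdio.h>
#include <stdlib.h>

struct module { int refs; };
struct cm_id  { struct module *owner; };

static int  try_module_get(struct module *m) { m->refs++; return 1; }
static void module_put(struct module *m)     { m->refs--; }

/* Pairing rule from the patch: every id created pins its device's
 * module; the one destroy wrapper drops the pin, so callers cannot
 * forget it on any exit path. */
static void destroy_id(struct cm_id *id)
{
	if (id) {
		module_put(id->owner);
		free(id);
	}
}

int main(void)
{
	struct module mod = { 0 };
	struct cm_id *id = malloc(sizeof(*id));

	id->owner = &mod;
	try_module_get(id->owner);	/* pin while the id is live */
	printf("refs=%d\n", mod.refs);
	destroy_id(id);
	printf("refs=%d\n", mod.refs);
	return 0;
}
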
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index e718d0959..c09414e6f 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -64,9 +64,7 @@ struct rpcrdma_ia {
struct ib_device *ri_device;
struct rdma_cm_id *ri_id;
struct ib_pd *ri_pd;
- struct ib_mr *ri_bind_mem;
- u32 ri_dma_lkey;
- int ri_have_dma_lkey;
+ struct ib_mr *ri_dma_mr;
struct completion ri_done;
int ri_async_rc;
unsigned int ri_max_frmr_depth;
@@ -88,7 +86,6 @@ struct rpcrdma_ep {
int rep_connected;
struct ib_qp_init_attr rep_attr;
wait_queue_head_t rep_connect_wait;
- struct rpcrdma_regbuf *rep_padbuf;
struct rdma_conn_param rep_remote_cma;
struct sockaddr_storage rep_remote_addr;
struct delayed_work rep_connect_worker;
@@ -118,7 +115,6 @@ struct rpcrdma_ep {
struct rpcrdma_regbuf {
size_t rg_size;
struct rpcrdma_req *rg_owner;
- struct ib_mr *rg_mr;
struct ib_sge rg_iov;
__be32 rg_base[0] __attribute__ ((aligned(256)));
};
@@ -164,8 +160,7 @@ rdmab_to_msg(struct rpcrdma_regbuf *rb)
* struct rpcrdma_buffer. N is the max number of outstanding requests.
*/
-/* temporary static scatter/gather max */
-#define RPCRDMA_MAX_DATA_SEGS (64) /* max scatter/gather */
+#define RPCRDMA_MAX_DATA_SEGS ((1 * 1024 * 1024) / PAGE_SIZE)
#define RPCRDMA_MAX_SEGS (RPCRDMA_MAX_DATA_SEGS + 2) /* head+tail = 2 */
struct rpcrdma_buffer;
@@ -257,16 +252,18 @@ struct rpcrdma_mr_seg { /* chunk descriptors */
char *mr_offset; /* kva if no page, else offset */
};
+#define RPCRDMA_MAX_IOVS (2)
+
struct rpcrdma_req {
- unsigned int rl_niovs; /* 0, 2 or 4 */
- unsigned int rl_nchunks; /* non-zero if chunks */
- unsigned int rl_connect_cookie; /* retry detection */
- struct rpcrdma_buffer *rl_buffer; /* home base for this structure */
+ unsigned int rl_niovs;
+ unsigned int rl_nchunks;
+ unsigned int rl_connect_cookie;
+ struct rpcrdma_buffer *rl_buffer;
struct rpcrdma_rep *rl_reply;/* holder for reply buffer */
- struct ib_sge rl_send_iov[4]; /* for active requests */
- struct rpcrdma_regbuf *rl_rdmabuf;
- struct rpcrdma_regbuf *rl_sendbuf;
- struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];
+ struct ib_sge rl_send_iov[RPCRDMA_MAX_IOVS];
+ struct rpcrdma_regbuf *rl_rdmabuf;
+ struct rpcrdma_regbuf *rl_sendbuf;
+ struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];
};
static inline struct rpcrdma_req *
@@ -341,6 +338,7 @@ struct rpcrdma_stats {
unsigned long hardway_register_count;
unsigned long failed_marshal_count;
unsigned long bad_reply_count;
+ unsigned long nomsg_call_count;
};
/*
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 8a39b1e48..1a85e0ed0 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -777,7 +777,6 @@ static void xs_sock_mark_closed(struct rpc_xprt *xprt)
xs_sock_reset_connection_flags(xprt);
/* Mark transport as closed and wake up all pending tasks */
xprt_disconnect_done(xprt);
- xprt_force_disconnect(xprt);
}
/**
@@ -822,6 +821,8 @@ static void xs_reset_transport(struct sock_xprt *transport)
if (atomic_read(&transport->xprt.swapper))
sk_clear_memalloc(sk);
+ kernel_sock_shutdown(sock, SHUT_RDWR);
+
write_lock_bh(&sk->sk_callback_lock);
transport->inet = NULL;
transport->sock = NULL;
@@ -879,8 +880,11 @@ static void xs_xprt_free(struct rpc_xprt *xprt)
*/
static void xs_destroy(struct rpc_xprt *xprt)
{
+ struct sock_xprt *transport = container_of(xprt,
+ struct sock_xprt, xprt);
dprintk("RPC: xs_destroy xprt %p\n", xprt);
+ cancel_delayed_work_sync(&transport->connect_worker);
xs_close(xprt);
xs_xprt_free(xprt);
module_put(THIS_MODULE);
@@ -1872,7 +1876,7 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
sk->sk_data_ready = xs_local_data_ready;
sk->sk_write_space = xs_udp_write_space;
sk->sk_error_report = xs_error_report;
- sk->sk_allocation = GFP_ATOMIC;
+ sk->sk_allocation = GFP_NOIO;
xprt_clear_connected(xprt);
@@ -2057,7 +2061,7 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
sk->sk_user_data = xprt;
sk->sk_data_ready = xs_udp_data_ready;
sk->sk_write_space = xs_udp_write_space;
- sk->sk_allocation = GFP_ATOMIC;
+ sk->sk_allocation = GFP_NOIO;
xprt_set_connected(xprt);
@@ -2159,7 +2163,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
sk->sk_state_change = xs_tcp_state_change;
sk->sk_write_space = xs_tcp_write_space;
sk->sk_error_report = xs_error_report;
- sk->sk_allocation = GFP_ATOMIC;
+ sk->sk_allocation = GFP_NOIO;
/* socket options */
sock_reset_flag(sk, SOCK_LINGER);
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 9f2add3cb..77f5d17e2 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -16,6 +16,7 @@
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <net/ip_fib.h>
#include <net/switchdev.h>
@@ -634,6 +635,8 @@ static int switchdev_port_br_afspec(struct net_device *dev,
if (nla_len(attr) != sizeof(struct bridge_vlan_info))
return -EINVAL;
vinfo = nla_data(attr);
+ if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
+ return -EINVAL;
vlan->flags = vinfo->flags;
if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
if (vlan->vid_begin)
@@ -810,7 +813,7 @@ static int switchdev_port_fdb_dump_cb(struct net_device *dev,
ndm->ndm_flags = NTF_SELF;
ndm->ndm_type = 0;
ndm->ndm_ifindex = dev->ifindex;
- ndm->ndm_state = NUD_REACHABLE;
+ ndm->ndm_state = obj->u.fdb.ndm_state;
if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, obj->u.fdb.addr))
goto nla_put_failure;
@@ -853,12 +856,8 @@ int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
.cb = cb,
.idx = idx,
};
- int err;
-
- err = switchdev_port_obj_dump(dev, &dump.obj);
- if (err)
- return err;
+ switchdev_port_obj_dump(dev, &dump.obj);
return dump.idx;
}
EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);
@@ -910,13 +909,9 @@ static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
if (switchdev_port_attr_get(dev, &attr))
return NULL;
- if (nhsel > 0) {
- if (prev_attr.u.ppid.id_len != attr.u.ppid.id_len)
- return NULL;
- if (memcmp(prev_attr.u.ppid.id, attr.u.ppid.id,
- attr.u.ppid.id_len))
+ if (nhsel > 0 &&
+ !netdev_phys_item_id_same(&prev_attr.u.ppid, &attr.u.ppid))
return NULL;
- }
prev_attr = attr;
}
@@ -1043,3 +1038,106 @@ void switchdev_fib_ipv4_abort(struct fib_info *fi)
fi->fib_net->ipv4.fib_offload_disabled = true;
}
EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);
+
+static bool switchdev_port_same_parent_id(struct net_device *a,
+ struct net_device *b)
+{
+ struct switchdev_attr a_attr = {
+ .id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+ .flags = SWITCHDEV_F_NO_RECURSE,
+ };
+ struct switchdev_attr b_attr = {
+ .id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+ .flags = SWITCHDEV_F_NO_RECURSE,
+ };
+
+ if (switchdev_port_attr_get(a, &a_attr) ||
+ switchdev_port_attr_get(b, &b_attr))
+ return false;
+
+ return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
+}
+
+static u32 switchdev_port_fwd_mark_get(struct net_device *dev,
+ struct net_device *group_dev)
+{
+ struct net_device *lower_dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
+ if (lower_dev == dev)
+ continue;
+ if (switchdev_port_same_parent_id(dev, lower_dev))
+ return lower_dev->offload_fwd_mark;
+ return switchdev_port_fwd_mark_get(dev, lower_dev);
+ }
+
+ return dev->ifindex;
+}
+
+static void switchdev_port_fwd_mark_reset(struct net_device *group_dev,
+ u32 old_mark, u32 *reset_mark)
+{
+ struct net_device *lower_dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
+ if (lower_dev->offload_fwd_mark == old_mark) {
+ if (!*reset_mark)
+ *reset_mark = lower_dev->ifindex;
+ lower_dev->offload_fwd_mark = *reset_mark;
+ }
+ switchdev_port_fwd_mark_reset(lower_dev, old_mark, reset_mark);
+ }
+}
+
+/**
+ * switchdev_port_fwd_mark_set - Set port offload forwarding mark
+ *
+ * @dev: port device
+ * @group_dev: containing device
+ * @joining: true if dev is joining group; false if leaving group
+ *
+ * An ungrouped port's offload mark is just its ifindex. A grouped
+ * port's (member of a bridge, for example) offload mark is the ifindex
+ * of one of the ports in the group with the same parent (switch) ID.
+ * Ports on the same device in the same group will have the same mark.
+ *
+ * Example:
+ *
+ * br0 ifindex=9
+ * sw1p1 ifindex=2 mark=2
+ * sw1p2 ifindex=3 mark=2
+ * sw2p1 ifindex=4 mark=5
+ * sw2p2 ifindex=5 mark=5
+ *
+ * If sw2p2 leaves the bridge, we'll have:
+ *
+ * br0 ifindex=9
+ * sw1p1 ifindex=2 mark=2
+ * sw1p2 ifindex=3 mark=2
+ * sw2p1 ifindex=4 mark=4
+ * sw2p2 ifindex=5 mark=5
+ */
+void switchdev_port_fwd_mark_set(struct net_device *dev,
+ struct net_device *group_dev,
+ bool joining)
+{
+ u32 mark = dev->ifindex;
+ u32 reset_mark = 0;
+
+ if (group_dev && joining) {
+ mark = switchdev_port_fwd_mark_get(dev, group_dev);
+ } else if (group_dev && !joining) {
+ if (dev->offload_fwd_mark == mark)
+ /* Ohoh, this port was the mark reference port,
+ * but it's leaving the group, so reset the
+ * mark for the remaining ports in the group.
+ */
+ switchdev_port_fwd_mark_reset(group_dev, mark,
+ &reset_mark);
+ }
+
+ dev->offload_fwd_mark = mark;
+}
+EXPORT_SYMBOL_GPL(switchdev_port_fwd_mark_set);
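
The doc comment above spells out the marking rule; in short, a joining port inherits the mark of an existing group member on the same parent switch, and otherwise keeps its own ifindex. A self-contained sketch reproducing the br0 example (which member donates the mark is an implementation detail, so the exact numbers can differ from the comment's):

#include <stdio.h>
#include <string.h>

struct port { int ifindex; const char *parent; unsigned int mark; };

/* A joining port takes the mark of an existing same-parent group
 * member; failing that, its own ifindex. */
static unsigned int fwd_mark_on_join(struct port *p, struct port **grp, int n)
{
	for (int i = 0; i < n; i++)
		if (grp[i] != p && !strcmp(grp[i]->parent, p->parent))
			return grp[i]->mark;
	return (unsigned int)p->ifindex;
}

int main(void)
{
	struct port sw1p1 = { 2, "sw1", 0 }, sw1p2 = { 3, "sw1", 0 };
	struct port sw2p1 = { 4, "sw2", 0 }, sw2p2 = { 5, "sw2", 0 };
	struct port *br0[4] = { &sw1p1, &sw1p2, &sw2p1, &sw2p2 };

	for (int i = 0; i < 4; i++)	/* ports join the bridge in order */
		br0[i]->mark = fwd_mark_on_join(br0[i], br0, i + 1);

	for (int i = 0; i < 4; i++)
		printf("ifindex=%d mark=%u\n", br0[i]->ifindex, br0[i]->mark);
	return 0;
}
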
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index e7000be32..ed98c1fc3 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -94,10 +94,14 @@ __init int net_sysctl_init(void)
goto out;
ret = register_pernet_subsys(&sysctl_pernet_ops);
if (ret)
- goto out;
+ goto out1;
register_sysctl_root(&net_sysctl_root);
out:
return ret;
+out1:
+ unregister_sysctl_table(net_header);
+ net_header = NULL;
+ goto out;
}
struct ctl_table_header *register_net_sysctl(struct net *net,
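
The fix gives net_sysctl_init() a real unwind path: if register_pernet_subsys() fails, the already-registered header is torn down before the error is returned, using the same backwards-goto shape as the patch. In miniature:

#include <stdio.h>

static int  register_a(void)   { return 0; }
static void unregister_a(void) { puts("a unregistered"); }
static int  register_b(void)   { return -1; }	/* simulate failure */

static int init(void)
{
	int ret = register_a();
	if (ret)
		goto out;
	ret = register_b();
	if (ret)
		goto out1;
out:
	return ret;
out1:
	unregister_a();		/* unwind step A before reporting */
	goto out;
}

int main(void)
{
	printf("init=%d\n", init());
	return 0;
}
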
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index a816382fc..eadba62af 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -42,7 +42,8 @@
#include "core.h"
#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
-#define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */
+#define BCLINK_WIN_DEFAULT 50 /* bcast link window size (default) */
+#define BCLINK_WIN_MIN 32 /* bcast minimum link window size */
const char tipc_bclink_name[] = "broadcast-link";
@@ -170,6 +171,30 @@ static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
}
/**
+ * bclink_prepare_wakeup - prepare users for wakeup after congestion
+ * @bcl: broadcast link
+ * @resultq: queue for users which can be woken up
+ * Move a number of waiting users, as permitted by available space in
+ * the send queue, from link wait queue to specified queue for wakeup
+ */
+static void bclink_prepare_wakeup(struct tipc_link *bcl, struct sk_buff_head *resultq)
+{
+ int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
+ int imp, lim;
+ struct sk_buff *skb, *tmp;
+
+ skb_queue_walk_safe(&bcl->wakeupq, skb, tmp) {
+ imp = TIPC_SKB_CB(skb)->chain_imp;
+ lim = bcl->window + bcl->backlog[imp].limit;
+ pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
+ if ((pnd[imp] + bcl->backlog[imp].len) >= lim)
+ continue;
+ skb_unlink(skb, &bcl->wakeupq);
+ skb_queue_tail(resultq, skb);
+ }
+}
+
+/**
* tipc_bclink_wakeup_users - wake up pending users
*
* Called with no locks taken
@@ -177,8 +202,12 @@ static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
void tipc_bclink_wakeup_users(struct net *net)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_link *bcl = tn->bcl;
+ struct sk_buff_head resultq;
- tipc_sk_rcv(net, &tn->bclink->link.wakeupq);
+ skb_queue_head_init(&resultq);
+ bclink_prepare_wakeup(bcl, &resultq);
+ tipc_sk_rcv(net, &resultq);
}
/**
@@ -316,6 +345,29 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
}
}
+void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *hdr)
+{
+ u16 last = msg_last_bcast(hdr);
+ int mtyp = msg_type(hdr);
+
+ if (unlikely(msg_user(hdr) != LINK_PROTOCOL))
+ return;
+ if (mtyp == STATE_MSG) {
+ tipc_bclink_update_link_state(n, last);
+ return;
+ }
+ /* Compatibility: older nodes don't know BCAST_PROTOCOL synchronization,
+ * and transfer synch info in LINK_PROTOCOL messages.
+ */
+ if (tipc_node_is_up(n))
+ return;
+ if ((mtyp != RESET_MSG) && (mtyp != ACTIVATE_MSG))
+ return;
+ n->bclink.last_sent = last;
+ n->bclink.last_in = last;
+ n->bclink.oos_state = 0;
+}
+
/**
* bclink_peek_nack - monitor retransmission requests sent by other nodes
*
@@ -358,10 +410,9 @@ int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
/* Prepare clone of message for local node */
skb = tipc_msg_reassemble(list);
- if (unlikely(!skb)) {
- __skb_queue_purge(list);
+ if (unlikely(!skb))
return -EHOSTUNREACH;
- }
+
/* Broadcast to all nodes */
if (likely(bclink)) {
tipc_bclink_lock(net);
@@ -413,7 +464,7 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
* all nodes in the cluster don't ACK at the same time
*/
if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
- tipc_link_proto_xmit(node->active_links[node->addr & 1],
+ tipc_link_proto_xmit(node_active_link(node, node->addr),
STATE_MSG, 0, 0, 0, 0);
tn->bcl->stats.sent_acks++;
}
@@ -858,9 +909,10 @@ int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
if (!bcl)
return -ENOPROTOOPT;
- if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
+ if (limit < BCLINK_WIN_MIN)
+ limit = BCLINK_WIN_MIN;
+ if (limit > TIPC_MAX_LINK_WIN)
return -EINVAL;
-
tipc_bclink_lock(net);
tipc_link_set_queue_limits(bcl, limit);
tipc_bclink_unlock(net);
@@ -925,7 +977,6 @@ int tipc_bclink_init(struct net *net)
tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
bcl->bearer_id = MAX_BEARERS;
rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
- bcl->state = WORKING_WORKING;
bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
msg_set_prevnode(bcl->pmsg, tn->own_addr);
strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 3c290a48f..d74c69bcf 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -133,5 +133,6 @@ void tipc_bclink_wakeup_users(struct net *net);
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]);
void tipc_bclink_input(struct net *net);
+void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *msg);
#endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 00bc0e620..ce9f7bfc0 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -343,7 +343,7 @@ restart:
static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b_ptr)
{
pr_info("Resetting bearer <%s>\n", b_ptr->name);
- tipc_link_delete_list(net, b_ptr->identity);
+ tipc_node_delete_links(net, b_ptr->identity);
tipc_disc_reset(net, b_ptr);
return 0;
}
@@ -361,7 +361,7 @@ static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr)
pr_info("Disabling bearer <%s>\n", b_ptr->name);
b_ptr->media->disable_media(b_ptr);
- tipc_link_delete_list(net, b_ptr->identity);
+ tipc_node_delete_links(net, b_ptr->identity);
if (b_ptr->link_req)
tipc_disc_delete(b_ptr->link_req);
@@ -470,6 +470,32 @@ void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
rcu_read_unlock();
}
+/* tipc_bearer_xmit() - send buffer to destination over bearer
+ */
+void tipc_bearer_xmit(struct net *net, u32 bearer_id,
+ struct sk_buff_head *xmitq,
+ struct tipc_media_addr *dst)
+{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_bearer *b;
+ struct sk_buff *skb, *tmp;
+
+ if (skb_queue_empty(xmitq))
+ return;
+
+ rcu_read_lock();
+ b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
+ if (likely(b)) {
+ skb_queue_walk_safe(xmitq, skb, tmp) {
+ __skb_dequeue(xmitq);
+ b->media->send_msg(net, skb, b, dst);
+ /* Until we remove cloning in tipc_l2_send_msg(): */
+ kfree_skb(skb);
+ }
+ }
+ rcu_read_unlock();
+}
+
/**
* tipc_l2_rcv_msg - handle incoming TIPC message from an interface
* @buf: the received packet
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index dc714d977..6426f242f 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -217,5 +217,8 @@ void tipc_bearer_cleanup(void);
void tipc_bearer_stop(struct net *net);
void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
struct tipc_media_addr *dest);
+void tipc_bearer_xmit(struct net *net, u32 bearer_id,
+ struct sk_buff_head *xmitq,
+ struct tipc_media_addr *dst);
#endif /* _TIPC_BEARER_H */
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 0fcf133d5..b96b41eab 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -109,6 +109,11 @@ struct tipc_net {
atomic_t subscription_count;
};
+static inline struct tipc_net *tipc_net(struct net *net)
+{
+ return net_generic(net, tipc_net_id);
+}
+
static inline u16 mod(u16 x)
{
return x & 0xffffu;
@@ -129,6 +134,11 @@ static inline int less(u16 left, u16 right)
return less_eq(left, right) && (mod(right) != mod(left));
}
+static inline int in_range(u16 val, u16 min, u16 max)
+{
+ return !less(val, min) && !more(val, max);
+}
+
#ifdef CONFIG_SYSCTL
int tipc_register_sysctl(void);
void tipc_unregister_sysctl(void);
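
The new in_range() builds on less()/more(), which compare 16-bit sequence numbers modulo 2^16 so ordering survives wraparound (serial-number arithmetic). A standalone version:

#include <stdint.h>
#include <stdio.h>

/* Serial-number arithmetic on u16, as in tipc's core.h helpers. */
static int less_eq(uint16_t l, uint16_t r)
{
	return (uint16_t)(r - l) < 32768;	/* r at most half-range ahead */
}

static int in_range(uint16_t v, uint16_t min, uint16_t max)
{
	return less_eq(min, v) && less_eq(v, max);
}

int main(void)
{
	/* 65534..2 wraps through 0; 1 is inside the range, 10 is not. */
	printf("%d %d\n", in_range(1, 65534, 2), in_range(10, 65534, 2));
	return 0;
}
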
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 967e292f5..d14e0a4aa 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -35,7 +35,7 @@
*/
#include "core.h"
-#include "link.h"
+#include "node.h"
#include "discover.h"
/* min delay during bearer start up */
@@ -120,30 +120,24 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
* @buf: buffer containing message
* @bearer: bearer that message arrived on
*/
-void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
+void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
struct tipc_bearer *bearer)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct tipc_node *node;
- struct tipc_link *link;
struct tipc_media_addr maddr;
- struct sk_buff *rbuf;
- struct tipc_msg *msg = buf_msg(buf);
- u32 ddom = msg_dest_domain(msg);
- u32 onode = msg_prevnode(msg);
- u32 net_id = msg_bc_netid(msg);
- u32 mtyp = msg_type(msg);
- u32 signature = msg_node_sig(msg);
- u16 caps = msg_node_capabilities(msg);
- bool addr_match = false;
- bool sign_match = false;
- bool link_up = false;
- bool accept_addr = false;
- bool accept_sign = false;
+ struct sk_buff *rskb;
+ struct tipc_msg *hdr = buf_msg(skb);
+ u32 ddom = msg_dest_domain(hdr);
+ u32 onode = msg_prevnode(hdr);
+ u32 net_id = msg_bc_netid(hdr);
+ u32 mtyp = msg_type(hdr);
+ u32 signature = msg_node_sig(hdr);
+ u16 caps = msg_node_capabilities(hdr);
bool respond = false;
+ bool dupl_addr = false;
- bearer->media->msg2addr(bearer, &maddr, msg_media_addr(msg));
- kfree_skb(buf);
+ bearer->media->msg2addr(bearer, &maddr, msg_media_addr(hdr));
+ kfree_skb(skb);
/* Ensure message from node is valid and communication is permitted */
if (net_id != tn->net_id)
@@ -165,102 +159,20 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
if (!tipc_in_scope(bearer->domain, onode))
return;
- node = tipc_node_create(net, onode);
- if (!node)
- return;
- tipc_node_lock(node);
- node->capabilities = caps;
- link = node->links[bearer->identity];
-
- /* Prepare to validate requesting node's signature and media address */
- sign_match = (signature == node->signature);
- addr_match = link && !memcmp(&link->media_addr, &maddr, sizeof(maddr));
- link_up = link && tipc_link_is_up(link);
-
-
- /* These three flags give us eight permutations: */
-
- if (sign_match && addr_match && link_up) {
- /* All is fine. Do nothing. */
- } else if (sign_match && addr_match && !link_up) {
- /* Respond. The link will come up in due time */
- respond = true;
- } else if (sign_match && !addr_match && link_up) {
- /* Peer has changed i/f address without rebooting.
- * If so, the link will reset soon, and the next
- * discovery will be accepted. So we can ignore it.
- * It may also be an cloned or malicious peer having
- * chosen the same node address and signature as an
- * existing one.
- * Ignore requests until the link goes down, if ever.
- */
- disc_dupl_alert(bearer, onode, &maddr);
- } else if (sign_match && !addr_match && !link_up) {
- /* Peer link has changed i/f address without rebooting.
- * It may also be a cloned or malicious peer; we can't
- * distinguish between the two.
- * The signature is correct, so we must accept.
- */
- accept_addr = true;
- respond = true;
- } else if (!sign_match && addr_match && link_up) {
- /* Peer node rebooted. Two possibilities:
- * - Delayed re-discovery; this link endpoint has already
- * reset and re-established contact with the peer, before
- * receiving a discovery message from that node.
- * (The peer happened to receive one from this node first).
- * - The peer came back so fast that our side has not
- * discovered it yet. Probing from this side will soon
- * reset the link, since there can be no working link
- * endpoint at the peer end, and the link will re-establish.
- * Accept the signature, since it comes from a known peer.
- */
- accept_sign = true;
- } else if (!sign_match && addr_match && !link_up) {
- /* The peer node has rebooted.
- * Accept signature, since it is a known peer.
- */
- accept_sign = true;
- respond = true;
- } else if (!sign_match && !addr_match && link_up) {
- /* Peer rebooted with new address, or a new/duplicate peer.
- * Ignore until the link goes down, if ever.
- */
+ tipc_node_check_dest(net, onode, bearer, caps, signature,
+ &maddr, &respond, &dupl_addr);
+ if (dupl_addr)
disc_dupl_alert(bearer, onode, &maddr);
- } else if (!sign_match && !addr_match && !link_up) {
- /* Peer rebooted with new address, or it is a new peer.
- * Accept signature and address.
- */
- accept_sign = true;
- accept_addr = true;
- respond = true;
- }
-
- if (accept_sign)
- node->signature = signature;
-
- if (accept_addr) {
- if (!link)
- link = tipc_link_create(node, bearer, &maddr);
- if (link) {
- memcpy(&link->media_addr, &maddr, sizeof(maddr));
- tipc_link_reset(link);
- } else {
- respond = false;
- }
- }
/* Send response, if necessary */
if (respond && (mtyp == DSC_REQ_MSG)) {
- rbuf = tipc_buf_acquire(MAX_H_SIZE);
- if (rbuf) {
- tipc_disc_init_msg(net, rbuf, DSC_RESP_MSG, bearer);
- tipc_bearer_send(net, bearer->identity, rbuf, &maddr);
- kfree_skb(rbuf);
+ rskb = tipc_buf_acquire(MAX_H_SIZE);
+ if (rskb) {
+ tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer);
+ tipc_bearer_send(net, bearer->identity, rskb, &maddr);
+ kfree_skb(rskb);
}
}
- tipc_node_unlock(node);
- tipc_node_put(node);
}
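
The eight-way decision over (signature match, address match, link up) that used to live inline here has moved into tipc_node_check_dest(), which reports only "respond" and "dupl_addr" back to the caller. A compact standalone model of the table the deleted branches encoded is given below; the struct and function names are hypothetical, the outcome flags follow the removed code.

#include <assert.h>
#include <stdbool.h>

struct disc_verdict {
	bool accept_sign;  /* overwrite stored peer signature  */
	bool accept_addr;  /* (re)create link with new address */
	bool respond;      /* answer a DSC_REQ_MSG             */
	bool dupl_addr;    /* raise duplicate-address alert    */
};

static struct disc_verdict check_dest(bool sign_match, bool addr_match,
				      bool link_up)
{
	struct disc_verdict v = { false, false, false, false };

	if (sign_match && addr_match && link_up) {
		/* all is fine, do nothing */
	} else if (sign_match && addr_match && !link_up) {
		v.respond = true;                 /* link will come up */
	} else if (sign_match && !addr_match && link_up) {
		v.dupl_addr = true;               /* possible duplicate peer */
	} else if (sign_match && !addr_match && !link_up) {
		v.accept_addr = v.respond = true; /* peer changed i/f address */
	} else if (!sign_match && addr_match && link_up) {
		v.accept_sign = true;             /* peer rebooted */
	} else if (!sign_match && addr_match && !link_up) {
		v.accept_sign = v.respond = true;
	} else if (!sign_match && !addr_match && link_up) {
		v.dupl_addr = true;               /* ignore until link drops */
	} else {
		v.accept_sign = v.accept_addr = v.respond = true;
	}
	return v;
}

int main(void)
{
	assert(check_dest(true, true, false).respond);
	assert(check_dest(false, false, true).dupl_addr);
	return 0;
}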
/**
diff --git a/net/tipc/link.c b/net/tipc/link.c
index eaa9fe54b..75db07c78 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -48,9 +48,8 @@
/*
* Error message prefixes
*/
-static const char *link_co_err = "Link changeover error, ";
+static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";
-static const char *link_unk_evt = "Unknown link event ";
static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
[TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
@@ -77,256 +76,413 @@ static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
};
/*
+ * Interval between NACKs when packets arrive out of order
+ */
+#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
+/*
* Out-of-range value for link session numbers
*/
-#define INVALID_SESSION 0x10000
+#define WILDCARD_SESSION 0x10000
-/*
- * Link state events:
+/* Link FSM states:
*/
-#define STARTING_EVT 856384768 /* link processing trigger */
-#define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */
-#define SILENCE_EVT 560817u /* timer dicovered silence from peer */
+enum {
+ LINK_ESTABLISHED = 0xe,
+ LINK_ESTABLISHING = 0xe << 4,
+ LINK_RESET = 0x1 << 8,
+ LINK_RESETTING = 0x2 << 12,
+ LINK_PEER_RESET = 0xd << 16,
+ LINK_FAILINGOVER = 0xf << 20,
+ LINK_SYNCHING = 0xc << 24
+};
-/*
- * State value stored in 'failover_pkts'
+/* Link FSM state checking routines
*/
-#define FIRST_FAILOVER 0xffffu
-
-static void link_handle_out_of_seq_msg(struct tipc_link *link,
- struct sk_buff *skb);
-static void tipc_link_proto_rcv(struct tipc_link *link,
- struct sk_buff *skb);
-static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
-static void link_state_event(struct tipc_link *l_ptr, u32 event);
+static int link_is_up(struct tipc_link *l)
+{
+ return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
+}
+
+static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
+ struct sk_buff_head *xmitq);
+static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
+ u16 rcvgap, int tolerance, int priority,
+ struct sk_buff_head *xmitq);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
-static void tipc_link_sync_xmit(struct tipc_link *l);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
-static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
-static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
-static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);
-static void link_set_timer(struct tipc_link *link, unsigned long time);
+
/*
- * Simple link routines
+ * Simple non-static link routines (i.e. referenced outside this file)
*/
-static unsigned int align(unsigned int i)
+bool tipc_link_is_up(struct tipc_link *l)
{
- return (i + 3) & ~3u;
+ return link_is_up(l);
}
-static void tipc_link_release(struct kref *kref)
+bool tipc_link_is_reset(struct tipc_link *l)
{
- kfree(container_of(kref, struct tipc_link, ref));
+ return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}
-static void tipc_link_get(struct tipc_link *l_ptr)
+bool tipc_link_is_synching(struct tipc_link *l)
{
- kref_get(&l_ptr->ref);
+ return l->state == LINK_SYNCHING;
}
-static void tipc_link_put(struct tipc_link *l_ptr)
+bool tipc_link_is_failingover(struct tipc_link *l)
{
- kref_put(&l_ptr->ref, tipc_link_release);
+ return l->state == LINK_FAILINGOVER;
}
-static struct tipc_link *tipc_parallel_link(struct tipc_link *l)
+bool tipc_link_is_blocked(struct tipc_link *l)
{
- if (l->owner->active_links[0] != l)
- return l->owner->active_links[0];
- return l->owner->active_links[1];
+ return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}
-/*
- * Simple non-static link routines (i.e. referenced outside this file)
- */
-int tipc_link_is_up(struct tipc_link *l_ptr)
+int tipc_link_is_active(struct tipc_link *l)
{
- if (!l_ptr)
- return 0;
- return link_working_working(l_ptr) || link_working_unknown(l_ptr);
+ struct tipc_node *n = l->owner;
+
+ return (node_active_link(n, 0) == l) || (node_active_link(n, 1) == l);
}
-int tipc_link_is_active(struct tipc_link *l_ptr)
+static u32 link_own_addr(struct tipc_link *l)
{
- return (l_ptr->owner->active_links[0] == l_ptr) ||
- (l_ptr->owner->active_links[1] == l_ptr);
+ return msg_prevnode(l->pmsg);
}
/**
- * link_timeout - handle expiration of link timer
- * @l_ptr: pointer to link
+ * tipc_link_create - create a new link
+ * @n: pointer to associated node
+ * @b: pointer to associated bearer
+ * @session: session number to advertise in the link's protocol messages
+ * @ownnode: identity of own node
+ * @peer: identity of peer node
+ * @maddr: media address to be used
+ * @inputq: queue to put messages ready for delivery
+ * @namedq: queue to put binding table update messages ready for delivery
+ * @link: return value, pointer to put the created link
+ *
+ * Returns true if link was created, otherwise false
*/
-static void link_timeout(unsigned long data)
+bool tipc_link_create(struct tipc_node *n, struct tipc_bearer *b, u32 session,
+ u32 ownnode, u32 peer, struct tipc_media_addr *maddr,
+ struct sk_buff_head *inputq, struct sk_buff_head *namedq,
+ struct tipc_link **link)
{
- struct tipc_link *l_ptr = (struct tipc_link *)data;
- struct sk_buff *skb;
+ struct tipc_link *l;
+ struct tipc_msg *hdr;
+ char *if_name;
+
+ l = kzalloc(sizeof(*l), GFP_ATOMIC);
+ if (!l)
+ return false;
+ *link = l;
+
+ /* Note: peer i/f name is completed by reset/activate message */
+ if_name = strchr(b->name, ':') + 1;
+ sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
+ tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
+ if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
+
+ l->addr = peer;
+ l->media_addr = maddr;
+ l->owner = n;
+ l->peer_session = WILDCARD_SESSION;
+ l->bearer_id = b->identity;
+ l->tolerance = b->tolerance;
+ l->net_plane = b->net_plane;
+ l->advertised_mtu = b->mtu;
+ l->mtu = b->mtu;
+ l->priority = b->priority;
+ tipc_link_set_queue_limits(l, b->window);
+ l->inputq = inputq;
+ l->namedq = namedq;
+ l->state = LINK_RESETTING;
+ l->pmsg = (struct tipc_msg *)&l->proto_msg;
+ hdr = l->pmsg;
+ tipc_msg_init(ownnode, hdr, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, peer);
+ msg_set_size(hdr, sizeof(l->proto_msg));
+ msg_set_session(hdr, session);
+ msg_set_bearer_id(hdr, l->bearer_id);
+ strcpy((char *)msg_data(hdr), if_name);
+ __skb_queue_head_init(&l->transmq);
+ __skb_queue_head_init(&l->backlogq);
+ __skb_queue_head_init(&l->deferdq);
+ skb_queue_head_init(&l->wakeupq);
+ skb_queue_head_init(l->inputq);
+ return true;
+}
- tipc_node_lock(l_ptr->owner);
+/* tipc_link_build_bcast_sync_msg() - synchronize broadcast link endpoints.
+ *
+ * Give a newly added peer node the sequence number where it should
+ * start receiving and acking broadcast packets.
+ */
+void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
+ struct sk_buff_head *xmitq)
+{
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+ u16 last_sent;
- /* update counters used in statistical profiling of send traffic */
- l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->transmq);
- l_ptr->stats.queue_sz_counts++;
+ skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
+ 0, l->addr, link_own_addr(l), 0, 0, 0);
+ if (!skb)
+ return;
+ last_sent = tipc_bclink_get_last_sent(l->owner->net);
+ msg_set_last_bcast(buf_msg(skb), last_sent);
+ __skb_queue_head_init(&list);
+ __skb_queue_tail(&list, skb);
+ tipc_link_xmit(l, &list, xmitq);
+}
- skb = skb_peek(&l_ptr->transmq);
- if (skb) {
- struct tipc_msg *msg = buf_msg(skb);
- u32 length = msg_size(msg);
+/**
+ * tipc_link_fsm_evt - link finite state machine
+ * @l: pointer to link
+ * @evt: state machine event to be processed
+ */
+int tipc_link_fsm_evt(struct tipc_link *l, int evt)
+{
+ int rc = 0;
- if ((msg_user(msg) == MSG_FRAGMENTER) &&
- (msg_type(msg) == FIRST_FRAGMENT)) {
- length = msg_size(msg_get_wrapped(msg));
+ switch (l->state) {
+ case LINK_RESETTING:
+ switch (evt) {
+ case LINK_PEER_RESET_EVT:
+ l->state = LINK_PEER_RESET;
+ break;
+ case LINK_RESET_EVT:
+ l->state = LINK_RESET;
+ break;
+ case LINK_FAILURE_EVT:
+ case LINK_FAILOVER_BEGIN_EVT:
+ case LINK_ESTABLISH_EVT:
+ case LINK_FAILOVER_END_EVT:
+ case LINK_SYNCH_BEGIN_EVT:
+ case LINK_SYNCH_END_EVT:
+ default:
+ goto illegal_evt;
}
- if (length) {
- l_ptr->stats.msg_lengths_total += length;
- l_ptr->stats.msg_length_counts++;
- if (length <= 64)
- l_ptr->stats.msg_length_profile[0]++;
- else if (length <= 256)
- l_ptr->stats.msg_length_profile[1]++;
- else if (length <= 1024)
- l_ptr->stats.msg_length_profile[2]++;
- else if (length <= 4096)
- l_ptr->stats.msg_length_profile[3]++;
- else if (length <= 16384)
- l_ptr->stats.msg_length_profile[4]++;
- else if (length <= 32768)
- l_ptr->stats.msg_length_profile[5]++;
- else
- l_ptr->stats.msg_length_profile[6]++;
+ break;
+ case LINK_RESET:
+ switch (evt) {
+ case LINK_PEER_RESET_EVT:
+ l->state = LINK_ESTABLISHING;
+ break;
+ case LINK_FAILOVER_BEGIN_EVT:
+ l->state = LINK_FAILINGOVER;
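+ /* fall through to the shared break */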
+ case LINK_FAILURE_EVT:
+ case LINK_RESET_EVT:
+ case LINK_ESTABLISH_EVT:
+ case LINK_FAILOVER_END_EVT:
+ break;
+ case LINK_SYNCH_BEGIN_EVT:
+ case LINK_SYNCH_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case LINK_PEER_RESET:
+ switch (evt) {
+ case LINK_RESET_EVT:
+ l->state = LINK_ESTABLISHING;
+ break;
+ case LINK_PEER_RESET_EVT:
+ case LINK_ESTABLISH_EVT:
+ case LINK_FAILURE_EVT:
+ break;
+ case LINK_SYNCH_BEGIN_EVT:
+ case LINK_SYNCH_END_EVT:
+ case LINK_FAILOVER_BEGIN_EVT:
+ case LINK_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
}
+ break;
+ case LINK_FAILINGOVER:
+ switch (evt) {
+ case LINK_FAILOVER_END_EVT:
+ l->state = LINK_RESET;
+ break;
+ case LINK_PEER_RESET_EVT:
+ case LINK_RESET_EVT:
+ case LINK_ESTABLISH_EVT:
+ case LINK_FAILURE_EVT:
+ break;
+ case LINK_FAILOVER_BEGIN_EVT:
+ case LINK_SYNCH_BEGIN_EVT:
+ case LINK_SYNCH_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case LINK_ESTABLISHING:
+ switch (evt) {
+ case LINK_ESTABLISH_EVT:
+ l->state = LINK_ESTABLISHED;
+ rc |= TIPC_LINK_UP_EVT;
+ break;
+ case LINK_FAILOVER_BEGIN_EVT:
+ l->state = LINK_FAILINGOVER;
+ break;
+ case LINK_PEER_RESET_EVT:
+ case LINK_RESET_EVT:
+ case LINK_FAILURE_EVT:
+ case LINK_SYNCH_BEGIN_EVT:
+ case LINK_FAILOVER_END_EVT:
+ break;
+ case LINK_SYNCH_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case LINK_ESTABLISHED:
+ switch (evt) {
+ case LINK_PEER_RESET_EVT:
+ l->state = LINK_PEER_RESET;
+ rc |= TIPC_LINK_DOWN_EVT;
+ break;
+ case LINK_FAILURE_EVT:
+ l->state = LINK_RESETTING;
+ rc |= TIPC_LINK_DOWN_EVT;
+ break;
+ case LINK_RESET_EVT:
+ l->state = LINK_RESET;
+ break;
+ case LINK_ESTABLISH_EVT:
+ case LINK_SYNCH_END_EVT:
+ break;
+ case LINK_SYNCH_BEGIN_EVT:
+ l->state = LINK_SYNCHING;
+ break;
+ case LINK_FAILOVER_BEGIN_EVT:
+ case LINK_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case LINK_SYNCHING:
+ switch (evt) {
+ case LINK_PEER_RESET_EVT:
+ l->state = LINK_PEER_RESET;
+ rc |= TIPC_LINK_DOWN_EVT;
+ break;
+ case LINK_FAILURE_EVT:
+ l->state = LINK_RESETTING;
+ rc |= TIPC_LINK_DOWN_EVT;
+ break;
+ case LINK_RESET_EVT:
+ l->state = LINK_RESET;
+ break;
+ case LINK_ESTABLISH_EVT:
+ case LINK_SYNCH_BEGIN_EVT:
+ break;
+ case LINK_SYNCH_END_EVT:
+ l->state = LINK_ESTABLISHED;
+ break;
+ case LINK_FAILOVER_BEGIN_EVT:
+ case LINK_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ default:
+ pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
}
-
- /* do all other link processing performed on a periodic basis */
- if (l_ptr->silent_intv_cnt || tipc_bclink_acks_missing(l_ptr->owner))
- link_state_event(l_ptr, SILENCE_EVT);
- l_ptr->silent_intv_cnt++;
- if (skb_queue_len(&l_ptr->backlogq))
- tipc_link_push_packets(l_ptr);
- link_set_timer(l_ptr, l_ptr->keepalive_intv);
- tipc_node_unlock(l_ptr->owner);
- tipc_link_put(l_ptr);
-}
-
-static void link_set_timer(struct tipc_link *link, unsigned long time)
-{
- if (!mod_timer(&link->timer, jiffies + time))
- tipc_link_get(link);
+ return rc;
+illegal_evt:
+ pr_err("Illegal FSM event %x in state %x on link %s\n",
+ evt, l->state, l->name);
+ return rc;
}
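
Each state in the enum further up occupies its own bit positions, so a set of states can be tested with a single mask, as link_is_up() and tipc_link_is_blocked() do, and tipc_link_fsm_evt() reduces to a flat state-by-event table with a common illegal-event exit. A trimmed standalone model of that pattern follows, covering only two states and three events; the state constants are copied from the enum above, everything else is illustrative.

#include <assert.h>
#include <stdio.h>

enum { LINK_ESTABLISHED = 0xe, LINK_ESTABLISHING = 0xe << 4,
       LINK_RESET = 0x1 << 8, LINK_SYNCHING = 0xc << 24 };
enum { LINK_PEER_RESET_EVT, LINK_ESTABLISH_EVT, LINK_SYNCH_BEGIN_EVT };

/* One mask test covers a whole set of states, as in link_is_up() */
static int link_is_up(int state)
{
	return state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int fsm_evt(int *state, int evt)
{
	switch (*state) {
	case LINK_RESET:
		if (evt == LINK_PEER_RESET_EVT) {
			*state = LINK_ESTABLISHING;
			return 0;
		}
		break;
	case LINK_ESTABLISHING:
		if (evt == LINK_ESTABLISH_EVT) {
			*state = LINK_ESTABLISHED;
			return 0;
		}
		break;
	}
	fprintf(stderr, "illegal event %d in state %x\n", evt, *state);
	return -1;
}

int main(void)
{
	int state = LINK_RESET;

	assert(fsm_evt(&state, LINK_PEER_RESET_EVT) == 0);
	assert(fsm_evt(&state, LINK_ESTABLISH_EVT) == 0);
	assert(link_is_up(state));
	/* SYNCH_BEGIN is illegal while still in RESET */
	state = LINK_RESET;
	assert(fsm_evt(&state, LINK_SYNCH_BEGIN_EVT) < 0);
	return 0;
}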
-/**
- * tipc_link_create - create a new link
- * @n_ptr: pointer to associated node
- * @b_ptr: pointer to associated bearer
- * @media_addr: media address to use when sending messages over link
- *
- * Returns pointer to link.
+/* link_profile_stats - update statistical profiling of traffic
*/
-struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
- struct tipc_bearer *b_ptr,
- const struct tipc_media_addr *media_addr)
+static void link_profile_stats(struct tipc_link *l)
{
- struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
- struct tipc_link *l_ptr;
+ struct sk_buff *skb;
struct tipc_msg *msg;
- char *if_name;
- char addr_string[16];
- u32 peer = n_ptr->addr;
+ int length;
- if (n_ptr->link_cnt >= MAX_BEARERS) {
- tipc_addr_string_fill(addr_string, n_ptr->addr);
- pr_err("Cannot establish %uth link to %s. Max %u allowed.\n",
- n_ptr->link_cnt, addr_string, MAX_BEARERS);
- return NULL;
- }
+ /* Update counters used in statistical profiling of send traffic */
+ l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
+ l->stats.queue_sz_counts++;
- if (n_ptr->links[b_ptr->identity]) {
- tipc_addr_string_fill(addr_string, n_ptr->addr);
- pr_err("Attempt to establish second link on <%s> to %s\n",
- b_ptr->name, addr_string);
- return NULL;
- }
+ skb = skb_peek(&l->transmq);
+ if (!skb)
+ return;
+ msg = buf_msg(skb);
+ length = msg_size(msg);
- l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
- if (!l_ptr) {
- pr_warn("Link creation failed, no memory\n");
- return NULL;
+ if (msg_user(msg) == MSG_FRAGMENTER) {
+ if (msg_type(msg) != FIRST_FRAGMENT)
+ return;
+ length = msg_size(msg_get_wrapped(msg));
}
- kref_init(&l_ptr->ref);
- l_ptr->addr = peer;
- if_name = strchr(b_ptr->name, ':') + 1;
- sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
- tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
- tipc_node(tn->own_addr),
- if_name,
- tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
- /* note: peer i/f name is updated by reset/activate message */
- memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
- l_ptr->owner = n_ptr;
- l_ptr->peer_session = INVALID_SESSION;
- l_ptr->bearer_id = b_ptr->identity;
- link_set_supervision_props(l_ptr, b_ptr->tolerance);
- l_ptr->state = RESET_UNKNOWN;
-
- l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
- msg = l_ptr->pmsg;
- tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
- l_ptr->addr);
- msg_set_size(msg, sizeof(l_ptr->proto_msg));
- msg_set_session(msg, (tn->random & 0xffff));
- msg_set_bearer_id(msg, b_ptr->identity);
- strcpy((char *)msg_data(msg), if_name);
- l_ptr->net_plane = b_ptr->net_plane;
- l_ptr->advertised_mtu = b_ptr->mtu;
- l_ptr->mtu = l_ptr->advertised_mtu;
- l_ptr->priority = b_ptr->priority;
- tipc_link_set_queue_limits(l_ptr, b_ptr->window);
- l_ptr->snd_nxt = 1;
- __skb_queue_head_init(&l_ptr->transmq);
- __skb_queue_head_init(&l_ptr->backlogq);
- __skb_queue_head_init(&l_ptr->deferdq);
- skb_queue_head_init(&l_ptr->wakeupq);
- skb_queue_head_init(&l_ptr->inputq);
- skb_queue_head_init(&l_ptr->namedq);
- link_reset_statistics(l_ptr);
- tipc_node_attach_link(n_ptr, l_ptr);
- setup_timer(&l_ptr->timer, link_timeout, (unsigned long)l_ptr);
- link_state_event(l_ptr, STARTING_EVT);
-
- return l_ptr;
+ l->stats.msg_lengths_total += length;
+ l->stats.msg_length_counts++;
+ if (length <= 64)
+ l->stats.msg_length_profile[0]++;
+ else if (length <= 256)
+ l->stats.msg_length_profile[1]++;
+ else if (length <= 1024)
+ l->stats.msg_length_profile[2]++;
+ else if (length <= 4096)
+ l->stats.msg_length_profile[3]++;
+ else if (length <= 16384)
+ l->stats.msg_length_profile[4]++;
+ else if (length <= 32768)
+ l->stats.msg_length_profile[5]++;
+ else
+ l->stats.msg_length_profile[6]++;
}
-/**
- * tipc_link_delete - Delete a link
- * @l: link to be deleted
+/* tipc_link_timeout - perform periodic task as instructed by node timer
*/
-void tipc_link_delete(struct tipc_link *l)
+int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
- tipc_link_reset(l);
- if (del_timer(&l->timer))
- tipc_link_put(l);
- l->flags |= LINK_STOPPED;
- /* Delete link now, or when timer is finished: */
- tipc_link_reset_fragments(l);
- tipc_node_detach_link(l->owner, l);
- tipc_link_put(l);
-}
+ int rc = 0;
+ int mtyp = STATE_MSG;
+ bool xmit = false;
+ bool prb = false;
+
+ link_profile_stats(l);
+
+ switch (l->state) {
+ case LINK_ESTABLISHED:
+ case LINK_SYNCHING:
+ if (!l->silent_intv_cnt) {
+ if (tipc_bclink_acks_missing(l->owner))
+ xmit = true;
+ } else if (l->silent_intv_cnt <= l->abort_limit) {
+ xmit = true;
+ prb = true;
+ } else {
+ rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
+ }
+ l->silent_intv_cnt++;
+ break;
+ case LINK_RESET:
+ xmit = true;
+ mtyp = RESET_MSG;
+ break;
+ case LINK_ESTABLISHING:
+ xmit = true;
+ mtyp = ACTIVATE_MSG;
+ break;
+ case LINK_PEER_RESET:
+ case LINK_RESETTING:
+ case LINK_FAILINGOVER:
+ break;
+ default:
+ break;
+ }
-void tipc_link_delete_list(struct net *net, unsigned int bearer_id)
-{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct tipc_link *link;
- struct tipc_node *node;
+ if (xmit)
+ tipc_link_build_proto_msg(l, mtyp, prb, 0, 0, 0, xmitq);
- rcu_read_lock();
- list_for_each_entry_rcu(node, &tn->node_list, list) {
- tipc_node_lock(node);
- link = node->links[bearer_id];
- if (link)
- tipc_link_delete(link);
- tipc_node_unlock(node);
- }
- rcu_read_unlock();
+ return rc;
}
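
With the per-link timer gone, the node tick calls tipc_link_timeout(), which converts consecutive silent intervals into probes and, once abort_limit is exceeded, into a LINK_FAILURE_EVT; receiving traffic resets the counter (tipc_link_rcv() zeroes silent_intv_cnt). The probing rule is just counter arithmetic, modeled below with hypothetical names and the bclink-ack special case left out.

#include <assert.h>

enum tick_action { TICK_NONE, TICK_PROBE, TICK_FAIL };

/* One timer tick on an established link: a zero counter means the
 * peer was heard during the last interval; otherwise probe until
 * abort_limit is exceeded, then declare failure. */
static enum tick_action link_tick(unsigned int *silent_cnt,
				  unsigned int abort_limit)
{
	enum tick_action act;

	if (*silent_cnt == 0)
		act = TICK_NONE;
	else if (*silent_cnt <= abort_limit)
		act = TICK_PROBE;
	else
		act = TICK_FAIL;
	(*silent_cnt)++;
	return act;
}

int main(void)
{
	unsigned int cnt = 0;

	assert(link_tick(&cnt, 2) == TICK_NONE);    /* cnt 0 -> 1 */
	assert(link_tick(&cnt, 2) == TICK_PROBE);   /* cnt 1 -> 2 */
	assert(link_tick(&cnt, 2) == TICK_PROBE);   /* cnt 2 -> 3 */
	assert(link_tick(&cnt, 2) == TICK_FAIL);    /* 3 > abort_limit */
	return 0;
}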
/**
@@ -334,7 +490,7 @@ void tipc_link_delete_list(struct net *net, unsigned int bearer_id)
* @link: congested link
* @list: message that was attempted sent
* Create pseudo msg to send back to user when congestion abates
- * Only consumes message if there is an error
+ * Does not consume buffer list
*/
static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
{
@@ -347,8 +503,7 @@ static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
/* This really cannot happen... */
if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
- tipc_link_reset(link);
- goto err;
+ return -ENOBUFS;
}
/* Non-blocking sender: */
if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
@@ -358,15 +513,12 @@ static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
addr, addr, oport, 0, 0);
if (!skb)
- goto err;
+ return -ENOBUFS;
TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
TIPC_SKB_CB(skb)->chain_imp = imp;
skb_queue_tail(&link->wakeupq, skb);
link->stats.link_congs++;
return -ELINKCONG;
-err:
- __skb_queue_purge(list);
- return -ENOBUFS;
}
/**
@@ -388,9 +540,7 @@ void link_prepare_wakeup(struct tipc_link *l)
if ((pnd[imp] + l->backlog[imp].len) >= lim)
break;
skb_unlink(skb, &l->wakeupq);
- skb_queue_tail(&l->inputq, skb);
- l->owner->inputq = &l->inputq;
- l->owner->action_flags |= TIPC_MSG_EVT;
+ skb_queue_tail(l->inputq, skb);
}
}
@@ -426,208 +576,36 @@ void tipc_link_purge_queues(struct tipc_link *l_ptr)
tipc_link_reset_fragments(l_ptr);
}
-void tipc_link_reset(struct tipc_link *l_ptr)
+void tipc_link_reset(struct tipc_link *l)
{
- u32 prev_state = l_ptr->state;
- int was_active_link = tipc_link_is_active(l_ptr);
- struct tipc_node *owner = l_ptr->owner;
- struct tipc_link *pl = tipc_parallel_link(l_ptr);
-
- msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
+ tipc_link_fsm_evt(l, LINK_RESET_EVT);
/* Link is down, accept any session */
- l_ptr->peer_session = INVALID_SESSION;
-
- /* Prepare for renewed mtu size negotiation */
- l_ptr->mtu = l_ptr->advertised_mtu;
-
- l_ptr->state = RESET_UNKNOWN;
+ l->peer_session = WILDCARD_SESSION;
- if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
- return;
-
- tipc_node_link_down(l_ptr->owner, l_ptr);
- tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);
+ /* If peer is up, it only accepts an incremented session number */
+ msg_set_session(l->pmsg, msg_session(l->pmsg) + 1);
- if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
- l_ptr->flags |= LINK_FAILINGOVER;
- l_ptr->failover_checkpt = l_ptr->rcv_nxt;
- pl->failover_pkts = FIRST_FAILOVER;
- pl->failover_checkpt = l_ptr->rcv_nxt;
- pl->failover_skb = l_ptr->reasm_buf;
- } else {
- kfree_skb(l_ptr->reasm_buf);
- }
- /* Clean up all queues, except inputq: */
- __skb_queue_purge(&l_ptr->transmq);
- __skb_queue_purge(&l_ptr->deferdq);
- if (!owner->inputq)
- owner->inputq = &l_ptr->inputq;
- skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
- if (!skb_queue_empty(owner->inputq))
- owner->action_flags |= TIPC_MSG_EVT;
- tipc_link_purge_backlog(l_ptr);
- l_ptr->reasm_buf = NULL;
- l_ptr->rcv_unacked = 0;
- l_ptr->snd_nxt = 1;
- l_ptr->silent_intv_cnt = 0;
- l_ptr->stale_count = 0;
- link_reset_statistics(l_ptr);
-}
-
-static void link_activate(struct tipc_link *link)
-{
- struct tipc_node *node = link->owner;
-
- link->rcv_nxt = 1;
- link->stats.recv_info = 1;
- link->silent_intv_cnt = 0;
- tipc_node_link_up(node, link);
- tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
-}
-
-/**
- * link_state_event - link finite state machine
- * @l_ptr: pointer to link
- * @event: state machine event to process
- */
-static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
-{
- struct tipc_link *other;
- unsigned long timer_intv = l_ptr->keepalive_intv;
-
- if (l_ptr->flags & LINK_STOPPED)
- return;
-
- if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
- return; /* Not yet. */
-
- if (l_ptr->flags & LINK_FAILINGOVER)
- return;
-
- switch (l_ptr->state) {
- case WORKING_WORKING:
- switch (event) {
- case TRAFFIC_MSG_EVT:
- case ACTIVATE_MSG:
- l_ptr->silent_intv_cnt = 0;
- break;
- case SILENCE_EVT:
- if (!l_ptr->silent_intv_cnt) {
- if (tipc_bclink_acks_missing(l_ptr->owner))
- tipc_link_proto_xmit(l_ptr, STATE_MSG,
- 0, 0, 0, 0);
- break;
- }
- l_ptr->state = WORKING_UNKNOWN;
- tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
- break;
- case RESET_MSG:
- pr_debug("%s<%s>, requested by peer\n",
- link_rst_msg, l_ptr->name);
- tipc_link_reset(l_ptr);
- l_ptr->state = RESET_RESET;
- tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
- 0, 0, 0, 0);
- break;
- default:
- pr_debug("%s%u in WW state\n", link_unk_evt, event);
- }
- break;
- case WORKING_UNKNOWN:
- switch (event) {
- case TRAFFIC_MSG_EVT:
- case ACTIVATE_MSG:
- l_ptr->state = WORKING_WORKING;
- l_ptr->silent_intv_cnt = 0;
- break;
- case RESET_MSG:
- pr_debug("%s<%s>, requested by peer while probing\n",
- link_rst_msg, l_ptr->name);
- tipc_link_reset(l_ptr);
- l_ptr->state = RESET_RESET;
- tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
- 0, 0, 0, 0);
- break;
- case SILENCE_EVT:
- if (!l_ptr->silent_intv_cnt) {
- l_ptr->state = WORKING_WORKING;
- if (tipc_bclink_acks_missing(l_ptr->owner))
- tipc_link_proto_xmit(l_ptr, STATE_MSG,
- 0, 0, 0, 0);
- } else if (l_ptr->silent_intv_cnt <
- l_ptr->abort_limit) {
- tipc_link_proto_xmit(l_ptr, STATE_MSG,
- 1, 0, 0, 0);
- } else { /* Link has failed */
- pr_debug("%s<%s>, peer not responding\n",
- link_rst_msg, l_ptr->name);
- tipc_link_reset(l_ptr);
- l_ptr->state = RESET_UNKNOWN;
- tipc_link_proto_xmit(l_ptr, RESET_MSG,
- 0, 0, 0, 0);
- }
- break;
- default:
- pr_err("%s%u in WU state\n", link_unk_evt, event);
- }
- break;
- case RESET_UNKNOWN:
- switch (event) {
- case TRAFFIC_MSG_EVT:
- break;
- case ACTIVATE_MSG:
- other = l_ptr->owner->active_links[0];
- if (other && link_working_unknown(other))
- break;
- l_ptr->state = WORKING_WORKING;
- link_activate(l_ptr);
- tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
- if (l_ptr->owner->working_links == 1)
- tipc_link_sync_xmit(l_ptr);
- break;
- case RESET_MSG:
- l_ptr->state = RESET_RESET;
- tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
- 1, 0, 0, 0);
- break;
- case STARTING_EVT:
- l_ptr->flags |= LINK_STARTED;
- link_set_timer(l_ptr, timer_intv);
- break;
- case SILENCE_EVT:
- tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0);
- break;
- default:
- pr_err("%s%u in RU state\n", link_unk_evt, event);
- }
- break;
- case RESET_RESET:
- switch (event) {
- case TRAFFIC_MSG_EVT:
- case ACTIVATE_MSG:
- other = l_ptr->owner->active_links[0];
- if (other && link_working_unknown(other))
- break;
- l_ptr->state = WORKING_WORKING;
- link_activate(l_ptr);
- tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
- if (l_ptr->owner->working_links == 1)
- tipc_link_sync_xmit(l_ptr);
- break;
- case RESET_MSG:
- break;
- case SILENCE_EVT:
- tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
- 0, 0, 0, 0);
- break;
- default:
- pr_err("%s%u in RR state\n", link_unk_evt, event);
- }
- break;
- default:
- pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
- }
+ /* Prepare for renewed mtu size negotiation */
+ l->mtu = l->advertised_mtu;
+
+ /* Clean up all queues: */
+ __skb_queue_purge(&l->transmq);
+ __skb_queue_purge(&l->deferdq);
+ skb_queue_splice_init(&l->wakeupq, l->inputq);
+
+ tipc_link_purge_backlog(l);
+ kfree_skb(l->reasm_buf);
+ kfree_skb(l->failover_reasm_skb);
+ l->reasm_buf = NULL;
+ l->failover_reasm_skb = NULL;
+ l->rcv_unacked = 0;
+ l->snd_nxt = 1;
+ l->rcv_nxt = 1;
+ l->silent_intv_cnt = 0;
+ l->stats.recv_info = 0;
+ l->stale_count = 0;
+ link_reset_statistics(l);
}
/**
@@ -635,8 +613,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
* @link: link to use
* @list: chain of buffers containing message
*
- * Consumes the buffer chain, except when returning -ELINKCONG,
- * since the caller then may want to make more send attempts.
+ * Consumes the buffer chain, except when returning an error code.
* Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
* Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
*/
@@ -650,7 +627,7 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
u16 ack = mod(link->rcv_nxt - 1);
u16 seqno = link->snd_nxt;
u16 bc_last_in = link->owner->bclink.last_in;
- struct tipc_media_addr *addr = &link->media_addr;
+ struct tipc_media_addr *addr = link->media_addr;
struct sk_buff_head *transmq = &link->transmq;
struct sk_buff_head *backlogq = &link->backlogq;
struct sk_buff *skb, *bskb;
@@ -660,10 +637,9 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
return link_schedule_user(link, list);
}
- if (unlikely(msg_size(msg) > mtu)) {
- __skb_queue_purge(list);
+ if (unlikely(msg_size(msg) > mtu))
return -EMSGSIZE;
- }
+
/* Prepare each packet for sending, and add to relevant queue: */
while (skb_queue_len(list)) {
skb = skb_peek(list);
@@ -700,101 +676,76 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
return 0;
}
-static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
-{
- skb_queue_head_init(list);
- __skb_queue_tail(list, skb);
-}
-
-static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
-{
- struct sk_buff_head head;
-
- skb2list(skb, &head);
- return __tipc_link_xmit(link->owner->net, link, &head);
-}
-
-/* tipc_link_xmit_skb(): send single buffer to destination
- * Buffers sent via this functon are generally TIPC_SYSTEM_IMPORTANCE
- * messages, which will not be rejected
- * The only exception is datagram messages rerouted after secondary
- * lookup, which are rare and safe to dispose of anyway.
- * TODO: Return real return value, and let callers use
- * tipc_wait_for_sendpkt() where applicable
- */
-int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
- u32 selector)
-{
- struct sk_buff_head head;
- int rc;
-
- skb2list(skb, &head);
- rc = tipc_link_xmit(net, &head, dnode, selector);
- if (rc == -ELINKCONG)
- kfree_skb(skb);
- return 0;
-}
-
/**
- * tipc_link_xmit() is the general link level function for message sending
- * @net: the applicable net namespace
+ * tipc_link_xmit(): enqueue buffer list according to queue situation
+ * @link: link to use
* @list: chain of buffers containing message
- * @dsz: amount of user data to be sent
- * @dnode: address of destination node
- * @selector: a number used for deterministic link selection
- * Consumes the buffer chain, except when returning -ELINKCONG
- * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
+ * @xmitq: returned list of packets to be sent by caller
+ *
+ * Consumes the buffer chain, except when returning -ELINKCONG,
+ * since the caller then may want to make more send attempts.
+ * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
+ * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
*/
-int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
- u32 selector)
+int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
+ struct sk_buff_head *xmitq)
{
- struct tipc_link *link = NULL;
- struct tipc_node *node;
- int rc = -EHOSTUNREACH;
+ struct tipc_msg *hdr = buf_msg(skb_peek(list));
+ unsigned int maxwin = l->window;
+ unsigned int i, imp = msg_importance(hdr);
+ unsigned int mtu = l->mtu;
+ u16 ack = l->rcv_nxt - 1;
+ u16 seqno = l->snd_nxt;
+ u16 bc_last_in = l->owner->bclink.last_in;
+ struct sk_buff_head *transmq = &l->transmq;
+ struct sk_buff_head *backlogq = &l->backlogq;
+ struct sk_buff *skb, *_skb, *bskb;
- node = tipc_node_find(net, dnode);
- if (node) {
- tipc_node_lock(node);
- link = node->active_links[selector & 1];
- if (link)
- rc = __tipc_link_xmit(net, link, list);
- tipc_node_unlock(node);
- tipc_node_put(node);
- }
- if (link)
- return rc;
-
- if (likely(in_own_node(net, dnode))) {
- tipc_sk_rcv(net, list);
- return 0;
+ /* Match msg importance against this and all higher backlog limits: */
+ for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
+ if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
+ return link_schedule_user(l, list);
}
+ if (unlikely(msg_size(hdr) > mtu))
+ return -EMSGSIZE;
- __skb_queue_purge(list);
- return rc;
-}
-
-/*
- * tipc_link_sync_xmit - synchronize broadcast link endpoints.
- *
- * Give a newly added peer node the sequence number where it should
- * start receiving and acking broadcast packets.
- *
- * Called with node locked
- */
-static void tipc_link_sync_xmit(struct tipc_link *link)
-{
- struct sk_buff *skb;
- struct tipc_msg *msg;
-
- skb = tipc_buf_acquire(INT_H_SIZE);
- if (!skb)
- return;
+ /* Prepare each packet for sending, and add to relevant queue: */
+ while (skb_queue_len(list)) {
+ skb = skb_peek(list);
+ hdr = buf_msg(skb);
+ msg_set_seqno(hdr, seqno);
+ msg_set_ack(hdr, ack);
+ msg_set_bcast_ack(hdr, bc_last_in);
- msg = buf_msg(skb);
- tipc_msg_init(link_own_addr(link), msg, BCAST_PROTOCOL, STATE_MSG,
- INT_H_SIZE, link->addr);
- msg_set_last_bcast(msg, link->owner->bclink.acked);
- __tipc_link_xmit_skb(link, skb);
+ if (likely(skb_queue_len(transmq) < maxwin)) {
+ _skb = skb_clone(skb, GFP_ATOMIC);
+ if (!_skb)
+ return -ENOBUFS;
+ __skb_dequeue(list);
+ __skb_queue_tail(transmq, skb);
+ __skb_queue_tail(xmitq, _skb);
+ l->rcv_unacked = 0;
+ seqno++;
+ continue;
+ }
+ if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
+ kfree_skb(__skb_dequeue(list));
+ l->stats.sent_bundled++;
+ continue;
+ }
+ if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
+ kfree_skb(__skb_dequeue(list));
+ __skb_queue_tail(backlogq, bskb);
+ l->backlog[msg_importance(buf_msg(bskb))].len++;
+ l->stats.sent_bundled++;
+ l->stats.sent_bundles++;
+ continue;
+ }
+ l->backlog[imp].len += skb_queue_len(list);
+ skb_queue_splice_tail_init(list, backlogq);
+ }
+ l->snd_nxt = seqno;
+ return 0;
}
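
The reworked tipc_link_xmit() no longer sends under the node lock: while the send window has room it keeps the original buffer on transmq for possible retransmission and queues a clone on the caller's xmitq; anything beyond the window is parked on backlogq for tipc_link_advance_backlog() to release once acks free up space. A sketch of that split follows, with the queues reduced to arrays and the bundling cases omitted; all names are illustrative.

#include <assert.h>

#define WINDOW 2

struct q { int pkt[16]; int len; };

static void push(struct q *q, int pkt) { q->pkt[q->len++] = pkt; }

/* Move packets from 'list' into 'transmq' (retransmit copy) and
 * 'xmitq' (wire copy) while the send window has room; the rest is
 * parked on 'backlogq'. */
static void link_xmit(struct q *list, struct q *transmq,
		      struct q *backlogq, struct q *xmitq)
{
	for (int i = 0; i < list->len; i++) {
		if (transmq->len < WINDOW) {
			push(transmq, list->pkt[i]);  /* kept for retransmit */
			push(xmitq, list->pkt[i]);    /* clone goes to wire */
		} else {
			push(backlogq, list->pkt[i]); /* over the window */
		}
	}
	list->len = 0;                                /* chain consumed */
}

int main(void)
{
	struct q list = { {1, 2, 3}, 3 }, transmq = { {0}, 0 };
	struct q backlogq = { {0}, 0 }, xmitq = { {0}, 0 };

	link_xmit(&list, &transmq, &backlogq, &xmitq);
	assert(transmq.len == 2 && xmitq.len == 2 && backlogq.len == 1);
	return 0;
}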
/*
@@ -842,29 +793,37 @@ void tipc_link_push_packets(struct tipc_link *link)
link->rcv_unacked = 0;
__skb_queue_tail(&link->transmq, skb);
tipc_bearer_send(link->owner->net, link->bearer_id,
- skb, &link->media_addr);
+ skb, link->media_addr);
}
link->snd_nxt = seqno;
}
-void tipc_link_reset_all(struct tipc_node *node)
+void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
- char addr_string[16];
- u32 i;
-
- tipc_node_lock(node);
+ struct sk_buff *skb, *_skb;
+ struct tipc_msg *hdr;
+ u16 seqno = l->snd_nxt;
+ u16 ack = l->rcv_nxt - 1;
- pr_warn("Resetting all links to %s\n",
- tipc_addr_string_fill(addr_string, node->addr));
-
- for (i = 0; i < MAX_BEARERS; i++) {
- if (node->links[i]) {
- link_print(node->links[i], "Resetting link\n");
- tipc_link_reset(node->links[i]);
- }
+ while (skb_queue_len(&l->transmq) < l->window) {
+ skb = skb_peek(&l->backlogq);
+ if (!skb)
+ break;
+ _skb = skb_clone(skb, GFP_ATOMIC);
+ if (!_skb)
+ break;
+ __skb_dequeue(&l->backlogq);
+ hdr = buf_msg(skb);
+ l->backlog[msg_importance(hdr)].len--;
+ __skb_queue_tail(&l->transmq, skb);
+ __skb_queue_tail(xmitq, _skb);
+ msg_set_ack(hdr, ack);
+ msg_set_seqno(hdr, seqno);
+ msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+ l->rcv_unacked = 0;
+ seqno++;
}
-
- tipc_node_unlock(node);
+ l->snd_nxt = seqno;
}
static void link_retransmit_failure(struct tipc_link *l_ptr,
@@ -877,9 +836,12 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
if (l_ptr->addr) {
/* Handle failure on standard link */
- link_print(l_ptr, "Resetting link\n");
- tipc_link_reset(l_ptr);
-
+ link_print(l_ptr, "Resetting link ");
+ pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
+ msg_user(msg), msg_type(msg), msg_size(msg),
+ msg_errcode(msg));
+ pr_info("sqno %u, prev: %x, src: %x\n",
+ msg_seqno(msg), msg_prevnode(msg), msg_orignode(msg));
} else {
/* Handle failure on broadcast link */
struct tipc_node *n_ptr;
@@ -934,191 +896,45 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
- &l_ptr->media_addr);
+ l_ptr->media_addr);
retransmits--;
l_ptr->stats.retransmitted++;
}
}
-/* link_synch(): check if all packets arrived before the synch
- * point have been consumed
- * Returns true if the parallel links are synched, otherwise false
- */
-static bool link_synch(struct tipc_link *l)
+static int tipc_link_retransm(struct tipc_link *l, int retransm,
+ struct sk_buff_head *xmitq)
{
- unsigned int post_synch;
- struct tipc_link *pl;
+ struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
+ struct tipc_msg *hdr;
- pl = tipc_parallel_link(l);
- if (pl == l)
- goto synched;
-
- /* Was last pre-synch packet added to input queue ? */
- if (less_eq(pl->rcv_nxt, l->synch_point))
- return false;
-
- /* Is it still in the input queue ? */
- post_synch = mod(pl->rcv_nxt - l->synch_point) - 1;
- if (skb_queue_len(&pl->inputq) > post_synch)
- return false;
-synched:
- l->flags &= ~LINK_SYNCHING;
- return true;
-}
-
-static void link_retrieve_defq(struct tipc_link *link,
- struct sk_buff_head *list)
-{
- u16 seq_no;
-
- if (skb_queue_empty(&link->deferdq))
- return;
-
- seq_no = buf_seqno(skb_peek(&link->deferdq));
- if (seq_no == link->rcv_nxt)
- skb_queue_splice_tail_init(&link->deferdq, list);
-}
-
-/**
- * tipc_rcv - process TIPC packets/messages arriving from off-node
- * @net: the applicable net namespace
- * @skb: TIPC packet
- * @b_ptr: pointer to bearer message arrived on
- *
- * Invoked with no locks held. Bearer pointer must point to a valid bearer
- * structure (i.e. cannot be NULL), but bearer can be inactive.
- */
-void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
-{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct sk_buff_head head;
- struct tipc_node *n_ptr;
- struct tipc_link *l_ptr;
- struct sk_buff *skb1, *tmp;
- struct tipc_msg *msg;
- u16 seq_no;
- u16 ackd;
- u32 released;
-
- skb2list(skb, &head);
-
- while ((skb = __skb_dequeue(&head))) {
- /* Ensure message is well-formed */
- if (unlikely(!tipc_msg_validate(skb)))
- goto discard;
-
- /* Handle arrival of a non-unicast link message */
- msg = buf_msg(skb);
- if (unlikely(msg_non_seq(msg))) {
- if (msg_user(msg) == LINK_CONFIG)
- tipc_disc_rcv(net, skb, b_ptr);
- else
- tipc_bclink_rcv(net, skb);
- continue;
- }
-
- /* Discard unicast link messages destined for another node */
- if (unlikely(!msg_short(msg) &&
- (msg_destnode(msg) != tn->own_addr)))
- goto discard;
-
- /* Locate neighboring node that sent message */
- n_ptr = tipc_node_find(net, msg_prevnode(msg));
- if (unlikely(!n_ptr))
- goto discard;
-
- tipc_node_lock(n_ptr);
- /* Locate unicast link endpoint that should handle message */
- l_ptr = n_ptr->links[b_ptr->identity];
- if (unlikely(!l_ptr))
- goto unlock;
-
- /* Verify that communication with node is currently allowed */
- if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
- msg_user(msg) == LINK_PROTOCOL &&
- (msg_type(msg) == RESET_MSG ||
- msg_type(msg) == ACTIVATE_MSG) &&
- !msg_redundant_link(msg))
- n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;
-
- if (tipc_node_blocked(n_ptr))
- goto unlock;
-
- /* Validate message sequence number info */
- seq_no = msg_seqno(msg);
- ackd = msg_ack(msg);
-
- /* Release acked messages */
- if (unlikely(n_ptr->bclink.acked != msg_bcast_ack(msg)))
- tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
-
- released = 0;
- skb_queue_walk_safe(&l_ptr->transmq, skb1, tmp) {
- if (more(buf_seqno(skb1), ackd))
- break;
- __skb_unlink(skb1, &l_ptr->transmq);
- kfree_skb(skb1);
- released = 1;
- }
-
- /* Try sending any messages link endpoint has pending */
- if (unlikely(skb_queue_len(&l_ptr->backlogq)))
- tipc_link_push_packets(l_ptr);
-
- if (released && !skb_queue_empty(&l_ptr->wakeupq))
- link_prepare_wakeup(l_ptr);
-
- /* Process the incoming packet */
- if (unlikely(!link_working_working(l_ptr))) {
- if (msg_user(msg) == LINK_PROTOCOL) {
- tipc_link_proto_rcv(l_ptr, skb);
- link_retrieve_defq(l_ptr, &head);
- skb = NULL;
- goto unlock;
- }
-
- /* Traffic message. Conditionally activate link */
- link_state_event(l_ptr, TRAFFIC_MSG_EVT);
-
- if (link_working_working(l_ptr)) {
- /* Re-insert buffer in front of queue */
- __skb_queue_head(&head, skb);
- skb = NULL;
- goto unlock;
- }
- goto unlock;
- }
-
- /* Link is now in state WORKING_WORKING */
- if (unlikely(seq_no != l_ptr->rcv_nxt)) {
- link_handle_out_of_seq_msg(l_ptr, skb);
- link_retrieve_defq(l_ptr, &head);
- skb = NULL;
- goto unlock;
- }
- l_ptr->silent_intv_cnt = 0;
+ if (!skb)
+ return 0;
- /* Synchronize with parallel link if applicable */
- if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) {
- if (!link_synch(l_ptr))
- goto unlock;
- }
- l_ptr->rcv_nxt++;
- if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
- link_retrieve_defq(l_ptr, &head);
- if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
- l_ptr->stats.sent_acks++;
- tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
- }
- tipc_link_input(l_ptr, skb);
- skb = NULL;
-unlock:
- tipc_node_unlock(n_ptr);
- tipc_node_put(n_ptr);
-discard:
- if (unlikely(skb))
- kfree_skb(skb);
+ /* Detect repeated retransmit failures on same packet */
+ if (likely(l->last_retransm != buf_seqno(skb))) {
+ l->last_retransm = buf_seqno(skb);
+ l->stale_count = 1;
+ } else if (++l->stale_count > 100) {
+ link_retransmit_failure(l, skb);
+ return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
+ }
+ skb_queue_walk(&l->transmq, skb) {
+ if (!retransm)
+ return 0;
+ hdr = buf_msg(skb);
+ _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
+ if (!_skb)
+ return 0;
+ hdr = buf_msg(_skb);
+ msg_set_ack(hdr, l->rcv_nxt - 1);
+ msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+ _skb->priority = TC_PRIO_CONTROL;
+ __skb_queue_tail(xmitq, _skb);
+ retransm--;
+ l->stats.retransmitted++;
}
+ return 0;
}
/* tipc_data_input - deliver data and name distr msgs to upper layer
@@ -1126,29 +942,22 @@ discard:
* Consumes buffer if message is of right type
* Node lock must be held
*/
-static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
+static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb,
+ struct sk_buff_head *inputq)
{
struct tipc_node *node = link->owner;
- struct tipc_msg *msg = buf_msg(skb);
- u32 dport = msg_destport(msg);
- switch (msg_user(msg)) {
+ switch (msg_user(buf_msg(skb))) {
case TIPC_LOW_IMPORTANCE:
case TIPC_MEDIUM_IMPORTANCE:
case TIPC_HIGH_IMPORTANCE:
case TIPC_CRITICAL_IMPORTANCE:
case CONN_MANAGER:
- if (tipc_skb_queue_tail(&link->inputq, skb, dport)) {
- node->inputq = &link->inputq;
- node->action_flags |= TIPC_MSG_EVT;
- }
+ __skb_queue_tail(inputq, skb);
return true;
case NAME_DISTRIBUTOR:
node->bclink.recv_permitted = true;
- node->namedq = &link->namedq;
- skb_queue_tail(&link->namedq, skb);
- if (skb_queue_len(&link->namedq) == 1)
- node->action_flags |= TIPC_NAMED_MSG_EVT;
+ skb_queue_tail(link->namedq, skb);
return true;
case MSG_BUNDLER:
case TUNNEL_PROTOCOL:
@@ -1165,54 +974,160 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
/* tipc_link_input - process packet that has passed link protocol check
*
* Consumes buffer
- * Node lock must be held
*/
-static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
+static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
+ struct sk_buff_head *inputq)
{
- struct tipc_node *node = link->owner;
- struct tipc_msg *msg = buf_msg(skb);
+ struct tipc_node *node = l->owner;
+ struct tipc_msg *hdr = buf_msg(skb);
+ struct sk_buff **reasm_skb = &l->reasm_buf;
struct sk_buff *iskb;
+ int usr = msg_user(hdr);
+ int rc = 0;
int pos = 0;
+ int ipos = 0;
- if (likely(tipc_data_input(link, skb)))
- return;
+ if (unlikely(usr == TUNNEL_PROTOCOL)) {
+ if (msg_type(hdr) == SYNCH_MSG) {
+ __skb_queue_purge(&l->deferdq);
+ goto drop;
+ }
+ if (!tipc_msg_extract(skb, &iskb, &ipos))
+ return rc;
+ kfree_skb(skb);
+ skb = iskb;
+ hdr = buf_msg(skb);
+ if (less(msg_seqno(hdr), l->drop_point))
+ goto drop;
+ if (tipc_data_input(l, skb, inputq))
+ return rc;
+ usr = msg_user(hdr);
+ reasm_skb = &l->failover_reasm_skb;
+ }
- switch (msg_user(msg)) {
- case TUNNEL_PROTOCOL:
- if (msg_dup(msg)) {
- link->flags |= LINK_SYNCHING;
- link->synch_point = msg_seqno(msg_get_wrapped(msg));
- kfree_skb(skb);
- break;
+ if (usr == MSG_BUNDLER) {
+ l->stats.recv_bundles++;
+ l->stats.recv_bundled += msg_msgcnt(hdr);
+ while (tipc_msg_extract(skb, &iskb, &pos))
+ tipc_data_input(l, iskb, inputq);
+ return 0;
+ } else if (usr == MSG_FRAGMENTER) {
+ l->stats.recv_fragments++;
+ if (tipc_buf_append(reasm_skb, &skb)) {
+ l->stats.recv_fragmented++;
+ tipc_data_input(l, skb, inputq);
+ } else if (!*reasm_skb) {
+ return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
}
- if (!tipc_link_failover_rcv(link, &skb))
- break;
- if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
- tipc_data_input(link, skb);
+ return 0;
+ } else if (usr == BCAST_PROTOCOL) {
+ tipc_link_sync_rcv(node, skb);
+ return 0;
+ }
+drop:
+ kfree_skb(skb);
+ return 0;
+}
+
+static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
+{
+ bool released = false;
+ struct sk_buff *skb, *tmp;
+
+ skb_queue_walk_safe(&l->transmq, skb, tmp) {
+ if (more(buf_seqno(skb), acked))
break;
+ __skb_unlink(skb, &l->transmq);
+ kfree_skb(skb);
+ released = true;
+ }
+ return released;
+}
+
+/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
+ * @l: the link that should handle the message
+ * @skb: TIPC packet
+ * @xmitq: queue to place packets to be sent after this call
+ */
+int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
+ struct sk_buff_head *xmitq)
+{
+ struct sk_buff_head *arrvq = &l->deferdq;
+ struct sk_buff_head tmpq;
+ struct tipc_msg *hdr;
+ u16 seqno, rcv_nxt;
+ int rc = 0;
+
+ __skb_queue_head_init(&tmpq);
+
+ if (unlikely(!__tipc_skb_queue_sorted(arrvq, skb))) {
+ if (!(skb_queue_len(arrvq) % TIPC_NACK_INTV))
+ tipc_link_build_proto_msg(l, STATE_MSG, 0,
+ 0, 0, 0, xmitq);
+ return rc;
+ }
+
+ while ((skb = skb_peek(arrvq))) {
+ hdr = buf_msg(skb);
+
+ /* Verify and update link state */
+ if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) {
+ __skb_dequeue(arrvq);
+ rc = tipc_link_proto_rcv(l, skb, xmitq);
+ continue;
}
- case MSG_BUNDLER:
- link->stats.recv_bundles++;
- link->stats.recv_bundled += msg_msgcnt(msg);
- while (tipc_msg_extract(skb, &iskb, &pos))
- tipc_data_input(link, iskb);
- break;
- case MSG_FRAGMENTER:
- link->stats.recv_fragments++;
- if (tipc_buf_append(&link->reasm_buf, &skb)) {
- link->stats.recv_fragmented++;
- tipc_data_input(link, skb);
- } else if (!link->reasm_buf) {
- tipc_link_reset(link);
+ if (unlikely(!link_is_up(l))) {
+ rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+ if (!link_is_up(l)) {
+ kfree_skb(__skb_dequeue(arrvq));
+ goto exit;
+ }
}
- break;
- case BCAST_PROTOCOL:
- tipc_link_sync_rcv(node, skb);
- break;
- default:
- break;
- };
+
+ l->silent_intv_cnt = 0;
+
+ /* Forward queues and wake up waiting users */
+ if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
+ tipc_link_advance_backlog(l, xmitq);
+ if (unlikely(!skb_queue_empty(&l->wakeupq)))
+ link_prepare_wakeup(l);
+ }
+
+ /* Defer reception if there is a gap in the sequence */
+ seqno = msg_seqno(hdr);
+ rcv_nxt = l->rcv_nxt;
+ if (unlikely(less(rcv_nxt, seqno))) {
+ l->stats.deferred_recv++;
+ goto exit;
+ }
+
+ __skb_dequeue(arrvq);
+
+ /* Drop if packet already received */
+ if (unlikely(more(rcv_nxt, seqno))) {
+ l->stats.duplicates++;
+ kfree_skb(skb);
+ goto exit;
+ }
+
+ /* Packet can be delivered */
+ l->rcv_nxt++;
+ l->stats.recv_info++;
+ if (unlikely(!tipc_data_input(l, skb, &tmpq)))
+ rc = tipc_link_input(l, skb, &tmpq);
+
+ /* Ack at regular intervals */
+ if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
+ l->rcv_unacked = 0;
+ l->stats.sent_acks++;
+ tipc_link_build_proto_msg(l, STATE_MSG,
+ 0, 0, 0, 0, xmitq);
+ }
+ }
+exit:
+ tipc_skb_queue_splice_tail(&tmpq, l->inputq);
+ return rc;
}
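
Arrivals are first sorted into the deferred queue, then each head-of-queue packet is classified against rcv_nxt: ahead of sequence stays deferred, behind is a duplicate, equal is delivered and rcv_nxt advances. With the u16 helpers from core.h the classification reduces to the sketch below (hypothetical names).

#include <assert.h>
#include <stdint.h>

static uint16_t mod(uint16_t x)            { return x & 0xffffu; }
static int less_eq(uint16_t l, uint16_t r) { return mod(r - l) < 32768u; }
static int more(uint16_t l, uint16_t r)    { return !less_eq(l, r); }
static int less(uint16_t l, uint16_t r)    { return less_eq(l, r) && mod(r) != mod(l); }

enum rcv_verdict { RCV_DEFER, RCV_DUPLICATE, RCV_DELIVER };

static enum rcv_verdict classify(uint16_t *rcv_nxt, uint16_t seqno)
{
	if (less(*rcv_nxt, seqno))
		return RCV_DEFER;      /* gap: keep on deferdq */
	if (more(*rcv_nxt, seqno))
		return RCV_DUPLICATE;  /* already received: drop */
	(*rcv_nxt)++;                  /* in sequence: deliver */
	return RCV_DELIVER;
}

int main(void)
{
	uint16_t rcv_nxt = 65535;      /* about to wrap */

	assert(classify(&rcv_nxt, 1) == RCV_DEFER);
	assert(classify(&rcv_nxt, 65534) == RCV_DUPLICATE);
	assert(classify(&rcv_nxt, 65535) == RCV_DELIVER);
	assert(rcv_nxt == 0);          /* wrapped cleanly */
	return 0;
}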
/**
@@ -1255,458 +1170,250 @@ u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
}
/*
- * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
+ * Send protocol message to the other endpoint.
*/
-static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
- struct sk_buff *buf)
+void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
+ u32 gap, u32 tolerance, u32 priority)
{
- u32 seq_no = buf_seqno(buf);
+ struct sk_buff *skb = NULL;
+ struct sk_buff_head xmitq;
- if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
- tipc_link_proto_rcv(l_ptr, buf);
+ __skb_queue_head_init(&xmitq);
+ tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
+ tolerance, priority, &xmitq);
+ skb = __skb_dequeue(&xmitq);
+ if (!skb)
return;
- }
-
- /* Record OOS packet arrival */
- l_ptr->silent_intv_cnt = 0;
+ tipc_bearer_send(l->owner->net, l->bearer_id, skb, l->media_addr);
+ l->rcv_unacked = 0;
+ kfree_skb(skb);
+}
- /*
- * Discard packet if a duplicate; otherwise add it to deferred queue
- * and notify peer of gap as per protocol specification
- */
- if (less(seq_no, l_ptr->rcv_nxt)) {
- l_ptr->stats.duplicates++;
- kfree_skb(buf);
+/* tipc_link_build_proto_msg: prepare link protocol message for transmission
+ */
+static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
+ u16 rcvgap, int tolerance, int priority,
+ struct sk_buff_head *xmitq)
+{
+ struct sk_buff *skb = NULL;
+ struct tipc_msg *hdr = l->pmsg;
+ u16 snd_nxt = l->snd_nxt;
+ u16 rcv_nxt = l->rcv_nxt;
+ u16 rcv_last = rcv_nxt - 1;
+ int node_up = l->owner->bclink.recv_permitted;
+
+ /* Don't send protocol message during reset or link failover */
+ if (tipc_link_is_blocked(l))
return;
- }
- if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) {
- l_ptr->stats.deferred_recv++;
- if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1)
- tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
+ msg_set_type(hdr, mtyp);
+ msg_set_net_plane(hdr, l->net_plane);
+ msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+ msg_set_last_bcast(hdr, tipc_bclink_get_last_sent(l->owner->net));
+ msg_set_link_tolerance(hdr, tolerance);
+ msg_set_linkprio(hdr, priority);
+ msg_set_redundant_link(hdr, node_up);
+ msg_set_seq_gap(hdr, 0);
+
+ /* Compatibility: created msg must not be in sequence with pkt flow */
+ msg_set_seqno(hdr, snd_nxt + U16_MAX / 2);
+
+ if (mtyp == STATE_MSG) {
+ if (!tipc_link_is_up(l))
+ return;
+ msg_set_next_sent(hdr, snd_nxt);
+
+ /* Override rcvgap if there are packets in deferred queue */
+ if (!skb_queue_empty(&l->deferdq))
+ rcvgap = buf_seqno(skb_peek(&l->deferdq)) - rcv_nxt;
+ if (rcvgap) {
+ msg_set_seq_gap(hdr, rcvgap);
+ l->stats.sent_nacks++;
+ }
+ msg_set_ack(hdr, rcv_last);
+ msg_set_probe(hdr, probe);
+ if (probe)
+ l->stats.sent_probes++;
+ l->stats.sent_states++;
} else {
- l_ptr->stats.duplicates++;
+ /* RESET_MSG or ACTIVATE_MSG */
+ msg_set_max_pkt(hdr, l->advertised_mtu);
+ msg_set_ack(hdr, l->rcv_nxt - 1);
+ msg_set_next_sent(hdr, 1);
}
+ skb = tipc_buf_acquire(msg_size(hdr));
+ if (!skb)
+ return;
+ skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
+ skb->priority = TC_PRIO_CONTROL;
+ __skb_queue_tail(xmitq, skb);
}
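
The "Compatibility" line above stamps protocol messages with snd_nxt + U16_MAX/2, a sequence number half the ring away from the data flow, so it can never fall inside any plausible receive window and be mistaken for a data packet by an older peer. A quick check of that property using the core.h-style helpers:

#include <assert.h>
#include <stdint.h>

static uint16_t mod(uint16_t x)            { return x & 0xffffu; }
static int less_eq(uint16_t l, uint16_t r) { return mod(r - l) < 32768u; }
static int more(uint16_t l, uint16_t r)    { return !less_eq(l, r); }
static int less(uint16_t l, uint16_t r)    { return less_eq(l, r) && mod(r) != mod(l); }

static int in_range(uint16_t val, uint16_t min, uint16_t max)
{
	return !less(val, min) && !more(val, max);
}

int main(void)
{
	for (uint32_t snd = 0; snd <= 0xffff; snd++) {
		uint16_t proto = (uint16_t)(snd + 0xffff / 2);

		/* never within +/- 16k of the current data flow */
		assert(!in_range(proto, (uint16_t)(snd - 16384),
				 (uint16_t)(snd + 16384)));
	}
	return 0;
}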
-/*
- * Send protocol message to the other endpoint.
+/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
+ * with contents of the link's transmit and backlog queues.
*/
-void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
- u32 gap, u32 tolerance, u32 priority)
+void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
+ int mtyp, struct sk_buff_head *xmitq)
{
- struct sk_buff *buf = NULL;
- struct tipc_msg *msg = l_ptr->pmsg;
- u32 msg_size = sizeof(l_ptr->proto_msg);
- int r_flag;
- u16 last_rcv;
-
- /* Don't send protocol message during link failover */
- if (l_ptr->flags & LINK_FAILINGOVER)
- return;
+ struct sk_buff *skb, *tnlskb;
+ struct tipc_msg *hdr, tnlhdr;
+ struct sk_buff_head *queue = &l->transmq;
+ struct sk_buff_head tmpxq, tnlq;
+ u16 pktlen, pktcnt, seqno = l->snd_nxt;
- /* Abort non-RESET send if communication with node is prohibited */
- if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
+ if (!tnl)
return;
- /* Create protocol message with "out-of-sequence" sequence number */
- msg_set_type(msg, msg_typ);
- msg_set_net_plane(msg, l_ptr->net_plane);
- msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
- msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));
-
- if (msg_typ == STATE_MSG) {
- u16 next_sent = l_ptr->snd_nxt;
+ skb_queue_head_init(&tnlq);
+ skb_queue_head_init(&tmpxq);
- if (!tipc_link_is_up(l_ptr))
+ /* At least one packet required for safe algorithm => add dummy */
+ skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
+ BASIC_H_SIZE, 0, l->addr, link_own_addr(l),
+ 0, 0, TIPC_ERR_NO_PORT);
+ if (!skb) {
+ pr_warn("%sunable to create tunnel packet\n", link_co_err);
+ return;
+ }
+ skb_queue_tail(&tnlq, skb);
+ tipc_link_xmit(l, &tnlq, &tmpxq);
+ __skb_queue_purge(&tmpxq);
+
+ /* Initialize reusable tunnel packet header */
+ tipc_msg_init(link_own_addr(l), &tnlhdr, TUNNEL_PROTOCOL,
+ mtyp, INT_H_SIZE, l->addr);
+ pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
+ msg_set_msgcnt(&tnlhdr, pktcnt);
+ msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
+tnl:
+ /* Wrap each packet into a tunnel packet */
+ skb_queue_walk(queue, skb) {
+ hdr = buf_msg(skb);
+ if (queue == &l->backlogq)
+ msg_set_seqno(hdr, seqno++);
+ pktlen = msg_size(hdr);
+ msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
+ tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
+ if (!tnlskb) {
+ pr_warn("%sunable to send packet\n", link_co_err);
return;
- msg_set_next_sent(msg, next_sent);
- if (!skb_queue_empty(&l_ptr->deferdq)) {
- last_rcv = buf_seqno(skb_peek(&l_ptr->deferdq));
- gap = mod(last_rcv - l_ptr->rcv_nxt);
}
- msg_set_seq_gap(msg, gap);
- if (gap)
- l_ptr->stats.sent_nacks++;
- msg_set_link_tolerance(msg, tolerance);
- msg_set_linkprio(msg, priority);
- msg_set_max_pkt(msg, l_ptr->mtu);
- msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
- msg_set_probe(msg, probe_msg != 0);
- if (probe_msg)
- l_ptr->stats.sent_probes++;
- l_ptr->stats.sent_states++;
- } else { /* RESET_MSG or ACTIVATE_MSG */
- msg_set_ack(msg, mod(l_ptr->failover_checkpt - 1));
- msg_set_seq_gap(msg, 0);
- msg_set_next_sent(msg, 1);
- msg_set_probe(msg, 0);
- msg_set_link_tolerance(msg, l_ptr->tolerance);
- msg_set_linkprio(msg, l_ptr->priority);
- msg_set_max_pkt(msg, l_ptr->advertised_mtu);
+ skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
+ skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
+ __skb_queue_tail(&tnlq, tnlskb);
+ }
+ if (queue != &l->backlogq) {
+ queue = &l->backlogq;
+ goto tnl;
}
- r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
- msg_set_redundant_link(msg, r_flag);
- msg_set_linkprio(msg, l_ptr->priority);
- msg_set_size(msg, msg_size);
-
- msg_set_seqno(msg, mod(l_ptr->snd_nxt + (0xffff / 2)));
-
- buf = tipc_buf_acquire(msg_size);
- if (!buf)
- return;
+ tipc_link_xmit(tnl, &tnlq, xmitq);
- skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
- buf->priority = TC_PRIO_CONTROL;
- tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
- &l_ptr->media_addr);
- l_ptr->rcv_unacked = 0;
- kfree_skb(buf);
+ if (mtyp == FAILOVER_MSG) {
+ tnl->drop_point = l->rcv_nxt;
+ tnl->failover_reasm_skb = l->reasm_buf;
+ l->reasm_buf = NULL;
+ }
}
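
For illustration, here is a hypothetical stand-alone sketch of the wrap step above: each original packet gets an INT_H_SIZE tunnel header placed in front of it, and the header's size field covers header plus payload. Only the buffer layout and the 40-byte header size are taken from the code; the simplified header struct and main() are made up.

/* Hypothetical stand-alone sketch of the tunnel-wrap step in
 * tipc_link_tnl_prepare(). Only the buffer layout is taken from
 * the kernel code; the header struct is a simplification.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define INT_H_SIZE 40   /* internal TIPC header size, as above */

struct tnl_hdr {        /* simplified stand-in for struct tipc_msg */
        unsigned int size;  /* total size: header + wrapped packet */
        unsigned char pad[INT_H_SIZE - sizeof(unsigned int)];
};

/* Wrap one packet: allocate hdr+payload, copy header, then payload */
static unsigned char *tnl_wrap(const void *pkt, unsigned int pktlen)
{
        struct tnl_hdr hdr = { .size = INT_H_SIZE + pktlen };
        unsigned char *tnlskb = malloc(INT_H_SIZE + pktlen);

        if (!tnlskb)
                return NULL;
        memcpy(tnlskb, &hdr, INT_H_SIZE);          /* tunnel header */
        memcpy(tnlskb + INT_H_SIZE, pkt, pktlen);  /* original pkt  */
        return tnlskb;
}

int main(void)
{
        const char pkt[] = "original packet";
        unsigned char *tnl = tnl_wrap(pkt, sizeof(pkt));

        if (tnl) {
                printf("wrapped size: %u\n", ((struct tnl_hdr *)tnl)->size);
                free(tnl);
        }
        return 0;
}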
-/*
- * Receive protocol message :
+/* tipc_link_proto_rcv(): receive link-level protocol message:
* Note that network plane id propagates through the network, and may
- * change at any time. The node with lowest address rules
+ * change at any time. The node with the lowest numerical id determines
+ * the network plane.
*/
-static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
- struct sk_buff *buf)
+static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
+ struct sk_buff_head *xmitq)
{
- u32 rec_gap = 0;
- u32 msg_tol;
- struct tipc_msg *msg = buf_msg(buf);
+ struct tipc_msg *hdr = buf_msg(skb);
+ u16 rcvgap = 0;
+ u16 nacked_gap = msg_seq_gap(hdr);
+ u16 peers_snd_nxt = msg_next_sent(hdr);
+ u16 peers_tol = msg_link_tolerance(hdr);
+ u16 peers_prio = msg_linkprio(hdr);
+ u16 rcv_nxt = l->rcv_nxt;
+ char *if_name;
+ int rc = 0;
- if (l_ptr->flags & LINK_FAILINGOVER)
+ if (tipc_link_is_blocked(l))
goto exit;
- if (l_ptr->net_plane != msg_net_plane(msg))
- if (link_own_addr(l_ptr) > msg_prevnode(msg))
- l_ptr->net_plane = msg_net_plane(msg);
-
- switch (msg_type(msg)) {
+ if (link_own_addr(l) > msg_prevnode(hdr))
+ l->net_plane = msg_net_plane(hdr);
+ switch (msg_type(hdr)) {
case RESET_MSG:
- if (!link_working_unknown(l_ptr) &&
- (l_ptr->peer_session != INVALID_SESSION)) {
- if (less_eq(msg_session(msg), l_ptr->peer_session))
- break; /* duplicate or old reset: ignore */
- }
-
- if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
- link_working_unknown(l_ptr))) {
- /*
- * peer has lost contact -- don't allow peer's links
- * to reactivate before we recognize loss & clean up
- */
- l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
- }
-
- link_state_event(l_ptr, RESET_MSG);
+ /* Ignore duplicate RESET with old session number */
+ if ((less_eq(msg_session(hdr), l->peer_session)) &&
+ (l->peer_session != WILDCARD_SESSION))
+ break;
/* fall thru' */
- case ACTIVATE_MSG:
- /* Update link settings according other endpoint's values */
- strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
- msg_tol = msg_link_tolerance(msg);
- if (msg_tol > l_ptr->tolerance)
- link_set_supervision_props(l_ptr, msg_tol);
-
- if (msg_linkprio(msg) > l_ptr->priority)
- l_ptr->priority = msg_linkprio(msg);
-
- if (l_ptr->mtu > msg_max_pkt(msg))
- l_ptr->mtu = msg_max_pkt(msg);
-
- /* Synchronize broadcast link info, if not done previously */
- if (!tipc_node_is_up(l_ptr->owner)) {
- l_ptr->owner->bclink.last_sent =
- l_ptr->owner->bclink.last_in =
- msg_last_bcast(msg);
- l_ptr->owner->bclink.oos_state = 0;
- }
-
- l_ptr->peer_session = msg_session(msg);
- l_ptr->peer_bearer_id = msg_bearer_id(msg);
-
- if (msg_type(msg) == ACTIVATE_MSG)
- link_state_event(l_ptr, ACTIVATE_MSG);
- break;
- case STATE_MSG:
+ case ACTIVATE_MSG:
- msg_tol = msg_link_tolerance(msg);
- if (msg_tol)
- link_set_supervision_props(l_ptr, msg_tol);
-
- if (msg_linkprio(msg) &&
- (msg_linkprio(msg) != l_ptr->priority)) {
- pr_debug("%s<%s>, priority change %u->%u\n",
- link_rst_msg, l_ptr->name,
- l_ptr->priority, msg_linkprio(msg));
- l_ptr->priority = msg_linkprio(msg);
- tipc_link_reset(l_ptr); /* Enforce change to take effect */
+ /* Complete own link name with peer's interface name */
+ if_name = strrchr(l->name, ':') + 1;
+ if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
break;
- }
-
- /* Record reception; force mismatch at next timeout: */
- l_ptr->silent_intv_cnt = 0;
-
- link_state_event(l_ptr, TRAFFIC_MSG_EVT);
- l_ptr->stats.recv_states++;
- if (link_reset_unknown(l_ptr))
+ if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
break;
+ strncpy(if_name, msg_data(hdr), TIPC_MAX_IF_NAME);
- if (less_eq(l_ptr->rcv_nxt, msg_next_sent(msg)))
- rec_gap = mod(msg_next_sent(msg) - l_ptr->rcv_nxt);
+ /* Update own tolerance if peer indicates a non-zero value */
+ if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+ l->tolerance = peers_tol;
- if (msg_probe(msg))
- l_ptr->stats.recv_probes++;
+ /* Update own priority if peer's priority is higher */
+ if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
+ l->priority = peers_prio;
- /* Protocol message before retransmits, reduce loss risk */
- if (l_ptr->owner->bclink.recv_permitted)
- tipc_bclink_update_link_state(l_ptr->owner,
- msg_last_bcast(msg));
-
- if (rec_gap || (msg_probe(msg))) {
- tipc_link_proto_xmit(l_ptr, STATE_MSG, 0,
- rec_gap, 0, 0);
- }
- if (msg_seq_gap(msg)) {
- l_ptr->stats.recv_nacks++;
- tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->transmq),
- msg_seq_gap(msg));
+ if (msg_type(hdr) == RESET_MSG) {
+ rc |= tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
+ } else if (!link_is_up(l)) {
+ tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
+ rc |= tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
}
+ l->peer_session = msg_session(hdr);
+ l->peer_bearer_id = msg_bearer_id(hdr);
+ if (l->mtu > msg_max_pkt(hdr))
+ l->mtu = msg_max_pkt(hdr);
break;
- }
-exit:
- kfree_skb(buf);
-}
-
-
-/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
- * a different bearer. Owner node is locked.
- */
-static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
- struct tipc_msg *tunnel_hdr,
- struct tipc_msg *msg,
- u32 selector)
-{
- struct tipc_link *tunnel;
- struct sk_buff *skb;
- u32 length = msg_size(msg);
-
- tunnel = l_ptr->owner->active_links[selector & 1];
- if (!tipc_link_is_up(tunnel)) {
- pr_warn("%stunnel link no longer available\n", link_co_err);
- return;
- }
- msg_set_size(tunnel_hdr, length + INT_H_SIZE);
- skb = tipc_buf_acquire(length + INT_H_SIZE);
- if (!skb) {
- pr_warn("%sunable to send tunnel msg\n", link_co_err);
- return;
- }
- skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
- skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
- __tipc_link_xmit_skb(tunnel, skb);
-}
-
-
-/* tipc_link_failover_send_queue(): A link has gone down, but a second
- * link is still active. We can do failover. Tunnel the failing link's
- * whole send queue via the remaining link. This way, we don't lose
- * any packets, and sequence order is preserved for subsequent traffic
- * sent over the remaining link. Owner node is locked.
- */
-void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
-{
- int msgcount;
- struct tipc_link *tunnel = l_ptr->owner->active_links[0];
- struct tipc_msg tunnel_hdr;
- struct sk_buff *skb;
- int split_bundles;
-
- if (!tunnel)
- return;
- tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
- FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);
-
- skb_queue_walk(&l_ptr->backlogq, skb) {
- msg_set_seqno(buf_msg(skb), l_ptr->snd_nxt);
- l_ptr->snd_nxt = mod(l_ptr->snd_nxt + 1);
- }
- skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
- tipc_link_purge_backlog(l_ptr);
- msgcount = skb_queue_len(&l_ptr->transmq);
- msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
- msg_set_msgcnt(&tunnel_hdr, msgcount);
-
- if (skb_queue_empty(&l_ptr->transmq)) {
- skb = tipc_buf_acquire(INT_H_SIZE);
- if (skb) {
- skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
- msg_set_size(&tunnel_hdr, INT_H_SIZE);
- __tipc_link_xmit_skb(tunnel, skb);
- } else {
- pr_warn("%sunable to send changeover msg\n",
- link_co_err);
- }
- return;
- }
-
- split_bundles = (l_ptr->owner->active_links[0] !=
- l_ptr->owner->active_links[1]);
+ case STATE_MSG:
- skb_queue_walk(&l_ptr->transmq, skb) {
- struct tipc_msg *msg = buf_msg(skb);
+ /* Update own tolerance if peer indicates a non-zero value */
+ if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+ l->tolerance = peers_tol;
- if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
- struct tipc_msg *m = msg_get_wrapped(msg);
- unchar *pos = (unchar *)m;
+ l->silent_intv_cnt = 0;
+ l->stats.recv_states++;
+ if (msg_probe(hdr))
+ l->stats.recv_probes++;
+ rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+ if (!link_is_up(l))
+ break;
- msgcount = msg_msgcnt(msg);
- while (msgcount--) {
- msg_set_seqno(m, msg_seqno(msg));
- tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
- msg_link_selector(m));
- pos += align(msg_size(m));
- m = (struct tipc_msg *)pos;
- }
- } else {
- tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
- msg_link_selector(msg));
+ /* Send NACK if peer has sent pkts we haven't received yet */
+ if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
+ rcvgap = peers_snd_nxt - l->rcv_nxt;
+ if (rcvgap || (msg_probe(hdr)))
+ tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
+ 0, 0, xmitq);
+ tipc_link_release_pkts(l, msg_ack(hdr));
+
+ /* If NACK, retransmit will now start at right position */
+ if (nacked_gap) {
+ rc = tipc_link_retransm(l, nacked_gap, xmitq);
+ l->stats.recv_nacks++;
}
- }
-}
-
-/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
- * duplicate of the first link's send queue via the new link. This way, we
- * are guaranteed that currently queued packets from a socket are delivered
- * before future traffic from the same socket, even if this is using the
- * new link. The last arriving copy of each duplicate packet is dropped at
- * the receiving end by the regular protocol check, so packet cardinality
- * and sequence order is preserved per sender/receiver socket pair.
- * Owner node is locked.
- */
-void tipc_link_dup_queue_xmit(struct tipc_link *link,
- struct tipc_link *tnl)
-{
- struct sk_buff *skb;
- struct tipc_msg tnl_hdr;
- struct sk_buff_head *queue = &link->transmq;
- int mcnt;
- u16 seqno;
-
- tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
- SYNCH_MSG, INT_H_SIZE, link->addr);
- mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq);
- msg_set_msgcnt(&tnl_hdr, mcnt);
- msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id);
-
-tunnel_queue:
- skb_queue_walk(queue, skb) {
- struct sk_buff *outskb;
- struct tipc_msg *msg = buf_msg(skb);
- u32 len = msg_size(msg);
- msg_set_ack(msg, mod(link->rcv_nxt - 1));
- msg_set_bcast_ack(msg, link->owner->bclink.last_in);
- msg_set_size(&tnl_hdr, len + INT_H_SIZE);
- outskb = tipc_buf_acquire(len + INT_H_SIZE);
- if (outskb == NULL) {
- pr_warn("%sunable to send duplicate msg\n",
- link_co_err);
- return;
- }
- skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE);
- skb_copy_to_linear_data_offset(outskb, INT_H_SIZE,
- skb->data, len);
- __tipc_link_xmit_skb(tnl, outskb);
- if (!tipc_link_is_up(link))
- return;
- }
- if (queue == &link->backlogq)
- return;
- seqno = link->snd_nxt;
- skb_queue_walk(&link->backlogq, skb) {
- msg_set_seqno(buf_msg(skb), seqno);
- seqno = mod(seqno + 1);
- }
- queue = &link->backlogq;
- goto tunnel_queue;
-}
-
-/* tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet
- * Owner node is locked.
- */
-static bool tipc_link_failover_rcv(struct tipc_link *link,
- struct sk_buff **skb)
-{
- struct tipc_msg *msg = buf_msg(*skb);
- struct sk_buff *iskb = NULL;
- struct tipc_link *pl = NULL;
- int bearer_id = msg_bearer_id(msg);
- int pos = 0;
-
- if (msg_type(msg) != FAILOVER_MSG) {
- pr_warn("%sunknown tunnel pkt received\n", link_co_err);
- goto exit;
- }
- if (bearer_id >= MAX_BEARERS)
- goto exit;
-
- if (bearer_id == link->bearer_id)
- goto exit;
-
- pl = link->owner->links[bearer_id];
- if (pl && tipc_link_is_up(pl))
- tipc_link_reset(pl);
-
- if (link->failover_pkts == FIRST_FAILOVER)
- link->failover_pkts = msg_msgcnt(msg);
-
- /* Should we expect an inner packet? */
- if (!link->failover_pkts)
- goto exit;
-
- if (!tipc_msg_extract(*skb, &iskb, &pos)) {
- pr_warn("%sno inner failover pkt\n", link_co_err);
- *skb = NULL;
- goto exit;
- }
- link->failover_pkts--;
- *skb = NULL;
-
- /* Was this packet already delivered? */
- if (less(buf_seqno(iskb), link->failover_checkpt)) {
- kfree_skb(iskb);
- iskb = NULL;
- goto exit;
- }
- if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) {
- link->stats.recv_fragments++;
- tipc_buf_append(&link->failover_skb, &iskb);
+ tipc_link_advance_backlog(l, xmitq);
+ if (unlikely(!skb_queue_empty(&l->wakeupq)))
+ link_prepare_wakeup(l);
}
exit:
- if (!link->failover_pkts && pl)
- pl->flags &= ~LINK_FAILINGOVER;
- kfree_skb(*skb);
- *skb = iskb;
- return *skb;
-}
-
-static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
-{
- unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
-
- if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
- return;
-
- l_ptr->tolerance = tol;
- l_ptr->keepalive_intv = msecs_to_jiffies(intv);
- l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->keepalive_intv));
+ kfree_skb(skb);
+ return rc;
}
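
In the STATE_MSG branch above, rcvgap is computed as a plain u16 subtraction, so the result stays correct across sequence-number wraparound. A stand-alone sketch of that arithmetic follows; the more() helper here is serial-number style comparison, an assumption rather than the kernel's exact definition.

/* Hypothetical demo of the u16 gap computation used in the
 * STATE_MSG branch; more() is serial-number style comparison,
 * an assumption rather than the kernel definition.
 */
#include <stdio.h>
#include <stdint.h>

static int more(uint16_t a, uint16_t b)  /* a is "after" b */
{
        return (int16_t)(a - b) > 0;
}

int main(void)
{
        uint16_t rcv_nxt = 65530;        /* next seqno we expect      */
        uint16_t peers_snd_nxt = 4;      /* peer's next seqno to send */
        uint16_t rcvgap = 0;

        if (more(peers_snd_nxt, rcv_nxt))
                rcvgap = peers_snd_nxt - rcv_nxt;   /* wraps mod 2^16 */

        printf("rcvgap = %u\n", rcvgap); /* prints 10 */
        return 0;
}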
void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
@@ -1743,7 +1450,7 @@ static struct tipc_node *tipc_link_find_owner(struct net *net,
list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
tipc_node_lock(n_ptr);
for (i = 0; i < MAX_BEARERS; i++) {
- l_ptr = n_ptr->links[i];
+ l_ptr = n_ptr->links[i].link;
if (l_ptr && !strcmp(l_ptr->name, link_name)) {
*bearer_id = i;
found_node = n_ptr;
@@ -1770,27 +1477,16 @@ static void link_reset_statistics(struct tipc_link *l_ptr)
l_ptr->stats.recv_info = l_ptr->rcv_nxt;
}
-static void link_print(struct tipc_link *l_ptr, const char *str)
+static void link_print(struct tipc_link *l, const char *str)
{
- struct tipc_net *tn = net_generic(l_ptr->owner->net, tipc_net_id);
- struct tipc_bearer *b_ptr;
-
- rcu_read_lock();
- b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
- if (b_ptr)
- pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
- rcu_read_unlock();
-
- if (link_working_unknown(l_ptr))
- pr_cont(":WU\n");
- else if (link_reset_reset(l_ptr))
- pr_cont(":RR\n");
- else if (link_reset_unknown(l_ptr))
- pr_cont(":RU\n");
- else if (link_working_working(l_ptr))
- pr_cont(":WW\n");
- else
- pr_cont("\n");
+ struct sk_buff *hskb = skb_peek(&l->transmq);
+ u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt;
+ u16 tail = l->snd_nxt - 1;
+
+ pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
+ pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
+ skb_queue_len(&l->transmq), head, tail,
+ skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}
/* Parse and validate nested (link) properties valid for media, bearer and link
@@ -1865,7 +1561,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
tipc_node_lock(node);
- link = node->links[bearer_id];
+ link = node->links[bearer_id].link;
if (!link) {
res = -EINVAL;
goto out;
@@ -1885,7 +1581,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
u32 tol;
tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
- link_set_supervision_props(link, tol);
+ link->tolerance = tol;
tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
}
if (props[TIPC_NLA_PROP_PRIO]) {
@@ -2055,10 +1751,11 @@ static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
for (i = *prev_link; i < MAX_BEARERS; i++) {
*prev_link = i;
- if (!node->links[i])
+ if (!node->links[i].link)
continue;
- err = __tipc_nl_add_link(net, msg, node->links[i], NLM_F_MULTI);
+ err = __tipc_nl_add_link(net, msg,
+ node->links[i].link, NLM_F_MULTI);
if (err)
return err;
}
@@ -2172,7 +1869,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
tipc_node_lock(node);
- link = node->links[bearer_id];
+ link = node->links[bearer_id].link;
if (!link) {
tipc_node_unlock(node);
nlmsg_free(msg.skb);
@@ -2227,7 +1924,7 @@ int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
tipc_node_lock(node);
- link = node->links[bearer_id];
+ link = node->links[bearer_id].link;
if (!link) {
tipc_node_unlock(node);
return -EINVAL;
diff --git a/net/tipc/link.h b/net/tipc/link.h
index ae0a0ea57..39ff8b691 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -49,19 +49,25 @@
*/
#define INVALID_LINK_SEQ 0x10000
-/* Link working states
+/* Link FSM events:
*/
-#define WORKING_WORKING 560810u
-#define WORKING_UNKNOWN 560811u
-#define RESET_UNKNOWN 560812u
-#define RESET_RESET 560813u
+enum {
+ LINK_ESTABLISH_EVT = 0xec1ab1e,
+ LINK_PEER_RESET_EVT = 0x9eed0e,
+ LINK_FAILURE_EVT = 0xfa110e,
+ LINK_RESET_EVT = 0x10ca1d0e,
+ LINK_FAILOVER_BEGIN_EVT = 0xfa110bee,
+ LINK_FAILOVER_END_EVT = 0xfa110ede,
+ LINK_SYNCH_BEGIN_EVT = 0xc1ccbee,
+ LINK_SYNCH_END_EVT = 0xc1ccede
+};
-/* Link endpoint execution states
+/* Events returned from link at packet reception or at timeout
*/
-#define LINK_STARTED 0x0001
-#define LINK_STOPPED 0x0002
-#define LINK_SYNCHING 0x0004
-#define LINK_FAILINGOVER 0x0008
+enum {
+ TIPC_LINK_UP_EVT = 1,
+ TIPC_LINK_DOWN_EVT = (1 << 1)
+};
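
These return values are bit flags, so a caller can accumulate events from several calls with |= and test them individually, as tipc_node_timeout() does in node.c below. A stand-alone illustration; the enum values are copied from this header, the rest is made-up glue.

/* Stand-alone illustration of the bit-flag event convention;
 * the enum values come from the header above, the rest is
 * hypothetical glue.
 */
#include <stdio.h>

enum {
        TIPC_LINK_UP_EVT   = 1,
        TIPC_LINK_DOWN_EVT = (1 << 1)
};

int main(void)
{
        int rc = 0;

        rc |= TIPC_LINK_DOWN_EVT;       /* e.g. from tipc_link_timeout() */

        if (rc & TIPC_LINK_DOWN_EVT)
                printf("take the link down\n");
        if (rc & TIPC_LINK_UP_EVT)
                printf("bring the link up\n");
        return 0;
}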
/* Starting value for maximum packet size negotiation on unicast links
* (unless bearer MTU is less)
@@ -106,7 +112,6 @@ struct tipc_stats {
* @timer: link timer
* @owner: pointer to peer node
* @refcnt: reference counter for permanent references (owner node & timer)
- * @flags: execution state flags for link endpoint instance
* @peer_session: link session # being used by peer end of link
* @peer_bearer_id: bearer id used by link's peer endpoint
* @bearer_id: local bearer id used by link
@@ -143,20 +148,17 @@ struct tipc_stats {
struct tipc_link {
u32 addr;
char name[TIPC_MAX_LINK_NAME];
- struct tipc_media_addr media_addr;
- struct timer_list timer;
+ struct tipc_media_addr *media_addr;
struct tipc_node *owner;
- struct kref ref;
/* Management and link supervision data */
- unsigned int flags;
u32 peer_session;
u32 peer_bearer_id;
u32 bearer_id;
u32 tolerance;
unsigned long keepalive_intv;
u32 abort_limit;
- int state;
+ u32 state;
u32 silent_intv_cnt;
struct {
unchar hdr[INT_H_SIZE];
@@ -165,12 +167,10 @@ struct tipc_link {
struct tipc_msg *pmsg;
u32 priority;
char net_plane;
- u16 synch_point;
- /* Failover */
- u16 failover_pkts;
- u16 failover_checkpt;
- struct sk_buff *failover_skb;
+ /* Failover/synch */
+ u16 drop_point;
+ struct sk_buff *failover_reasm_skb;
/* Max packet negotiation */
u16 mtu;
@@ -192,8 +192,8 @@ struct tipc_link {
u16 rcv_nxt;
u32 rcv_unacked;
struct sk_buff_head deferdq;
- struct sk_buff_head inputq;
- struct sk_buff_head namedq;
+ struct sk_buff_head *inputq;
+ struct sk_buff_head *namedq;
/* Congestion handling */
struct sk_buff_head wakeupq;
@@ -205,28 +205,29 @@ struct tipc_link {
struct tipc_stats stats;
};
-struct tipc_port;
-
-struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
- struct tipc_bearer *b_ptr,
- const struct tipc_media_addr *media_addr);
-void tipc_link_delete(struct tipc_link *link);
-void tipc_link_delete_list(struct net *net, unsigned int bearer_id);
-void tipc_link_failover_send_queue(struct tipc_link *l_ptr);
-void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *dest);
+bool tipc_link_create(struct tipc_node *n, struct tipc_bearer *b, u32 session,
+ u32 ownnode, u32 peer, struct tipc_media_addr *maddr,
+ struct sk_buff_head *inputq, struct sk_buff_head *namedq,
+ struct tipc_link **link);
+void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
+ int mtyp, struct sk_buff_head *xmitq);
+void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
+ struct sk_buff_head *xmitq);
+int tipc_link_fsm_evt(struct tipc_link *l, int evt);
void tipc_link_reset_fragments(struct tipc_link *l_ptr);
-int tipc_link_is_up(struct tipc_link *l_ptr);
+bool tipc_link_is_up(struct tipc_link *l);
+bool tipc_link_is_reset(struct tipc_link *l);
+bool tipc_link_is_synching(struct tipc_link *l);
+bool tipc_link_is_failingover(struct tipc_link *l);
+bool tipc_link_is_blocked(struct tipc_link *l);
int tipc_link_is_active(struct tipc_link *l_ptr);
void tipc_link_purge_queues(struct tipc_link *l_ptr);
void tipc_link_purge_backlog(struct tipc_link *l);
-void tipc_link_reset_all(struct tipc_node *node);
void tipc_link_reset(struct tipc_link *l_ptr);
-int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
- u32 selector);
-int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dest,
- u32 selector);
int __tipc_link_xmit(struct net *net, struct tipc_link *link,
struct sk_buff_head *list);
+int tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list,
+ struct sk_buff_head *xmitq);
void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
u32 gap, u32 tolerance, u32 priority);
void tipc_link_push_packets(struct tipc_link *l_ptr);
@@ -242,34 +243,8 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]);
-void link_prepare_wakeup(struct tipc_link *l);
-
-static inline u32 link_own_addr(struct tipc_link *l)
-{
- return msg_prevnode(l->pmsg);
-}
-
-/*
- * Link status checking routines
- */
-static inline int link_working_working(struct tipc_link *l_ptr)
-{
- return l_ptr->state == WORKING_WORKING;
-}
-
-static inline int link_working_unknown(struct tipc_link *l_ptr)
-{
- return l_ptr->state == WORKING_UNKNOWN;
-}
-
-static inline int link_reset_unknown(struct tipc_link *l_ptr)
-{
- return l_ptr->state == RESET_UNKNOWN;
-}
-
-static inline int link_reset_reset(struct tipc_link *l_ptr)
-{
- return l_ptr->state == RESET_RESET;
-}
+int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq);
+int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
+ struct sk_buff_head *xmitq);
#endif
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 08b4cc7d4..5f7345015 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -121,7 +121,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
struct sk_buff *head = *headbuf;
struct sk_buff *frag = *buf;
- struct sk_buff *tail;
+ struct sk_buff *tail = NULL;
struct tipc_msg *msg;
u32 fragid;
int delta;
@@ -141,9 +141,15 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
goto err;
head = *headbuf = frag;
- skb_frag_list_init(head);
- TIPC_SKB_CB(head)->tail = NULL;
*buf = NULL;
+ TIPC_SKB_CB(head)->tail = NULL;
+ if (skb_is_nonlinear(head)) {
+ skb_walk_frags(head, tail) {
+ TIPC_SKB_CB(head)->tail = tail;
+ }
+ } else {
+ skb_frag_list_init(head);
+ }
return 0;
}
@@ -463,60 +469,72 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
/**
* tipc_msg_reverse(): swap source and destination addresses and add error code
- * @buf: buffer containing message to be reversed
- * @dnode: return value: node where to send message after reversal
- * @err: error code to be set in message
- * Consumes buffer if failure
+ * @own_node: originating node id for reversed message
+ * @skb: buffer containing message to be reversed; may be replaced.
+ * @err: error code to be set in message, if any
+ * Consumes buffer on failure
* Returns true if success, otherwise false
*/
-bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode,
- int err)
+bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
- struct tipc_msg *msg = buf_msg(buf);
+ struct sk_buff *_skb = *skb;
+ struct tipc_msg *hdr = buf_msg(_skb);
struct tipc_msg ohdr;
- uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE);
+ int dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE);
- if (skb_linearize(buf))
+ if (skb_linearize(_skb))
goto exit;
- msg = buf_msg(buf);
- if (msg_dest_droppable(msg))
+ hdr = buf_msg(_skb);
+ if (msg_dest_droppable(hdr))
goto exit;
- if (msg_errcode(msg))
+ if (msg_errcode(hdr))
goto exit;
- memcpy(&ohdr, msg, msg_hdr_sz(msg));
- msg_set_errcode(msg, err);
- msg_set_origport(msg, msg_destport(&ohdr));
- msg_set_destport(msg, msg_origport(&ohdr));
- msg_set_prevnode(msg, own_addr);
- if (!msg_short(msg)) {
- msg_set_orignode(msg, msg_destnode(&ohdr));
- msg_set_destnode(msg, msg_orignode(&ohdr));
+
+ /* Take a copy of original header before altering message */
+ memcpy(&ohdr, hdr, msg_hdr_sz(hdr));
+
+ /* Never return SHORT header; expand by replacing buffer if necessary */
+ if (msg_short(hdr)) {
+ *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen);
+ if (!*skb)
+ goto exit;
+ memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen);
+ kfree_skb(_skb);
+ _skb = *skb;
+ hdr = buf_msg(_skb);
+ memcpy(hdr, &ohdr, BASIC_H_SIZE);
+ msg_set_hdr_sz(hdr, BASIC_H_SIZE);
}
- msg_set_size(msg, msg_hdr_sz(msg) + rdsz);
- skb_trim(buf, msg_size(msg));
- skb_orphan(buf);
- *dnode = msg_orignode(&ohdr);
+
+ /* Now reverse the concerned fields */
+ msg_set_errcode(hdr, err);
+ msg_set_origport(hdr, msg_destport(&ohdr));
+ msg_set_destport(hdr, msg_origport(&ohdr));
+ msg_set_destnode(hdr, msg_prevnode(&ohdr));
+ msg_set_prevnode(hdr, own_node);
+ msg_set_orignode(hdr, own_node);
+ msg_set_size(hdr, msg_hdr_sz(hdr) + dlen);
+ skb_trim(_skb, msg_size(hdr));
+ skb_orphan(_skb);
return true;
exit:
- kfree_skb(buf);
- *dnode = 0;
+ kfree_skb(_skb);
+ *skb = NULL;
return false;
}
/**
* tipc_msg_lookup_dest(): try to find new destination for named message
* @skb: the buffer containing the message.
- * @dnode: return value: next-hop node, if destination found
- * @err: return value: error code to use, if message to be rejected
+ * @err: error code to be used by caller if lookup fails
* Does not consume buffer
* Returns true if a destination is found, false otherwise
*/
-bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb,
- u32 *dnode, int *err)
+bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
{
struct tipc_msg *msg = buf_msg(skb);
- u32 dport;
- u32 own_addr = tipc_own_addr(net);
+ u32 dport, dnode;
+ u32 onode = tipc_own_addr(net);
if (!msg_isdata(msg))
return false;
@@ -527,17 +545,18 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb,
*err = -TIPC_ERR_NO_NAME;
if (skb_linearize(skb))
return false;
+ msg = buf_msg(skb);
if (msg_reroute_cnt(msg))
return false;
- *dnode = addr_domain(net, msg_lookup_scope(msg));
+ dnode = addr_domain(net, msg_lookup_scope(msg));
dport = tipc_nametbl_translate(net, msg_nametype(msg),
- msg_nameinst(msg), dnode);
+ msg_nameinst(msg), &dnode);
if (!dport)
return false;
msg_incr_reroute_cnt(msg);
- if (*dnode != own_addr)
- msg_set_prevnode(msg, own_addr);
- msg_set_destnode(msg, *dnode);
+ if (dnode != onode)
+ msg_set_prevnode(msg, onode);
+ msg_set_destnode(msg, dnode);
msg_set_destport(msg, dport);
*err = TIPC_OK;
return true;
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 49f9a9648..5351a3f97 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -38,6 +38,7 @@
#define _TIPC_MSG_H
#include <linux/tipc.h>
+#include "core.h"
/*
* Constants and routines used to read and write TIPC payload message headers
@@ -109,7 +110,6 @@ struct tipc_skb_cb {
struct sk_buff *tail;
bool validated;
bool wakeup_pending;
- bool bundling;
u16 chain_sz;
u16 chain_imp;
};
@@ -558,15 +558,6 @@ static inline void msg_set_node_capabilities(struct tipc_msg *m, u32 n)
msg_set_bits(m, 1, 15, 0x1fff, n);
}
-static inline bool msg_dup(struct tipc_msg *m)
-{
- if (likely(msg_user(m) != TUNNEL_PROTOCOL))
- return false;
- if (msg_type(m) != SYNCH_MSG)
- return false;
- return true;
-}
-
/*
* Word 2
*/
@@ -620,12 +611,12 @@ static inline void msg_set_fragm_no(struct tipc_msg *m, u32 n)
}
-static inline u32 msg_next_sent(struct tipc_msg *m)
+static inline u16 msg_next_sent(struct tipc_msg *m)
{
return msg_bits(m, 4, 0, 0xffff);
}
-static inline void msg_set_next_sent(struct tipc_msg *m, u32 n)
+static inline void msg_set_next_sent(struct tipc_msg *m, u16 n)
{
msg_set_bits(m, 4, 0, 0xffff, n);
}
@@ -658,12 +649,12 @@ static inline void msg_set_link_selector(struct tipc_msg *m, u32 n)
/*
* Word 5
*/
-static inline u32 msg_session(struct tipc_msg *m)
+static inline u16 msg_session(struct tipc_msg *m)
{
return msg_bits(m, 5, 16, 0xffff);
}
-static inline void msg_set_session(struct tipc_msg *m, u32 n)
+static inline void msg_set_session(struct tipc_msg *m, u16 n)
{
msg_set_bits(m, 5, 16, 0xffff, n);
}
@@ -726,12 +717,12 @@ static inline char *msg_media_addr(struct tipc_msg *m)
/*
* Word 9
*/
-static inline u32 msg_msgcnt(struct tipc_msg *m)
+static inline u16 msg_msgcnt(struct tipc_msg *m)
{
return msg_bits(m, 9, 16, 0xffff);
}
-static inline void msg_set_msgcnt(struct tipc_msg *m, u32 n)
+static inline void msg_set_msgcnt(struct tipc_msg *m, u16 n)
{
msg_set_bits(m, 9, 16, 0xffff, n);
}
@@ -766,10 +757,25 @@ static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n)
msg_set_bits(m, 9, 0, 0xffff, n);
}
+static inline bool msg_peer_link_is_up(struct tipc_msg *m)
+{
+ if (likely(msg_user(m) != LINK_PROTOCOL))
+ return true;
+ if (msg_type(m) == STATE_MSG)
+ return true;
+ return false;
+}
+
+static inline bool msg_peer_node_is_up(struct tipc_msg *m)
+{
+ if (msg_peer_link_is_up(m))
+ return true;
+ return msg_redundant_link(m);
+}
+
struct sk_buff *tipc_buf_acquire(u32 size);
bool tipc_msg_validate(struct sk_buff *skb);
-bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode,
- int err);
+bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err);
void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type,
u32 hsize, u32 destnode);
struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
@@ -782,8 +788,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
int offset, int dsz, int mtu, struct sk_buff_head *list);
-bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, u32 *dnode,
- int *err);
+bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err);
struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
static inline u16 buf_seqno(struct sk_buff *skb)
@@ -857,26 +862,65 @@ static inline struct sk_buff *tipc_skb_dequeue(struct sk_buff_head *list,
return skb;
}
-/* tipc_skb_queue_tail(): add buffer to tail of list;
+/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
* @list: list to be appended to
- * @skb: buffer to append. Always appended
- * @dport: the destination port of the buffer
- * returns true if dport differs from previous destination
+ * @skb: buffer to add
+ * Returns true if queue should be treated further, otherwise false
*/
-static inline bool tipc_skb_queue_tail(struct sk_buff_head *list,
- struct sk_buff *skb, u32 dport)
+static inline bool __tipc_skb_queue_sorted(struct sk_buff_head *list,
+ struct sk_buff *skb)
{
- struct sk_buff *_skb = NULL;
- bool rv = false;
+ struct sk_buff *_skb, *tmp;
+ struct tipc_msg *hdr = buf_msg(skb);
+ u16 seqno = msg_seqno(hdr);
- spin_lock_bh(&list->lock);
- _skb = skb_peek_tail(list);
- if (!_skb || (msg_destport(buf_msg(_skb)) != dport) ||
- (skb_queue_len(list) > 32))
- rv = true;
+ if (skb_queue_empty(list) || (msg_user(hdr) == LINK_PROTOCOL)) {
+ __skb_queue_head(list, skb);
+ return true;
+ }
+ if (likely(less(seqno, buf_seqno(skb_peek(list))))) {
+ __skb_queue_head(list, skb);
+ return true;
+ }
+ if (!more(seqno, buf_seqno(skb_peek_tail(list)))) {
+ skb_queue_walk_safe(list, _skb, tmp) {
+ if (likely(less(seqno, buf_seqno(_skb)))) {
+ __skb_queue_before(list, _skb, skb);
+ return true;
+ }
+ }
+ }
__skb_queue_tail(list, skb);
+ return false;
+}
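
A stand-alone sketch of the same sorted-insert idea on a plain array, using serial-number comparison so ordering survives u16 wraparound. The less() helper is an assumption modeled on serial-number arithmetic, not a copy of the kernel's helper, and the fast paths and LINK_PROTOCOL special case of the real function are omitted.

/* Hypothetical array-based version of the sorted insert above.
 * less() uses serial-number arithmetic as an assumption; the
 * kernel's helper may differ in detail.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static int less(uint16_t a, uint16_t b)
{
        return (int16_t)(a - b) < 0;
}

/* Insert seqno so that q[] stays sorted in serial-number order */
static void queue_sorted(uint16_t *q, int *len, uint16_t seqno)
{
        int i = 0;

        while (i < *len && !less(seqno, q[i]))
                i++;
        memmove(&q[i + 1], &q[i], (*len - i) * sizeof(*q));
        q[i] = seqno;
        (*len)++;
}

int main(void)
{
        uint16_t q[8];
        uint16_t in[] = { 65533, 2, 65535, 0 };  /* arrives out of order */
        int len = 0, i;

        for (i = 0; i < 4; i++)
                queue_sorted(q, &len, in[i]);
        for (i = 0; i < len; i++)
                printf("%u ", q[i]);             /* 65533 65535 0 2 */
        printf("\n");
        return 0;
}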
+
+/* tipc_skb_queue_splice_tail - append an skb list to a lock-protected list
+ * @list: the new list to append. Not lock protected
+ * @head: target list. Lock protected.
+ */
+static inline void tipc_skb_queue_splice_tail(struct sk_buff_head *list,
+ struct sk_buff_head *head)
+{
+ spin_lock_bh(&head->lock);
+ skb_queue_splice_tail(list, head);
+ spin_unlock_bh(&head->lock);
+}
+
+/* tipc_skb_queue_splice_tail_init - merge two lock-protected skb lists
+ * @list: the new list to add. Lock protected. Will be reinitialized
+ * @head: target list. Lock protected.
+ */
+static inline void tipc_skb_queue_splice_tail_init(struct sk_buff_head *list,
+ struct sk_buff_head *head)
+{
+ struct sk_buff_head tmp;
+
+ __skb_queue_head_init(&tmp);
+
+ spin_lock_bh(&list->lock);
+ skb_queue_splice_tail_init(list, &tmp);
spin_unlock_bh(&list->lock);
- return rv;
+ tipc_skb_queue_splice_tail(&tmp, head);
}
#endif
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 41e7b7e4d..e6018b7eb 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -96,13 +96,13 @@ void named_cluster_distribute(struct net *net, struct sk_buff *skb)
dnode = node->addr;
if (in_own_node(net, dnode))
continue;
- if (!tipc_node_active_links(node))
+ if (!tipc_node_is_up(node))
continue;
oskb = pskb_copy(skb, GFP_ATOMIC);
if (!oskb)
break;
msg_set_destnode(buf_msg(oskb), dnode);
- tipc_link_xmit_skb(net, oskb, dnode, dnode);
+ tipc_node_xmit_skb(net, oskb, dnode, dnode);
}
rcu_read_unlock();
@@ -223,7 +223,7 @@ void tipc_named_node_up(struct net *net, u32 dnode)
&tn->nametbl->publ_list[TIPC_ZONE_SCOPE]);
rcu_read_unlock();
- tipc_link_xmit(net, &head, dnode, dnode);
+ tipc_node_xmit(net, &head, dnode, dnode);
}
static void tipc_publ_subscribe(struct net *net, struct publication *publ,
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 53e0fee80..1eadc95e1 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -1114,7 +1114,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
}
len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
- if (TLV_GET_LEN(msg.req) && !TLV_OK(msg.req, len)) {
+ if (len && !TLV_OK(msg.req, len)) {
msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
err = -EOPNOTSUPP;
goto send;
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 0b1d61a5f..2c32a8303 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -40,10 +40,42 @@
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
+#include "discover.h"
-static void node_lost_contact(struct tipc_node *n_ptr);
+/* Node FSM states and events:
+ */
+enum {
+ SELF_DOWN_PEER_DOWN = 0xdd,
+ SELF_UP_PEER_UP = 0xaa,
+ SELF_DOWN_PEER_LEAVING = 0xd1,
+ SELF_UP_PEER_COMING = 0xac,
+ SELF_COMING_PEER_UP = 0xca,
+ SELF_LEAVING_PEER_DOWN = 0x1d,
+ NODE_FAILINGOVER = 0xf0,
+ NODE_SYNCHING = 0xcc
+};
+
+enum {
+ SELF_ESTABL_CONTACT_EVT = 0xece,
+ SELF_LOST_CONTACT_EVT = 0x1ce,
+ PEER_ESTABL_CONTACT_EVT = 0x9ece,
+ PEER_LOST_CONTACT_EVT = 0x91ce,
+ NODE_FAILOVER_BEGIN_EVT = 0xfbe,
+ NODE_FAILOVER_END_EVT = 0xfee,
+ NODE_SYNCH_BEGIN_EVT = 0xcbe,
+ NODE_SYNCH_END_EVT = 0xcee
+};
+
+static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
+ struct sk_buff_head *xmitq,
+ struct tipc_media_addr **maddr);
+static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
+ bool delete);
+static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
static void node_established_contact(struct tipc_node *n_ptr);
static void tipc_node_delete(struct tipc_node *node);
+static void tipc_node_timeout(unsigned long data);
+static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
struct tipc_sock_conn {
u32 port;
@@ -110,7 +142,7 @@ struct tipc_node *tipc_node_find(struct net *net, u32 addr)
return NULL;
}
-struct tipc_node *tipc_node_create(struct net *net, u32 addr)
+struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_node *n_ptr, *temp_node;
@@ -126,12 +158,14 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr)
}
n_ptr->addr = addr;
n_ptr->net = net;
+ n_ptr->capabilities = capabilities;
kref_init(&n_ptr->kref);
spin_lock_init(&n_ptr->lock);
INIT_HLIST_NODE(&n_ptr->hash);
INIT_LIST_HEAD(&n_ptr->list);
INIT_LIST_HEAD(&n_ptr->publ_list);
INIT_LIST_HEAD(&n_ptr->conn_sks);
+ skb_queue_head_init(&n_ptr->bclink.namedq);
__skb_queue_head_init(&n_ptr->bclink.deferdq);
hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
@@ -139,14 +173,32 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr)
break;
}
list_add_tail_rcu(&n_ptr->list, &temp_node->list);
- n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN;
+ n_ptr->state = SELF_DOWN_PEER_LEAVING;
n_ptr->signature = INVALID_NODE_SIG;
+ n_ptr->active_links[0] = INVALID_BEARER_ID;
+ n_ptr->active_links[1] = INVALID_BEARER_ID;
tipc_node_get(n_ptr);
+ setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr);
+ n_ptr->keepalive_intv = U32_MAX;
exit:
spin_unlock_bh(&tn->node_list_lock);
return n_ptr;
}
+static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
+{
+ unsigned long tol = l->tolerance;
+ unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
+ unsigned long keepalive_intv = msecs_to_jiffies(intv);
+
+ /* Link with lowest tolerance determines timer interval */
+ if (keepalive_intv < n->keepalive_intv)
+ n->keepalive_intv = keepalive_intv;
+
+ /* Ensure link's abort limit corresponds to current interval */
+ l->abort_limit = l->tolerance / jiffies_to_msecs(n->keepalive_intv);
+}
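
As a worked example of the arithmetic above, ignoring the jiffies rounding: a link tolerance of 1500 ms gives intv = min(1500 / 4, 500) = 375 ms, and if that becomes the node's keepalive interval, abort_limit = 1500 / 375 = 4, i.e. four silent supervision intervals before the link is given up.

/* Worked example of tipc_node_calculate_timer()'s arithmetic;
 * the jiffies conversion is dropped and the values are made up.
 */
#include <stdio.h>

int main(void)
{
        unsigned long tol = 1500;       /* link tolerance, ms */
        unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
        unsigned long abort_limit = tol / intv;

        /* prints: intv=375ms abort_limit=4 */
        printf("intv=%lums abort_limit=%lu\n", intv, abort_limit);
        return 0;
}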
+
static void tipc_node_delete(struct tipc_node *node)
{
list_del_rcu(&node->list);
@@ -160,8 +212,11 @@ void tipc_node_stop(struct net *net)
struct tipc_node *node, *t_node;
spin_lock_bh(&tn->node_list_lock);
- list_for_each_entry_safe(node, t_node, &tn->node_list, list)
+ list_for_each_entry_safe(node, t_node, &tn->node_list, list) {
+ if (del_timer(&node->timer))
+ tipc_node_put(node);
tipc_node_put(node);
+ }
spin_unlock_bh(&tn->node_list_lock);
}
@@ -219,158 +274,551 @@ void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
tipc_node_put(node);
}
+/* tipc_node_timeout - handle expiration of node timer
+ */
+static void tipc_node_timeout(unsigned long data)
+{
+ struct tipc_node *n = (struct tipc_node *)data;
+ struct tipc_link_entry *le;
+ struct sk_buff_head xmitq;
+ int bearer_id;
+ int rc = 0;
+
+ __skb_queue_head_init(&xmitq);
+
+ for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+ tipc_node_lock(n);
+ le = &n->links[bearer_id];
+ if (le->link) {
+ /* Link tolerance may change asynchronously: */
+ tipc_node_calculate_timer(n, le->link);
+ rc = tipc_link_timeout(le->link, &xmitq);
+ }
+ tipc_node_unlock(n);
+ tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
+ if (rc & TIPC_LINK_DOWN_EVT)
+ tipc_node_link_down(n, bearer_id, false);
+ }
+ if (!mod_timer(&n->timer, jiffies + n->keepalive_intv))
+ tipc_node_get(n);
+ tipc_node_put(n);
+}
+
/**
- * tipc_node_link_up - handle addition of link
- *
+ * __tipc_node_link_up - handle addition of link
+ * Node lock must be held by caller
* Link becomes active (alone or shared) or standby, depending on its priority.
*/
-void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
+static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
+ struct sk_buff_head *xmitq)
{
- struct tipc_link **active = &n_ptr->active_links[0];
+ int *slot0 = &n->active_links[0];
+ int *slot1 = &n->active_links[1];
+ struct tipc_link *ol = node_active_link(n, 0);
+ struct tipc_link *nl = n->links[bearer_id].link;
- n_ptr->working_links++;
- n_ptr->action_flags |= TIPC_NOTIFY_LINK_UP;
- n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
+ if (!nl || !tipc_link_is_up(nl))
+ return;
- pr_debug("Established link <%s> on network plane %c\n",
- l_ptr->name, l_ptr->net_plane);
+ n->working_links++;
+ n->action_flags |= TIPC_NOTIFY_LINK_UP;
+ n->link_id = nl->peer_bearer_id << 16 | bearer_id;
- if (!active[0]) {
- active[0] = active[1] = l_ptr;
- node_established_contact(n_ptr);
- goto exit;
- }
- if (l_ptr->priority < active[0]->priority) {
- pr_debug("New link <%s> becomes standby\n", l_ptr->name);
- goto exit;
+ /* Leave room for tunnel header when returning 'mtu' to users: */
+ n->links[bearer_id].mtu = nl->mtu - INT_H_SIZE;
+
+ tipc_bearer_add_dest(n->net, bearer_id, n->addr);
+
+ pr_debug("Established link <%s> on network plane %c\n",
+ nl->name, nl->net_plane);
+
+ /* First link? => give it both slots */
+ if (!ol) {
+ *slot0 = bearer_id;
+ *slot1 = bearer_id;
+ tipc_link_build_bcast_sync_msg(nl, xmitq);
+ node_established_contact(n);
+ return;
}
- tipc_link_dup_queue_xmit(active[0], l_ptr);
- if (l_ptr->priority == active[0]->priority) {
- active[0] = l_ptr;
- goto exit;
+
+ /* Second link => redistribute slots */
+ if (nl->priority > ol->priority) {
+ pr_debug("Old link <%s> becomes standby\n", ol->name);
+ *slot0 = bearer_id;
+ *slot1 = bearer_id;
+ } else if (nl->priority == ol->priority) {
+ *slot0 = bearer_id;
+ } else {
+ pr_debug("New link <%s> is standby\n", nl->name);
}
- pr_debug("Old link <%s> becomes standby\n", active[0]->name);
- if (active[1] != active[0])
- pr_debug("Old link <%s> becomes standby\n", active[1]->name);
- active[0] = active[1] = l_ptr;
-exit:
- /* Leave room for changeover header when returning 'mtu' to users: */
- n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE;
- n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE;
+
+ /* Prepare synchronization with first link */
+ tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
}
/**
- * node_select_active_links - select active link
+ * tipc_node_link_up - handle addition of link
+ *
+ * Link becomes active (alone or shared) or standby, depending on its priority.
*/
-static void node_select_active_links(struct tipc_node *n_ptr)
+static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
+ struct sk_buff_head *xmitq)
{
- struct tipc_link **active = &n_ptr->active_links[0];
- u32 i;
- u32 highest_prio = 0;
+ tipc_node_lock(n);
+ __tipc_node_link_up(n, bearer_id, xmitq);
+ tipc_node_unlock(n);
+}
- active[0] = active[1] = NULL;
+/**
+ * __tipc_node_link_down - handle loss of link
+ */
+static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
+ struct sk_buff_head *xmitq,
+ struct tipc_media_addr **maddr)
+{
+ struct tipc_link_entry *le = &n->links[*bearer_id];
+ int *slot0 = &n->active_links[0];
+ int *slot1 = &n->active_links[1];
+ int i, highest = 0;
+ struct tipc_link *l, *_l, *tnl;
+
+ l = n->links[*bearer_id].link;
+ if (!l || tipc_link_is_reset(l))
+ return;
- for (i = 0; i < MAX_BEARERS; i++) {
- struct tipc_link *l_ptr = n_ptr->links[i];
+ n->working_links--;
+ n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
+ n->link_id = l->peer_bearer_id << 16 | *bearer_id;
- if (!l_ptr || !tipc_link_is_up(l_ptr) ||
- (l_ptr->priority < highest_prio))
- continue;
+ tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);
- if (l_ptr->priority > highest_prio) {
- highest_prio = l_ptr->priority;
- active[0] = active[1] = l_ptr;
- } else {
- active[1] = l_ptr;
+ pr_debug("Lost link <%s> on network plane %c\n",
+ l->name, l->net_plane);
+
+ /* Select new active link if any available */
+ *slot0 = INVALID_BEARER_ID;
+ *slot1 = INVALID_BEARER_ID;
+ for (i = 0; i < MAX_BEARERS; i++) {
+ _l = n->links[i].link;
+ if (!_l || !tipc_link_is_up(_l))
+ continue;
+ if (_l == l)
+ continue;
+ if (_l->priority < highest)
+ continue;
+ if (_l->priority > highest) {
+ highest = _l->priority;
+ *slot0 = i;
+ *slot1 = i;
+ continue;
}
+ *slot1 = i;
+ }
+
+ if (!tipc_node_is_up(n)) {
+ tipc_link_reset(l);
+ node_lost_contact(n, &le->inputq);
+ return;
}
+
+ /* There is still a working link => initiate failover */
+ tnl = node_active_link(n, 0);
+ tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
+ tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
+ n->sync_point = tnl->rcv_nxt + (U16_MAX / 2 - 1);
+ tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
+ tipc_link_reset(l);
+ tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
+ tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
+ *maddr = &n->links[tnl->bearer_id].maddr;
+ *bearer_id = tnl->bearer_id;
}
-/**
- * tipc_node_link_down - handle loss of link
- */
-void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
+static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
{
- struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
- struct tipc_link **active;
+ struct tipc_link_entry *le = &n->links[bearer_id];
+ struct tipc_media_addr *maddr;
+ struct sk_buff_head xmitq;
+
+ __skb_queue_head_init(&xmitq);
+
+ tipc_node_lock(n);
+ __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
+ if (delete && le->link) {
+ kfree(le->link);
+ le->link = NULL;
+ n->link_cnt--;
+ }
+ tipc_node_unlock(n);
- n_ptr->working_links--;
- n_ptr->action_flags |= TIPC_NOTIFY_LINK_DOWN;
- n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
+ tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
+ tipc_sk_rcv(n->net, &le->inputq);
+}
- if (!tipc_link_is_active(l_ptr)) {
- pr_debug("Lost standby link <%s> on network plane %c\n",
- l_ptr->name, l_ptr->net_plane);
- return;
- }
- pr_debug("Lost link <%s> on network plane %c\n",
- l_ptr->name, l_ptr->net_plane);
-
- active = &n_ptr->active_links[0];
- if (active[0] == l_ptr)
- active[0] = active[1];
- if (active[1] == l_ptr)
- active[1] = active[0];
- if (active[0] == l_ptr)
- node_select_active_links(n_ptr);
- if (tipc_node_is_up(n_ptr))
- tipc_link_failover_send_queue(l_ptr);
- else
- node_lost_contact(n_ptr);
-
- /* Leave room for changeover header when returning 'mtu' to users: */
- if (active[0]) {
- n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE;
- n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE;
+bool tipc_node_is_up(struct tipc_node *n)
+{
+ return n->active_links[0] != INVALID_BEARER_ID;
+}
+
+void tipc_node_check_dest(struct net *net, u32 onode,
+ struct tipc_bearer *b,
+ u16 capabilities, u32 signature,
+ struct tipc_media_addr *maddr,
+ bool *respond, bool *dupl_addr)
+{
+ struct tipc_node *n;
+ struct tipc_link *l;
+ struct tipc_link_entry *le;
+ bool addr_match = false;
+ bool sign_match = false;
+ bool link_up = false;
+ bool accept_addr = false;
+ bool reset = true;
+
+ *dupl_addr = false;
+ *respond = false;
+
+ n = tipc_node_create(net, onode, capabilities);
+ if (!n)
return;
+
+ tipc_node_lock(n);
+
+ le = &n->links[b->identity];
+
+ /* Prepare to validate requesting node's signature and media address */
+ l = le->link;
+ link_up = l && tipc_link_is_up(l);
+ addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
+ sign_match = (signature == n->signature);
+
+ /* These three flags give us eight permutations: */
+
+ if (sign_match && addr_match && link_up) {
+ /* All is fine. Do nothing. */
+ reset = false;
+ } else if (sign_match && addr_match && !link_up) {
+ /* Respond. The link will come up in due time */
+ *respond = true;
+ } else if (sign_match && !addr_match && link_up) {
+ /* Peer has changed i/f address without rebooting.
+ * If so, the link will reset soon, and the next
+ * discovery will be accepted. So we can ignore it.
+ * It may also be a cloned or malicious peer having
+ * chosen the same node address and signature as an
+ * existing one.
+ * Ignore requests until the link goes down, if ever.
+ */
+ *dupl_addr = true;
+ } else if (sign_match && !addr_match && !link_up) {
+ /* Peer link has changed i/f address without rebooting.
+ * It may also be a cloned or malicious peer; we can't
+ * distinguish between the two.
+ * The signature is correct, so we must accept.
+ */
+ accept_addr = true;
+ *respond = true;
+ } else if (!sign_match && addr_match && link_up) {
+ /* Peer node rebooted. Two possibilities:
+ * - Delayed re-discovery; this link endpoint has already
+ * reset and re-established contact with the peer, before
+ * receiving a discovery message from that node.
+ * (The peer happened to receive one from this node first).
+ * - The peer came back so fast that our side has not
+ * discovered it yet. Probing from this side will soon
+ * reset the link, since there can be no working link
+ * endpoint at the peer end, and the link will re-establish.
+ * Accept the signature, since it comes from a known peer.
+ */
+ n->signature = signature;
+ } else if (!sign_match && addr_match && !link_up) {
+ /* The peer node has rebooted.
+ * Accept signature, since it is a known peer.
+ */
+ n->signature = signature;
+ *respond = true;
+ } else if (!sign_match && !addr_match && link_up) {
+ /* Peer rebooted with new address, or a new/duplicate peer.
+ * Ignore until the link goes down, if ever.
+ */
+ *dupl_addr = true;
+ } else if (!sign_match && !addr_match && !link_up) {
+ /* Peer rebooted with new address, or it is a new peer.
+ * Accept signature and address.
+ */
+ n->signature = signature;
+ accept_addr = true;
+ *respond = true;
}
- /* Loopback link went down? No fragmentation needed from now on. */
- if (n_ptr->addr == tn->own_addr) {
- n_ptr->act_mtus[0] = MAX_MSG_SIZE;
- n_ptr->act_mtus[1] = MAX_MSG_SIZE;
+
+ if (!accept_addr)
+ goto exit;
+
+ /* Now create new link if not already existing */
+ if (!l) {
+ if (n->link_cnt == 2) {
+ pr_warn("Cannot establish 3rd link to %x\n", n->addr);
+ goto exit;
+ }
+ if (!tipc_link_create(n, b, mod(tipc_net(net)->random),
+ tipc_own_addr(net), onode, &le->maddr,
+ &le->inputq, &n->bclink.namedq, &l)) {
+ *respond = false;
+ goto exit;
+ }
+ tipc_link_reset(l);
+ if (n->state == NODE_FAILINGOVER)
+ tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
+ le->link = l;
+ n->link_cnt++;
+ tipc_node_calculate_timer(n, l);
+ if (n->link_cnt == 1)
+ if (!mod_timer(&n->timer, jiffies + n->keepalive_intv))
+ tipc_node_get(n);
}
+ memcpy(&le->maddr, maddr, sizeof(*maddr));
+exit:
+ tipc_node_unlock(n);
+ if (reset)
+ tipc_node_link_down(n, b->identity, false);
+ tipc_node_put(n);
}
-int tipc_node_active_links(struct tipc_node *n_ptr)
+void tipc_node_delete_links(struct net *net, int bearer_id)
{
- return n_ptr->active_links[0] != NULL;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_node *n;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(n, &tn->node_list, list) {
+ tipc_node_link_down(n, bearer_id, true);
+ }
+ rcu_read_unlock();
}
-int tipc_node_is_up(struct tipc_node *n_ptr)
+static void tipc_node_reset_links(struct tipc_node *n)
{
- return tipc_node_active_links(n_ptr);
+ char addr_string[16];
+ int i;
+
+ pr_warn("Resetting all links to %s\n",
+ tipc_addr_string_fill(addr_string, n->addr));
+
+ for (i = 0; i < MAX_BEARERS; i++) {
+ tipc_node_link_down(n, i, false);
+ }
}
-void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
+/* tipc_node_fsm_evt - node finite state machine
+ * Determines when contact is allowed with peer node
+ */
+static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
{
- n_ptr->links[l_ptr->bearer_id] = l_ptr;
- n_ptr->link_cnt++;
+ int state = n->state;
+
+ switch (state) {
+ case SELF_DOWN_PEER_DOWN:
+ switch (evt) {
+ case SELF_ESTABL_CONTACT_EVT:
+ state = SELF_UP_PEER_COMING;
+ break;
+ case PEER_ESTABL_CONTACT_EVT:
+ state = SELF_COMING_PEER_UP;
+ break;
+ case SELF_LOST_CONTACT_EVT:
+ case PEER_LOST_CONTACT_EVT:
+ break;
+ case NODE_SYNCH_END_EVT:
+ case NODE_SYNCH_BEGIN_EVT:
+ case NODE_FAILOVER_BEGIN_EVT:
+ case NODE_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case SELF_UP_PEER_UP:
+ switch (evt) {
+ case SELF_LOST_CONTACT_EVT:
+ state = SELF_DOWN_PEER_LEAVING;
+ break;
+ case PEER_LOST_CONTACT_EVT:
+ state = SELF_LEAVING_PEER_DOWN;
+ break;
+ case NODE_SYNCH_BEGIN_EVT:
+ state = NODE_SYNCHING;
+ break;
+ case NODE_FAILOVER_BEGIN_EVT:
+ state = NODE_FAILINGOVER;
+ break;
+ case SELF_ESTABL_CONTACT_EVT:
+ case PEER_ESTABL_CONTACT_EVT:
+ case NODE_SYNCH_END_EVT:
+ case NODE_FAILOVER_END_EVT:
+ break;
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case SELF_DOWN_PEER_LEAVING:
+ switch (evt) {
+ case PEER_LOST_CONTACT_EVT:
+ state = SELF_DOWN_PEER_DOWN;
+ break;
+ case SELF_ESTABL_CONTACT_EVT:
+ case PEER_ESTABL_CONTACT_EVT:
+ case SELF_LOST_CONTACT_EVT:
+ break;
+ case NODE_SYNCH_END_EVT:
+ case NODE_SYNCH_BEGIN_EVT:
+ case NODE_FAILOVER_BEGIN_EVT:
+ case NODE_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case SELF_UP_PEER_COMING:
+ switch (evt) {
+ case PEER_ESTABL_CONTACT_EVT:
+ state = SELF_UP_PEER_UP;
+ break;
+ case SELF_LOST_CONTACT_EVT:
+ state = SELF_DOWN_PEER_LEAVING;
+ break;
+ case SELF_ESTABL_CONTACT_EVT:
+ case PEER_LOST_CONTACT_EVT:
+ break;
+ case NODE_SYNCH_END_EVT:
+ case NODE_SYNCH_BEGIN_EVT:
+ case NODE_FAILOVER_BEGIN_EVT:
+ case NODE_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case SELF_COMING_PEER_UP:
+ switch (evt) {
+ case SELF_ESTABL_CONTACT_EVT:
+ state = SELF_UP_PEER_UP;
+ break;
+ case PEER_LOST_CONTACT_EVT:
+ state = SELF_LEAVING_PEER_DOWN;
+ break;
+ case SELF_LOST_CONTACT_EVT:
+ case PEER_ESTABL_CONTACT_EVT:
+ break;
+ case NODE_SYNCH_END_EVT:
+ case NODE_SYNCH_BEGIN_EVT:
+ case NODE_FAILOVER_BEGIN_EVT:
+ case NODE_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case SELF_LEAVING_PEER_DOWN:
+ switch (evt) {
+ case SELF_LOST_CONTACT_EVT:
+ state = SELF_DOWN_PEER_DOWN;
+ break;
+ case SELF_ESTABL_CONTACT_EVT:
+ case PEER_ESTABL_CONTACT_EVT:
+ case PEER_LOST_CONTACT_EVT:
+ break;
+ case NODE_SYNCH_END_EVT:
+ case NODE_SYNCH_BEGIN_EVT:
+ case NODE_FAILOVER_BEGIN_EVT:
+ case NODE_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case NODE_FAILINGOVER:
+ switch (evt) {
+ case SELF_LOST_CONTACT_EVT:
+ state = SELF_DOWN_PEER_LEAVING;
+ break;
+ case PEER_LOST_CONTACT_EVT:
+ state = SELF_LEAVING_PEER_DOWN;
+ break;
+ case NODE_FAILOVER_END_EVT:
+ state = SELF_UP_PEER_UP;
+ break;
+ case NODE_FAILOVER_BEGIN_EVT:
+ case SELF_ESTABL_CONTACT_EVT:
+ case PEER_ESTABL_CONTACT_EVT:
+ break;
+ case NODE_SYNCH_BEGIN_EVT:
+ case NODE_SYNCH_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ case NODE_SYNCHING:
+ switch (evt) {
+ case SELF_LOST_CONTACT_EVT:
+ state = SELF_DOWN_PEER_LEAVING;
+ break;
+ case PEER_LOST_CONTACT_EVT:
+ state = SELF_LEAVING_PEER_DOWN;
+ break;
+ case NODE_SYNCH_END_EVT:
+ state = SELF_UP_PEER_UP;
+ break;
+ case NODE_FAILOVER_BEGIN_EVT:
+ state = NODE_FAILINGOVER;
+ break;
+ case NODE_SYNCH_BEGIN_EVT:
+ case SELF_ESTABL_CONTACT_EVT:
+ case PEER_ESTABL_CONTACT_EVT:
+ break;
+ case NODE_FAILOVER_END_EVT:
+ default:
+ goto illegal_evt;
+ }
+ break;
+ default:
+ pr_err("Unknown node fsm state %x\n", state);
+ break;
+ }
+ n->state = state;
+ return;
+
+illegal_evt:
+ pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
}
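
A heavily reduced, hypothetical copy of the FSM above, covering only the normal establishment path (SELF_DOWN_PEER_DOWN -> SELF_UP_PEER_COMING -> SELF_UP_PEER_UP); all other states and the illegal-event handling are omitted.

/* Hypothetical, heavily reduced sketch of tipc_node_fsm_evt()
 * covering only the normal establishment path shown above.
 */
#include <assert.h>
#include <stdio.h>

enum { SELF_DOWN_PEER_DOWN = 0xdd, SELF_UP_PEER_COMING = 0xac,
       SELF_UP_PEER_UP = 0xaa };
enum { SELF_ESTABL_CONTACT_EVT = 0xece, PEER_ESTABL_CONTACT_EVT = 0x9ece };

static int fsm_next(int state, int evt)
{
        if (state == SELF_DOWN_PEER_DOWN && evt == SELF_ESTABL_CONTACT_EVT)
                return SELF_UP_PEER_COMING;
        if (state == SELF_UP_PEER_COMING && evt == PEER_ESTABL_CONTACT_EVT)
                return SELF_UP_PEER_UP;
        return state;   /* all other transitions omitted in this sketch */
}

int main(void)
{
        int s = SELF_DOWN_PEER_DOWN;

        s = fsm_next(s, SELF_ESTABL_CONTACT_EVT);
        s = fsm_next(s, PEER_ESTABL_CONTACT_EVT);
        assert(s == SELF_UP_PEER_UP);
        printf("state=0x%x\n", s);      /* 0xaa */
        return 0;
}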
-void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
+bool tipc_node_filter_pkt(struct tipc_node *n, struct tipc_msg *hdr)
{
- int i;
+ int state = n->state;
- for (i = 0; i < MAX_BEARERS; i++) {
- if (l_ptr != n_ptr->links[i])
- continue;
- n_ptr->links[i] = NULL;
- n_ptr->link_cnt--;
+ if (likely(state == SELF_UP_PEER_UP))
+ return true;
+
+ if (state == SELF_LEAVING_PEER_DOWN)
+ return false;
+
+ if (state == SELF_DOWN_PEER_LEAVING) {
+ if (msg_peer_node_is_up(hdr))
+ return false;
}
+
+ return true;
}
static void node_established_contact(struct tipc_node *n_ptr)
{
+ tipc_node_fsm_evt(n_ptr, SELF_ESTABL_CONTACT_EVT);
n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
n_ptr->bclink.oos_state = 0;
n_ptr->bclink.acked = tipc_bclink_get_last_sent(n_ptr->net);
tipc_bclink_add_node(n_ptr->net, n_ptr->addr);
}
-static void node_lost_contact(struct tipc_node *n_ptr)
+static void node_lost_contact(struct tipc_node *n_ptr,
+ struct sk_buff_head *inputq)
{
char addr_string[16];
struct tipc_sock_conn *conn, *safe;
+ struct tipc_link *l;
struct list_head *conns = &n_ptr->conn_sks;
struct sk_buff *skb;
struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
@@ -396,21 +844,13 @@ static void node_lost_contact(struct tipc_node *n_ptr)
/* Abort any ongoing link failover */
for (i = 0; i < MAX_BEARERS; i++) {
- struct tipc_link *l_ptr = n_ptr->links[i];
- if (!l_ptr)
- continue;
- l_ptr->flags &= ~LINK_FAILINGOVER;
- l_ptr->failover_checkpt = 0;
- l_ptr->failover_pkts = 0;
- kfree_skb(l_ptr->failover_skb);
- l_ptr->failover_skb = NULL;
- tipc_link_reset_fragments(l_ptr);
+ l = n_ptr->links[i].link;
+ if (l)
+ tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
}
- n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;
-
/* Prevent re-contact with node until cleanup is done */
- n_ptr->action_flags |= TIPC_WAIT_PEER_LINKS_DOWN;
+ tipc_node_fsm_evt(n_ptr, SELF_LOST_CONTACT_EVT);
/* Notify publications from this node */
n_ptr->action_flags |= TIPC_NOTIFY_NODE_DOWN;
@@ -421,10 +861,8 @@ static void node_lost_contact(struct tipc_node *n_ptr)
SHORT_H_SIZE, 0, tn->own_addr,
conn->peer_node, conn->port,
conn->peer_port, TIPC_ERR_NO_NODE);
- if (likely(skb)) {
- skb_queue_tail(n_ptr->inputq, skb);
- n_ptr->action_flags |= TIPC_MSG_EVT;
- }
+ if (likely(skb))
+ skb_queue_tail(inputq, skb);
list_del(&conn->list);
kfree(conn);
}
@@ -453,7 +891,7 @@ int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
goto exit;
tipc_node_lock(node);
- link = node->links[bearer_id];
+ link = node->links[bearer_id].link;
if (link) {
strncpy(linkname, link->name, len);
err = 0;
@@ -471,27 +909,20 @@ void tipc_node_unlock(struct tipc_node *node)
u32 flags = node->action_flags;
u32 link_id = 0;
struct list_head *publ_list;
- struct sk_buff_head *inputq = node->inputq;
- struct sk_buff_head *namedq;
- if (likely(!flags || (flags == TIPC_MSG_EVT))) {
- node->action_flags = 0;
+ if (likely(!flags)) {
spin_unlock_bh(&node->lock);
- if (flags == TIPC_MSG_EVT)
- tipc_sk_rcv(net, inputq);
return;
}
addr = node->addr;
link_id = node->link_id;
- namedq = node->namedq;
publ_list = &node->publ_list;
- node->action_flags &= ~(TIPC_MSG_EVT |
- TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
+ node->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP |
TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT |
- TIPC_NAMED_MSG_EVT | TIPC_BCAST_RESET);
+ TIPC_BCAST_RESET);
spin_unlock_bh(&node->lock);
@@ -512,17 +943,11 @@ void tipc_node_unlock(struct tipc_node *node)
tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
link_id, addr);
- if (flags & TIPC_MSG_EVT)
- tipc_sk_rcv(net, inputq);
-
- if (flags & TIPC_NAMED_MSG_EVT)
- tipc_named_rcv(net, namedq);
-
if (flags & TIPC_BCAST_MSG_EVT)
tipc_bclink_input(net);
if (flags & TIPC_BCAST_RESET)
- tipc_link_reset_all(node);
+ tipc_node_reset_links(node);
}
/* Caller should hold node lock for the passed node */
@@ -559,6 +984,290 @@ msg_full:
return -EMSGSIZE;
}
+static struct tipc_link *tipc_node_select_link(struct tipc_node *n, int sel,
+ int *bearer_id,
+ struct tipc_media_addr **maddr)
+{
+ int id = n->active_links[sel & 1];
+
+ if (unlikely(id < 0))
+ return NULL;
+
+ *bearer_id = id;
+ *maddr = &n->links[id].maddr;
+ return n->links[id].link;
+}
+
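
Link selection keys on the low bit of the caller-supplied selector, so a given flow keeps hitting the same active link while both links are up. A toy sketch of that mapping (bearer ids and port numbers are invented for illustration):

	#include <stdio.h>

	int main(void)
	{
		/* Two active links, on bearers 0 and 2; -1 would mean "none" */
		int active_links[2] = { 0, 2 };
		unsigned int port;

		/* Flows with even selectors use bearer 0, odd ones bearer 2 */
		for (port = 4000; port < 4004; port++)
			printf("port %u -> bearer %d\n", port,
			       active_links[port & 1]);
		return 0;
	}
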
+/**
+ * tipc_node_xmit() - the general link-level function for message sending
+ * @net: the applicable net namespace
+ * @list: chain of buffers containing message
+ * @dnode: address of destination node
+ * @selector: a number used for deterministic link selection
+ * Consumes the buffer chain, except when returning -ELINKCONG
+ * Returns 0 on success, otherwise errno: -ELINKCONG, -EHOSTUNREACH, -EMSGSIZE
+ */
+int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
+ u32 dnode, int selector)
+{
+ struct tipc_link *l = NULL;
+ struct tipc_node *n;
+ struct sk_buff_head xmitq;
+ struct tipc_media_addr *maddr;
+ int bearer_id;
+ int rc = -EHOSTUNREACH;
+
+ __skb_queue_head_init(&xmitq);
+ n = tipc_node_find(net, dnode);
+ if (likely(n)) {
+ tipc_node_lock(n);
+ l = tipc_node_select_link(n, selector, &bearer_id, &maddr);
+ if (likely(l))
+ rc = tipc_link_xmit(l, list, &xmitq);
+ tipc_node_unlock(n);
+ if (unlikely(rc == -ENOBUFS))
+ tipc_node_link_down(n, bearer_id, false);
+ tipc_node_put(n);
+ }
+ if (likely(!rc)) {
+ tipc_bearer_xmit(net, bearer_id, &xmitq, maddr);
+ return 0;
+ }
+ if (likely(in_own_node(net, dnode))) {
+ tipc_sk_rcv(net, list);
+ return 0;
+ }
+ return rc;
+}
+
+/* tipc_node_xmit_skb(): send single buffer to destination
+ * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
+ * messages, which will not be rejected
+ * The only exception is datagram messages rerouted after secondary
+ * lookup, which are rare and safe to dispose of anyway.
+ * TODO: Return real return value, and let callers use
+ * tipc_wait_for_sendpkt() where applicable
+ */
+int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
+ u32 selector)
+{
+ struct sk_buff_head head;
+ int rc;
+
+ skb_queue_head_init(&head);
+ __skb_queue_tail(&head, skb);
+ rc = tipc_node_xmit(net, &head, dnode, selector);
+ if (rc == -ELINKCONG)
+ kfree_skb(skb);
+ return 0;
+}
+
+/**
+ * tipc_node_check_state - check and, if necessary, update node state
+ * @skb: TIPC packet
+ * @bearer_id: identity of bearer delivering the packet
+ * Returns true if state is ok, otherwise false (the caller then drops the buffer)
+ */
+static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
+ int bearer_id, struct sk_buff_head *xmitq)
+{
+ struct tipc_msg *hdr = buf_msg(skb);
+ int usr = msg_user(hdr);
+ int mtyp = msg_type(hdr);
+ u16 oseqno = msg_seqno(hdr);
+ u16 iseqno = msg_seqno(msg_get_wrapped(hdr));
+ u16 exp_pkts = msg_msgcnt(hdr);
+ u16 rcv_nxt, syncpt, dlv_nxt;
+ int state = n->state;
+ struct tipc_link *l, *tnl, *pl = NULL;
+ struct tipc_media_addr *maddr;
+ int i, pb_id;
+
+ l = n->links[bearer_id].link;
+ if (!l)
+ return false;
+ rcv_nxt = l->rcv_nxt;
+
+ if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
+ return true;
+
+ /* Find parallel link, if any */
+ for (i = 0; i < MAX_BEARERS; i++) {
+ if ((i != bearer_id) && n->links[i].link) {
+ pl = n->links[i].link;
+ break;
+ }
+ }
+
+ /* Update node accessibility if applicable */
+ if (state == SELF_UP_PEER_COMING) {
+ if (!tipc_link_is_up(l))
+ return true;
+ if (!msg_peer_link_is_up(hdr))
+ return true;
+ tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
+ }
+
+ if (state == SELF_DOWN_PEER_LEAVING) {
+ if (msg_peer_node_is_up(hdr))
+ return false;
+ tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
+ }
+
+ /* Ignore duplicate packets */
+ if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
+ return true;
+
+ /* Initiate or update failover mode if applicable */
+ if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
+ syncpt = oseqno + exp_pkts - 1;
+ if (pl && tipc_link_is_up(pl)) {
+ pb_id = pl->bearer_id;
+ __tipc_node_link_down(n, &pb_id, xmitq, &maddr);
+ tipc_skb_queue_splice_tail_init(pl->inputq, l->inputq);
+ }
+ /* If pkts arrive out of order, use lowest calculated syncpt */
+ if (less(syncpt, n->sync_point))
+ n->sync_point = syncpt;
+ }
+
+ /* Open parallel link when tunnel link reaches synch point */
+ if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
+ if (!more(rcv_nxt, n->sync_point))
+ return true;
+ tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
+ if (pl)
+ tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
+ return true;
+ }
+
+ /* No synching needed if only one link */
+ if (!pl || !tipc_link_is_up(pl))
+ return true;
+
+ /* Initiate synch mode if applicable */
+ if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
+ syncpt = iseqno + exp_pkts - 1;
+ if (!tipc_link_is_up(l)) {
+ tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+ __tipc_node_link_up(n, bearer_id, xmitq);
+ }
+ if (n->state == SELF_UP_PEER_UP) {
+ n->sync_point = syncpt;
+ tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
+ tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
+ }
+ if (less(syncpt, n->sync_point))
+ n->sync_point = syncpt;
+ }
+
+ /* Open tunnel link when parallel link reaches synch point */
+ if (n->state == NODE_SYNCHING) {
+ if (tipc_link_is_synching(l)) {
+ tnl = l;
+ } else {
+ tnl = pl;
+ pl = l;
+ }
+ dlv_nxt = pl->rcv_nxt - mod(skb_queue_len(pl->inputq));
+ if (more(dlv_nxt, n->sync_point)) {
+ tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
+ tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
+ return true;
+ }
+ if (l == pl)
+ return true;
+ if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
+ return true;
+ if (usr == LINK_PROTOCOL)
+ return true;
+ return false;
+ }
+ return true;
+}
+
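The syncpt and delivery-point tests in tipc_node_check_state() (less(), more()) rely on modular 16-bit sequence arithmetic, so they remain correct across sequence-number wraparound. A standalone sketch of that comparison, assuming TIPC's helpers follow the usual serial-number semantics:

	#include <stdint.h>
	#include <stdio.h>

	/* Modular u16 comparison, as assumed for TIPC's less()/more() */
	static int less(uint16_t a, uint16_t b)
	{
		return (int16_t)(a - b) < 0;
	}

	static int more(uint16_t a, uint16_t b)
	{
		return (int16_t)(a - b) > 0;
	}

	int main(void)
	{
		/* Across wraparound, 5 still counts as later than 65530 */
		printf("less(65530, 5) = %d\n", less(65530, 5)); /* 1 */
		printf("more(5, 65530) = %d\n", more(5, 65530)); /* 1 */
		return 0;
	}
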
+/**
+ * tipc_rcv - process TIPC packets/messages arriving from off-node
+ * @net: the applicable net namespace
+ * @skb: TIPC packet
+ * @b: pointer to the bearer the message arrived on
+ *
+ * Invoked with no locks held. Bearer pointer must point to a valid bearer
+ * structure (i.e. cannot be NULL), but bearer can be inactive.
+ */
+void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
+{
+ struct sk_buff_head xmitq;
+ struct tipc_node *n;
+ struct tipc_msg *hdr = buf_msg(skb);
+ int usr = msg_user(hdr);
+ int bearer_id = b->identity;
+ struct tipc_link_entry *le;
+ int rc = 0;
+
+ __skb_queue_head_init(&xmitq);
+
+ /* Ensure message is well-formed */
+ if (unlikely(!tipc_msg_validate(skb)))
+ goto discard;
+
+ /* Handle arrival of a non-unicast link packet */
+ if (unlikely(msg_non_seq(hdr))) {
+ if (usr == LINK_CONFIG)
+ tipc_disc_rcv(net, skb, b);
+ else
+ tipc_bclink_rcv(net, skb);
+ return;
+ }
+
+ /* Locate neighboring node that sent packet */
+ n = tipc_node_find(net, msg_prevnode(hdr));
+ if (unlikely(!n))
+ goto discard;
+ le = &n->links[bearer_id];
+
+ tipc_node_lock(n);
+
+ /* Is reception permitted at the moment? */
+ if (!tipc_node_filter_pkt(n, hdr))
+ goto unlock;
+
+ if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
+ tipc_bclink_sync_state(n, hdr);
+
+ /* Release acked broadcast packets */
+ if (unlikely(n->bclink.acked != msg_bcast_ack(hdr)))
+ tipc_bclink_acknowledge(n, msg_bcast_ack(hdr));
+
+ /* Check and, if necessary, update node state */
+ if (likely(tipc_node_check_state(n, skb, bearer_id, &xmitq))) {
+ rc = tipc_link_rcv(le->link, skb, &xmitq);
+ skb = NULL;
+ }
+unlock:
+ tipc_node_unlock(n);
+
+ if (unlikely(rc & TIPC_LINK_UP_EVT))
+ tipc_node_link_up(n, bearer_id, &xmitq);
+
+ if (unlikely(rc & TIPC_LINK_DOWN_EVT))
+ tipc_node_link_down(n, bearer_id, false);
+
+ if (unlikely(!skb_queue_empty(&n->bclink.namedq)))
+ tipc_named_rcv(net, &n->bclink.namedq);
+
+ if (!skb_queue_empty(&le->inputq))
+ tipc_sk_rcv(net, &le->inputq);
+
+ if (!skb_queue_empty(&xmitq))
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
+
+ tipc_node_put(n);
+discard:
+ kfree_skb(skb);
+}
+
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
int err;
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 5a834cf14..344b3e759 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -45,23 +45,19 @@
/* Out-of-range value for node signature */
#define INVALID_NODE_SIG 0x10000
+#define INVALID_BEARER_ID -1
+
/* Flags used to take different actions according to flag type
- * TIPC_WAIT_PEER_LINKS_DOWN: wait to see that peer's links are down
- * TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down
* TIPC_NOTIFY_NODE_DOWN: notify node is down
* TIPC_NOTIFY_NODE_UP: notify node is up
* TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type
*/
enum {
- TIPC_MSG_EVT = 1,
- TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1),
- TIPC_WAIT_OWN_LINKS_DOWN = (1 << 2),
TIPC_NOTIFY_NODE_DOWN = (1 << 3),
TIPC_NOTIFY_NODE_UP = (1 << 4),
TIPC_WAKEUP_BCAST_USERS = (1 << 5),
TIPC_NOTIFY_LINK_UP = (1 << 6),
TIPC_NOTIFY_LINK_DOWN = (1 << 7),
- TIPC_NAMED_MSG_EVT = (1 << 8),
TIPC_BCAST_MSG_EVT = (1 << 9),
TIPC_BCAST_RESET = (1 << 10)
};
@@ -85,10 +81,17 @@ struct tipc_node_bclink {
u32 deferred_size;
struct sk_buff_head deferdq;
struct sk_buff *reasm_buf;
- int inputq_map;
+ struct sk_buff_head namedq;
bool recv_permitted;
};
+struct tipc_link_entry {
+ struct tipc_link *link;
+ u32 mtu;
+ struct sk_buff_head inputq;
+ struct tipc_media_addr maddr;
+};
+
/**
* struct tipc_node - TIPC node structure
* @addr: network address of node
@@ -98,11 +101,12 @@ struct tipc_node_bclink {
* @hash: links to adjacent nodes in unsorted hash chain
- * @curr_link: the link holding the node lock, if any
- * @active_links: pointers to active links to node
- * @links: pointers to all links to node
+ * @active_links: bearer ids of active links, used as index into links[] array
+ * @links: array containing references to all links to node
* @action_flags: bit mask of different types of node actions
* @bclink: broadcast-related info
+ * @state: connectivity state vs peer node
+ * @sync_point: sequence number where synch/failover is finished
* @list: links to adjacent nodes in sorted list of cluster's nodes
* @working_links: number of working links to node (both active and standby)
* @link_cnt: number of links to node
@@ -118,14 +122,13 @@ struct tipc_node {
spinlock_t lock;
struct net *net;
struct hlist_node hash;
- struct sk_buff_head *inputq;
- struct sk_buff_head *namedq;
- struct tipc_link *active_links[2];
- u32 act_mtus[2];
- struct tipc_link *links[MAX_BEARERS];
+ int active_links[2];
+ struct tipc_link_entry links[MAX_BEARERS];
int action_flags;
struct tipc_node_bclink bclink;
struct list_head list;
+ int state;
+ u16 sync_point;
int link_cnt;
u16 working_links;
u16 capabilities;
@@ -133,25 +136,32 @@ struct tipc_node {
u32 link_id;
struct list_head publ_list;
struct list_head conn_sks;
+ unsigned long keepalive_intv;
+ struct timer_list timer;
struct rcu_head rcu;
};
struct tipc_node *tipc_node_find(struct net *net, u32 addr);
void tipc_node_put(struct tipc_node *node);
-struct tipc_node *tipc_node_create(struct net *net, u32 addr);
void tipc_node_stop(struct net *net);
+void tipc_node_check_dest(struct net *net, u32 onode,
+ struct tipc_bearer *bearer,
+ u16 capabilities, u32 signature,
+ struct tipc_media_addr *maddr,
+ bool *respond, bool *dupl_addr);
+void tipc_node_delete_links(struct net *net, int bearer_id);
void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
-void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
-void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
-int tipc_node_active_links(struct tipc_node *n_ptr);
-int tipc_node_is_up(struct tipc_node *n_ptr);
+bool tipc_node_is_up(struct tipc_node *n);
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 node,
char *linkname, size_t len);
void tipc_node_unlock(struct tipc_node *node);
+int tipc_node_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
+ int selector);
+int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
+ u32 selector);
int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port);
void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port);
-
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
static inline void tipc_node_lock(struct tipc_node *node)
@@ -159,26 +169,30 @@ static inline void tipc_node_lock(struct tipc_node *node)
spin_lock_bh(&node->lock);
}
-static inline bool tipc_node_blocked(struct tipc_node *node)
+static inline struct tipc_link *node_active_link(struct tipc_node *n, int sel)
{
- return (node->action_flags & (TIPC_WAIT_PEER_LINKS_DOWN |
- TIPC_NOTIFY_NODE_DOWN | TIPC_WAIT_OWN_LINKS_DOWN));
+ int bearer_id = n->active_links[sel & 1];
+
+ if (unlikely(bearer_id == INVALID_BEARER_ID))
+ return NULL;
+
+ return n->links[bearer_id].link;
}
-static inline uint tipc_node_get_mtu(struct net *net, u32 addr, u32 selector)
+static inline unsigned int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
{
- struct tipc_node *node;
- u32 mtu;
-
- node = tipc_node_find(net, addr);
+ struct tipc_node *n;
+ int bearer_id;
+ unsigned int mtu = MAX_MSG_SIZE;
- if (likely(node)) {
- mtu = node->act_mtus[selector & 1];
- tipc_node_put(node);
- } else {
- mtu = MAX_MSG_SIZE;
- }
+ n = tipc_node_find(net, addr);
+ if (unlikely(!n))
+ return mtu;
+ bearer_id = n->active_links[sel & 1];
+ if (likely(bearer_id != INVALID_BEARER_ID))
+ mtu = n->links[bearer_id].mtu;
+ tipc_node_put(n);
return mtu;
}
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 3a7567f69..1060d52ff 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -248,6 +248,22 @@ static void tsk_advance_rx_queue(struct sock *sk)
kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}
+/* tipc_sk_respond(): send response message back to sender
+ */
+static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
+{
+ u32 selector;
+ u32 dnode;
+ u32 onode = tipc_own_addr(sock_net(sk));
+
+ if (!tipc_msg_reverse(onode, &skb, err))
+ return;
+
+ dnode = msg_destnode(buf_msg(skb));
+ selector = msg_origport(buf_msg(skb));
+ tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
+}
+
/**
* tsk_rej_rx_queue - reject all buffers in socket receive queue
*
@@ -256,13 +272,9 @@ static void tsk_advance_rx_queue(struct sock *sk)
static void tsk_rej_rx_queue(struct sock *sk)
{
struct sk_buff *skb;
- u32 dnode;
- u32 own_node = tsk_own_node(tipc_sk(sk));
- while ((skb = __skb_dequeue(&sk->sk_receive_queue))) {
- if (tipc_msg_reverse(own_node, skb, &dnode, TIPC_ERR_NO_PORT))
- tipc_link_xmit_skb(sock_net(sk), skb, dnode, 0);
- }
+ while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
+ tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
}
/* tsk_peer_msg - verify if message was sent by connected port's peer
@@ -441,9 +453,7 @@ static int tipc_release(struct socket *sock)
tsk->connected = 0;
tipc_node_remove_conn(net, dnode, tsk->portid);
}
- if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode,
- TIPC_ERR_NO_PORT))
- tipc_link_xmit_skb(net, skb, dnode, 0);
+ tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
}
}
@@ -456,7 +466,7 @@ static int tipc_release(struct socket *sock)
tsk_own_node(tsk), tsk_peer_port(tsk),
tsk->portid, TIPC_ERR_NO_PORT);
if (skb)
- tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
+ tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
tipc_node_remove_conn(net, dnode, tsk->portid);
}
@@ -686,21 +696,22 @@ new_mtu:
do {
rc = tipc_bclink_xmit(net, pktchain);
- if (likely(rc >= 0)) {
- rc = dsz;
- break;
+ if (likely(!rc))
+ return dsz;
+
+ if (rc == -ELINKCONG) {
+ tsk->link_cong = 1;
+ rc = tipc_wait_for_sndmsg(sock, &timeo);
+ if (!rc)
+ continue;
}
+ __skb_queue_purge(pktchain);
if (rc == -EMSGSIZE) {
msg->msg_iter = save;
goto new_mtu;
}
- if (rc != -ELINKCONG)
- break;
- tipc_sk(sk)->link_cong = 1;
- rc = tipc_wait_for_sndmsg(sock, &timeo);
- if (rc)
- __skb_queue_purge(pktchain);
- } while (!rc);
+ break;
+ } while (1);
return rc;
}
@@ -763,35 +774,35 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
/**
* tipc_sk_proto_rcv - receive a connection mng protocol message
* @tsk: receiving socket
- * @skb: pointer to message buffer. Set to NULL if buffer is consumed.
+ * @skb: pointer to message buffer.
*/
-static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff **skb)
+static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
{
- struct tipc_msg *msg = buf_msg(*skb);
+ struct sock *sk = &tsk->sk;
+ struct tipc_msg *hdr = buf_msg(skb);
+ int mtyp = msg_type(hdr);
int conn_cong;
- u32 dnode;
- u32 own_node = tsk_own_node(tsk);
+
/* Ignore if connection cannot be validated: */
- if (!tsk_peer_msg(tsk, msg))
+ if (!tsk_peer_msg(tsk, hdr))
goto exit;
tsk->probing_state = TIPC_CONN_OK;
- if (msg_type(msg) == CONN_ACK) {
+ if (mtyp == CONN_PROBE) {
+ msg_set_type(hdr, CONN_PROBE_REPLY);
+ tipc_sk_respond(sk, skb, TIPC_OK);
+ return;
+ } else if (mtyp == CONN_ACK) {
conn_cong = tsk_conn_cong(tsk);
- tsk->sent_unacked -= msg_msgcnt(msg);
+ tsk->sent_unacked -= msg_msgcnt(hdr);
if (conn_cong)
- tsk->sk.sk_write_space(&tsk->sk);
- } else if (msg_type(msg) == CONN_PROBE) {
- if (tipc_msg_reverse(own_node, *skb, &dnode, TIPC_OK)) {
- msg_set_type(msg, CONN_PROBE_REPLY);
- return;
- }
+ sk->sk_write_space(sk);
+ } else if (mtyp != CONN_PROBE_REPLY) {
+ pr_warn("Received unknown CONN_PROTO msg\n");
}
- /* Do nothing if msg_type() == CONN_PROBE_REPLY */
exit:
- kfree_skb(*skb);
- *skb = NULL;
+ kfree_skb(skb);
}
static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
@@ -924,24 +935,25 @@ new_mtu:
do {
skb = skb_peek(pktchain);
TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
- rc = tipc_link_xmit(net, pktchain, dnode, tsk->portid);
- if (likely(rc >= 0)) {
+ rc = tipc_node_xmit(net, pktchain, dnode, tsk->portid);
+ if (likely(!rc)) {
if (sock->state != SS_READY)
sock->state = SS_CONNECTING;
- rc = dsz;
- break;
+ return dsz;
}
+ if (rc == -ELINKCONG) {
+ tsk->link_cong = 1;
+ rc = tipc_wait_for_sndmsg(sock, &timeo);
+ if (!rc)
+ continue;
+ }
+ __skb_queue_purge(pktchain);
if (rc == -EMSGSIZE) {
m->msg_iter = save;
goto new_mtu;
}
- if (rc != -ELINKCONG)
- break;
- tsk->link_cong = 1;
- rc = tipc_wait_for_sndmsg(sock, &timeo);
- if (rc)
- __skb_queue_purge(pktchain);
- } while (!rc);
+ break;
+ } while (1);
return rc;
}
@@ -1043,15 +1055,16 @@ next:
return rc;
do {
if (likely(!tsk_conn_cong(tsk))) {
- rc = tipc_link_xmit(net, pktchain, dnode, portid);
+ rc = tipc_node_xmit(net, pktchain, dnode, portid);
if (likely(!rc)) {
tsk->sent_unacked++;
sent += send;
if (sent == dsz)
- break;
+ return dsz;
goto next;
}
if (rc == -EMSGSIZE) {
+ __skb_queue_purge(pktchain);
tsk->max_pkt = tipc_node_get_mtu(net, dnode,
portid);
m->msg_iter = save;
@@ -1059,13 +1072,13 @@ next:
}
if (rc != -ELINKCONG)
break;
+
tsk->link_cong = 1;
}
rc = tipc_wait_for_sndpkt(sock, &timeo);
- if (rc)
- __skb_queue_purge(pktchain);
} while (!rc);
+ __skb_queue_purge(pktchain);
return sent ? sent : rc;
}
@@ -1221,7 +1234,7 @@ static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
return;
msg = buf_msg(skb);
msg_set_msgcnt(msg, ack);
- tipc_link_xmit_skb(net, skb, dnode, msg_link_selector(msg));
+ tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
}
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
@@ -1507,82 +1520,81 @@ static void tipc_data_ready(struct sock *sk)
* @tsk: TIPC socket
* @skb: pointer to message buffer
*
- * Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise
+ * Returns true if everything ok, false otherwise
*/
-static int filter_connect(struct tipc_sock *tsk, struct sk_buff **skb)
+static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
{
struct sock *sk = &tsk->sk;
struct net *net = sock_net(sk);
struct socket *sock = sk->sk_socket;
- struct tipc_msg *msg = buf_msg(*skb);
- int retval = -TIPC_ERR_NO_PORT;
+ struct tipc_msg *hdr = buf_msg(skb);
- if (msg_mcast(msg))
- return retval;
+ if (unlikely(msg_mcast(hdr)))
+ return false;
switch ((int)sock->state) {
case SS_CONNECTED:
+
/* Accept only connection-based messages sent by peer */
- if (tsk_peer_msg(tsk, msg)) {
- if (unlikely(msg_errcode(msg))) {
- sock->state = SS_DISCONNECTING;
- tsk->connected = 0;
- /* let timer expire on it's own */
- tipc_node_remove_conn(net, tsk_peer_node(tsk),
- tsk->portid);
- }
- retval = TIPC_OK;
+ if (unlikely(!tsk_peer_msg(tsk, hdr)))
+ return false;
+
+ if (unlikely(msg_errcode(hdr))) {
+ sock->state = SS_DISCONNECTING;
+ tsk->connected = 0;
+ /* Let timer expire on its own */
+ tipc_node_remove_conn(net, tsk_peer_node(tsk),
+ tsk->portid);
}
- break;
+ return true;
+
case SS_CONNECTING:
- /* Accept only ACK or NACK message */
- if (unlikely(!msg_connected(msg)))
- break;
+ /* Accept only ACK or NACK message */
+ if (unlikely(!msg_connected(hdr)))
+ return false;
- if (unlikely(msg_errcode(msg))) {
+ if (unlikely(msg_errcode(hdr))) {
sock->state = SS_DISCONNECTING;
sk->sk_err = ECONNREFUSED;
- retval = TIPC_OK;
- break;
+ return true;
}
- if (unlikely(msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)) {
+ if (unlikely(!msg_isdata(hdr))) {
sock->state = SS_DISCONNECTING;
sk->sk_err = EINVAL;
- retval = TIPC_OK;
- break;
+ return true;
}
- tipc_sk_finish_conn(tsk, msg_origport(msg), msg_orignode(msg));
- msg_set_importance(&tsk->phdr, msg_importance(msg));
+ tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
+ msg_set_importance(&tsk->phdr, msg_importance(hdr));
sock->state = SS_CONNECTED;
- /* If an incoming message is an 'ACK-', it should be
- * discarded here because it doesn't contain useful
- * data. In addition, we should try to wake up
- * connect() routine if sleeping.
- */
- if (msg_data_sz(msg) == 0) {
- kfree_skb(*skb);
- *skb = NULL;
- if (waitqueue_active(sk_sleep(sk)))
- wake_up_interruptible(sk_sleep(sk));
- }
- retval = TIPC_OK;
- break;
+ /* If 'ACK+' message, add to socket receive queue */
+ if (msg_data_sz(hdr))
+ return true;
+
+ /* If empty 'ACK-' message, wake up sleeping connect() */
+ if (waitqueue_active(sk_sleep(sk)))
+ wake_up_interruptible(sk_sleep(sk));
+
+ /* 'ACK-' message is neither accepted nor rejected: */
+ msg_set_dest_droppable(hdr, 1);
+ return false;
+
case SS_LISTENING:
case SS_UNCONNECTED:
+
/* Accept only SYN message */
- if (!msg_connected(msg) && !(msg_errcode(msg)))
- retval = TIPC_OK;
+ if (!msg_connected(hdr) && !(msg_errcode(hdr)))
+ return true;
break;
case SS_DISCONNECTING:
break;
default:
pr_err("Unknown socket state %u\n", sock->state);
}
- return retval;
+ return false;
}
/**
@@ -1617,61 +1629,70 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
/**
* filter_rcv - validate incoming message
* @sk: socket
- * @skb: pointer to message. Set to NULL if buffer is consumed.
+ * @skb: pointer to message.
*
* Enqueues message on receive queue if acceptable; optionally handles
* disconnect indication for a connected socket.
*
* Called with socket lock already taken
*
- * Returns 0 (TIPC_OK) if message was ok, -TIPC error code if rejected
+ * Returns true if message was added to socket receive queue, otherwise false
*/
-static int filter_rcv(struct sock *sk, struct sk_buff **skb)
+static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
{
struct socket *sock = sk->sk_socket;
struct tipc_sock *tsk = tipc_sk(sk);
- struct tipc_msg *msg = buf_msg(*skb);
- unsigned int limit = rcvbuf_limit(sk, *skb);
- int rc = TIPC_OK;
+ struct tipc_msg *hdr = buf_msg(skb);
+ unsigned int limit = rcvbuf_limit(sk, skb);
+ int err = TIPC_OK;
+ int usr = msg_user(hdr);
- if (unlikely(msg_user(msg) == CONN_MANAGER)) {
+ if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
tipc_sk_proto_rcv(tsk, skb);
- return TIPC_OK;
+ return false;
}
- if (unlikely(msg_user(msg) == SOCK_WAKEUP)) {
- kfree_skb(*skb);
+ if (unlikely(usr == SOCK_WAKEUP)) {
+ kfree_skb(skb);
tsk->link_cong = 0;
sk->sk_write_space(sk);
- *skb = NULL;
- return TIPC_OK;
+ return false;
}
- /* Reject message if it is wrong sort of message for socket */
- if (msg_type(msg) > TIPC_DIRECT_MSG)
- return -TIPC_ERR_NO_PORT;
+ /* Drop if illegal message type */
+ if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG)) {
+ kfree_skb(skb);
+ return false;
+ }
- if (sock->state == SS_READY) {
- if (msg_connected(msg))
- return -TIPC_ERR_NO_PORT;
- } else {
- rc = filter_connect(tsk, skb);
- if (rc != TIPC_OK || !*skb)
- return rc;
+ /* Reject if wrong message type for current socket state */
+ if (unlikely(sock->state == SS_READY)) {
+ if (msg_connected(hdr)) {
+ err = TIPC_ERR_NO_PORT;
+ goto reject;
+ }
+ } else if (unlikely(!filter_connect(tsk, skb))) {
+ err = TIPC_ERR_NO_PORT;
+ goto reject;
}
/* Reject message if there isn't room to queue it */
- if (sk_rmem_alloc_get(sk) + (*skb)->truesize >= limit)
- return -TIPC_ERR_OVERLOAD;
+ if (unlikely(sk_rmem_alloc_get(sk) + skb->truesize >= limit)) {
+ err = TIPC_ERR_OVERLOAD;
+ goto reject;
+ }
/* Enqueue message */
- TIPC_SKB_CB(*skb)->handle = NULL;
- __skb_queue_tail(&sk->sk_receive_queue, *skb);
- skb_set_owner_r(*skb, sk);
+ TIPC_SKB_CB(skb)->handle = NULL;
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
+ skb_set_owner_r(skb, sk);
sk->sk_data_ready(sk);
- *skb = NULL;
- return TIPC_OK;
+ return true;
+
+reject:
+ tipc_sk_respond(sk, skb, err);
+ return false;
}
/**
@@ -1685,22 +1706,10 @@ static int filter_rcv(struct sock *sk, struct sk_buff **skb)
*/
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
- int err;
- atomic_t *dcnt;
- u32 dnode;
- struct tipc_sock *tsk = tipc_sk(sk);
- struct net *net = sock_net(sk);
- uint truesize = skb->truesize;
+ unsigned int truesize = skb->truesize;
- err = filter_rcv(sk, &skb);
- if (likely(!skb)) {
- dcnt = &tsk->dupl_rcvcnt;
- if (atomic_read(dcnt) < TIPC_CONN_OVERLOAD_LIMIT)
- atomic_add(truesize, dcnt);
- return 0;
- }
- if (!err || tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode, -err))
- tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
+ if (likely(filter_rcv(sk, skb)))
+ atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
return 0;
}
@@ -1710,45 +1719,43 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
* @inputq: list of incoming buffers with potentially different destinations
* @sk: socket where the buffers should be enqueued
* @dport: port number for the socket
- * @_skb: returned buffer to be forwarded or rejected, if applicable
*
* Caller must hold socket lock
- *
- * Returns TIPC_OK if all buffers enqueued, otherwise -TIPC_ERR_OVERLOAD
- * or -TIPC_ERR_NO_PORT
*/
-static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
- u32 dport, struct sk_buff **_skb)
+static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
+ u32 dport)
{
unsigned int lim;
atomic_t *dcnt;
- int err;
struct sk_buff *skb;
unsigned long time_limit = jiffies + 2;
while (skb_queue_len(inputq)) {
if (unlikely(time_after_eq(jiffies, time_limit)))
- return TIPC_OK;
+ return;
+
skb = tipc_skb_dequeue(inputq, dport);
if (unlikely(!skb))
- return TIPC_OK;
+ return;
+
+ /* Add message directly to receive queue if possible */
if (!sock_owned_by_user(sk)) {
- err = filter_rcv(sk, &skb);
- if (likely(!skb))
- continue;
- *_skb = skb;
- return err;
+ filter_rcv(sk, skb);
+ continue;
}
+
+ /* Try backlog, compensating for double-counted bytes */
dcnt = &tipc_sk(sk)->dupl_rcvcnt;
if (sk->sk_backlog.len)
atomic_set(dcnt, 0);
lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
if (likely(!sk_add_backlog(sk, skb, lim)))
continue;
- *_skb = skb;
- return -TIPC_ERR_OVERLOAD;
+
+ /* Overload => reject message back to sender */
+ tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD);
+ break;
}
- return TIPC_OK;
}
/**
@@ -1756,49 +1763,46 @@ static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
* @inputq: buffer list containing the buffers
* Consumes all buffers in list until inputq is empty
* Note: may be called in multiple threads referring to the same queue
- * Returns 0 if last buffer was accepted, otherwise -EHOSTUNREACH
- * Only node local calls check the return value, sending single-buffer queues
*/
-int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
+void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
u32 dnode, dport = 0;
int err;
- struct sk_buff *skb;
struct tipc_sock *tsk;
- struct tipc_net *tn;
struct sock *sk;
+ struct sk_buff *skb;
while (skb_queue_len(inputq)) {
- err = -TIPC_ERR_NO_PORT;
- skb = NULL;
dport = tipc_skb_peek_port(inputq, dport);
tsk = tipc_sk_lookup(net, dport);
+
if (likely(tsk)) {
sk = &tsk->sk;
if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
- err = tipc_sk_enqueue(inputq, sk, dport, &skb);
+ tipc_sk_enqueue(inputq, sk, dport);
spin_unlock_bh(&sk->sk_lock.slock);
- dport = 0;
}
sock_put(sk);
- } else {
- skb = tipc_skb_dequeue(inputq, dport);
- }
- if (likely(!skb))
continue;
- if (tipc_msg_lookup_dest(net, skb, &dnode, &err))
- goto xmit;
- if (!err) {
- dnode = msg_destnode(buf_msg(skb));
- goto xmit;
}
- tn = net_generic(net, tipc_net_id);
- if (!tipc_msg_reverse(tn->own_addr, skb, &dnode, -err))
+
+ /* No destination socket => dequeue skb if still there */
+ skb = tipc_skb_dequeue(inputq, dport);
+ if (!skb)
+ return;
+
+ /* Try secondary lookup if unresolved named message */
+ err = TIPC_ERR_NO_PORT;
+ if (tipc_msg_lookup_dest(net, skb, &err))
+ goto xmit;
+
+ /* Prepare for message rejection */
+ if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
continue;
xmit:
- tipc_link_xmit_skb(net, skb, dnode, dport);
+ dnode = msg_destnode(buf_msg(skb));
+ tipc_node_xmit_skb(net, skb, dnode, dport);
}
- return err ? -EHOSTUNREACH : 0;
}
static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
@@ -2067,7 +2071,10 @@ static int tipc_shutdown(struct socket *sock, int how)
struct net *net = sock_net(sk);
struct tipc_sock *tsk = tipc_sk(sk);
struct sk_buff *skb;
- u32 dnode;
+ u32 dnode = tsk_peer_node(tsk);
+ u32 dport = tsk_peer_port(tsk);
+ u32 onode = tipc_own_addr(net);
+ u32 oport = tsk->portid;
int res;
if (how != SHUT_RDWR)
@@ -2080,6 +2087,8 @@ static int tipc_shutdown(struct socket *sock, int how)
case SS_CONNECTED:
restart:
+ dnode = tsk_peer_node(tsk);
+
/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
skb = __skb_dequeue(&sk->sk_receive_queue);
if (skb) {
@@ -2087,19 +2096,13 @@ restart:
kfree_skb(skb);
goto restart;
}
- if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode,
- TIPC_CONN_SHUTDOWN))
- tipc_link_xmit_skb(net, skb, dnode,
- tsk->portid);
+ tipc_sk_respond(sk, skb, TIPC_CONN_SHUTDOWN);
} else {
- dnode = tsk_peer_node(tsk);
-
skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
TIPC_CONN_MSG, SHORT_H_SIZE,
- 0, dnode, tsk_own_node(tsk),
- tsk_peer_port(tsk),
- tsk->portid, TIPC_CONN_SHUTDOWN);
- tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
+ 0, dnode, onode, dport, oport,
+ TIPC_CONN_SHUTDOWN);
+ tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
}
tsk->connected = 0;
sock->state = SS_DISCONNECTING;
@@ -2161,7 +2164,7 @@ static void tipc_sk_timeout(unsigned long data)
}
bh_unlock_sock(sk);
if (skb)
- tipc_link_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
+ tipc_node_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
exit:
sock_put(sk);
}
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
index bf6551389..4241f2206 100644
--- a/net/tipc/socket.h
+++ b/net/tipc/socket.h
@@ -44,7 +44,7 @@
SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
int tipc_socket_init(void);
void tipc_socket_stop(void);
-int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq);
+void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq);
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
struct sk_buff_head *inputq);
void tipc_sk_reinit(struct net *net);
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 66deebc66..cd7c5f131 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -48,10 +48,13 @@
#include <linux/tipc_netlink.h>
#include "core.h"
#include "bearer.h"
+#include "msg.h"
/* IANA assigned UDP port */
#define UDP_PORT_DEFAULT 6118
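+/* 28 bytes should cover an IPv4 header (20) plus a UDP header (8) */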
+#define UDP_MIN_HEADROOM 28
+
static const struct nla_policy tipc_nl_udp_policy[TIPC_NLA_UDP_MAX + 1] = {
[TIPC_NLA_UDP_UNSPEC] = {.type = NLA_UNSPEC},
[TIPC_NLA_UDP_LOCAL] = {.type = NLA_BINARY,
@@ -156,6 +159,9 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
struct sk_buff *clone;
struct rtable *rt;
+ if (skb_headroom(skb) < UDP_MIN_HEADROOM &&
+     pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC))
+ return -ENOMEM;
+
clone = skb_clone(skb, GFP_ATOMIC);
skb_set_inner_protocol(clone, htons(ETH_P_TIPC));
ub = rcu_dereference_rtnl(b->media_ptr);
@@ -194,7 +200,8 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
.saddr = src->ipv6,
.flowi6_proto = IPPROTO_UDP
};
- err = ipv6_stub->ipv6_dst_lookup(ub->ubsock->sk, &ndst, &fl6);
+ err = ipv6_stub->ipv6_dst_lookup(net, ub->ubsock->sk, &ndst,
+ &fl6);
if (err)
goto tx_error;
ttl = ip6_dst_hoplimit(ndst);
@@ -216,6 +223,10 @@ static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
{
struct udp_bearer *ub;
struct tipc_bearer *b;
+ int usr = msg_user(buf_msg(skb));
+
+ if ((usr == LINK_PROTOCOL) || (usr == NAME_DISTRIBUTOR))
+ skb_linearize(skb);
ub = rcu_dereference_sk_user_data(sk);
if (!ub) {
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index df5fc6b34..00e8a349a 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1948,13 +1948,13 @@ int __vsock_core_init(const struct vsock_transport *t, struct module *owner)
err = misc_register(&vsock_device);
if (err) {
pr_err("Failed to register misc device\n");
- return -ENOENT;
+ goto err_reset_transport;
}
err = proto_register(&vsock_proto, 1); /* we want our slab */
if (err) {
pr_err("Cannot register vsock protocol\n");
- goto err_misc_deregister;
+ goto err_deregister_misc;
}
err = sock_register(&vsock_family_ops);
@@ -1969,8 +1969,9 @@ int __vsock_core_init(const struct vsock_transport *t, struct module *owner)
err_unregister_proto:
proto_unregister(&vsock_proto);
-err_misc_deregister:
+err_deregister_misc:
misc_deregister(&vsock_device);
+err_reset_transport:
transport = NULL;
err_busy:
mutex_unlock(&vsock_register_mutex);
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 1f63daff3..7555cad83 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -40,13 +40,11 @@
static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg);
static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg);
-static void vmci_transport_peer_attach_cb(u32 sub_id,
- const struct vmci_event_data *ed,
- void *client_data);
static void vmci_transport_peer_detach_cb(u32 sub_id,
const struct vmci_event_data *ed,
void *client_data);
static void vmci_transport_recv_pkt_work(struct work_struct *work);
+static void vmci_transport_cleanup(struct work_struct *work);
static int vmci_transport_recv_listen(struct sock *sk,
struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_server(
@@ -75,6 +73,10 @@ struct vmci_transport_recv_pkt_info {
struct vmci_transport_packet pkt;
};
+static LIST_HEAD(vmci_transport_cleanup_list);
+static DEFINE_SPINLOCK(vmci_transport_cleanup_lock);
+static DECLARE_WORK(vmci_transport_cleanup_work, vmci_transport_cleanup);
+
static struct vmci_handle vmci_transport_stream_handle = { VMCI_INVALID_ID,
VMCI_INVALID_ID };
static u32 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
@@ -791,44 +793,6 @@ out:
return err;
}
-static void vmci_transport_peer_attach_cb(u32 sub_id,
- const struct vmci_event_data *e_data,
- void *client_data)
-{
- struct sock *sk = client_data;
- const struct vmci_event_payload_qp *e_payload;
- struct vsock_sock *vsk;
-
- e_payload = vmci_event_data_const_payload(e_data);
-
- vsk = vsock_sk(sk);
-
- /* We don't ask for delayed CBs when we subscribe to this event (we
- * pass 0 as flags to vmci_event_subscribe()). VMCI makes no
- * guarantees in that case about what context we might be running in,
- * so it could be BH or process, blockable or non-blockable. So we
- * need to account for all possible contexts here.
- */
- local_bh_disable();
- bh_lock_sock(sk);
-
- /* XXX This is lame, we should provide a way to lookup sockets by
- * qp_handle.
- */
- if (vmci_handle_is_equal(vmci_trans(vsk)->qp_handle,
- e_payload->handle)) {
- /* XXX This doesn't do anything, but in the future we may want
- * to set a flag here to verify the attach really did occur and
- * we weren't just sent a datagram claiming it was.
- */
- goto out;
- }
-
-out:
- bh_unlock_sock(sk);
- local_bh_enable();
-}
-
static void vmci_transport_handle_detach(struct sock *sk)
{
struct vsock_sock *vsk;
@@ -871,28 +835,38 @@ static void vmci_transport_peer_detach_cb(u32 sub_id,
const struct vmci_event_data *e_data,
void *client_data)
{
- struct sock *sk = client_data;
+ struct vmci_transport *trans = client_data;
const struct vmci_event_payload_qp *e_payload;
- struct vsock_sock *vsk;
e_payload = vmci_event_data_const_payload(e_data);
- vsk = vsock_sk(sk);
- if (vmci_handle_is_invalid(e_payload->handle))
- return;
-
- /* Same rules for locking as for peer_attach_cb(). */
- local_bh_disable();
- bh_lock_sock(sk);
/* XXX This is lame, we should provide a way to lookup sockets by
* qp_handle.
*/
- if (vmci_handle_is_equal(vmci_trans(vsk)->qp_handle,
- e_payload->handle))
- vmci_transport_handle_detach(sk);
+ if (vmci_handle_is_invalid(e_payload->handle) ||
+ vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
+ return;
- bh_unlock_sock(sk);
- local_bh_enable();
+ /* We don't ask for delayed CBs when we subscribe to this event (we
+ * pass 0 as flags to vmci_event_subscribe()). VMCI makes no
+ * guarantees in that case about what context we might be running in,
+ * so it could be BH or process, blockable or non-blockable. So we
+ * need to account for all possible contexts here.
+ */
+ spin_lock_bh(&trans->lock);
+ if (!trans->sk)
+ goto out;
+
+ /* Apart from here, trans->lock is only grabbed as part of sk destruct,
+ * where trans->sk isn't locked.
+ */
+ bh_lock_sock(trans->sk);
+
+ vmci_transport_handle_detach(trans->sk);
+
+ bh_unlock_sock(trans->sk);
+out:
+ spin_unlock_bh(&trans->lock);
}
static void vmci_transport_qp_resumed_cb(u32 sub_id,
@@ -1181,7 +1155,7 @@ vmci_transport_recv_connecting_server(struct sock *listener,
*/
err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
vmci_transport_peer_detach_cb,
- pending, &detach_sub_id);
+ vmci_trans(vpending), &detach_sub_id);
if (err < VMCI_SUCCESS) {
vmci_transport_send_reset(pending, pkt);
err = vmci_transport_error_to_vsock_error(err);
@@ -1321,7 +1295,6 @@ vmci_transport_recv_connecting_client(struct sock *sk,
|| vmci_trans(vsk)->qpair
|| vmci_trans(vsk)->produce_size != 0
|| vmci_trans(vsk)->consume_size != 0
- || vmci_trans(vsk)->attach_sub_id != VMCI_INVALID_ID
|| vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
skerr = EPROTO;
err = -EINVAL;
@@ -1389,7 +1362,6 @@ static int vmci_transport_recv_connecting_client_negotiate(
struct vsock_sock *vsk;
struct vmci_handle handle;
struct vmci_qp *qpair;
- u32 attach_sub_id;
u32 detach_sub_id;
bool is_local;
u32 flags;
@@ -1399,7 +1371,6 @@ static int vmci_transport_recv_connecting_client_negotiate(
vsk = vsock_sk(sk);
handle = VMCI_INVALID_HANDLE;
- attach_sub_id = VMCI_INVALID_ID;
detach_sub_id = VMCI_INVALID_ID;
/* If we have gotten here then we should be past the point where old
@@ -1444,23 +1415,15 @@ static int vmci_transport_recv_connecting_client_negotiate(
goto destroy;
}
- /* Subscribe to attach and detach events first.
+ /* Subscribe to detach events first.
*
* XXX We attach once for each queue pair created for now so it is easy
* to find the socket (it's provided), but later we should only
* subscribe once and add a way to lookup sockets by queue pair handle.
*/
- err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_ATTACH,
- vmci_transport_peer_attach_cb,
- sk, &attach_sub_id);
- if (err < VMCI_SUCCESS) {
- err = vmci_transport_error_to_vsock_error(err);
- goto destroy;
- }
-
err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
vmci_transport_peer_detach_cb,
- sk, &detach_sub_id);
+ vmci_trans(vsk), &detach_sub_id);
if (err < VMCI_SUCCESS) {
err = vmci_transport_error_to_vsock_error(err);
goto destroy;
@@ -1496,7 +1459,6 @@ static int vmci_transport_recv_connecting_client_negotiate(
vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size =
pkt->u.size;
- vmci_trans(vsk)->attach_sub_id = attach_sub_id;
vmci_trans(vsk)->detach_sub_id = detach_sub_id;
vmci_trans(vsk)->notify_ops->process_negotiate(sk);
@@ -1504,9 +1466,6 @@ static int vmci_transport_recv_connecting_client_negotiate(
return 0;
destroy:
- if (attach_sub_id != VMCI_INVALID_ID)
- vmci_event_unsubscribe(attach_sub_id);
-
if (detach_sub_id != VMCI_INVALID_ID)
vmci_event_unsubscribe(detach_sub_id);
@@ -1607,9 +1566,11 @@ static int vmci_transport_socket_init(struct vsock_sock *vsk,
vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
vmci_trans(vsk)->qpair = NULL;
vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size = 0;
- vmci_trans(vsk)->attach_sub_id = vmci_trans(vsk)->detach_sub_id =
- VMCI_INVALID_ID;
+ vmci_trans(vsk)->detach_sub_id = VMCI_INVALID_ID;
vmci_trans(vsk)->notify_ops = NULL;
+ INIT_LIST_HEAD(&vmci_trans(vsk)->elem);
+ vmci_trans(vsk)->sk = &vsk->sk;
+ spin_lock_init(&vmci_trans(vsk)->lock);
if (psk) {
vmci_trans(vsk)->queue_pair_size =
vmci_trans(psk)->queue_pair_size;
@@ -1629,29 +1590,57 @@ static int vmci_transport_socket_init(struct vsock_sock *vsk,
return 0;
}
-static void vmci_transport_destruct(struct vsock_sock *vsk)
+static void vmci_transport_free_resources(struct list_head *transport_list)
{
- if (vmci_trans(vsk)->attach_sub_id != VMCI_INVALID_ID) {
- vmci_event_unsubscribe(vmci_trans(vsk)->attach_sub_id);
- vmci_trans(vsk)->attach_sub_id = VMCI_INVALID_ID;
- }
+ while (!list_empty(transport_list)) {
+ struct vmci_transport *transport =
+ list_first_entry(transport_list, struct vmci_transport,
+ elem);
+ list_del(&transport->elem);
- if (vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
- vmci_event_unsubscribe(vmci_trans(vsk)->detach_sub_id);
- vmci_trans(vsk)->detach_sub_id = VMCI_INVALID_ID;
- }
+ if (transport->detach_sub_id != VMCI_INVALID_ID) {
+ vmci_event_unsubscribe(transport->detach_sub_id);
+ transport->detach_sub_id = VMCI_INVALID_ID;
+ }
- if (!vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)) {
- vmci_qpair_detach(&vmci_trans(vsk)->qpair);
- vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
- vmci_trans(vsk)->produce_size = 0;
- vmci_trans(vsk)->consume_size = 0;
+ if (!vmci_handle_is_invalid(transport->qp_handle)) {
+ vmci_qpair_detach(&transport->qpair);
+ transport->qp_handle = VMCI_INVALID_HANDLE;
+ transport->produce_size = 0;
+ transport->consume_size = 0;
+ }
+
+ kfree(transport);
}
+}
+
+static void vmci_transport_cleanup(struct work_struct *work)
+{
+ LIST_HEAD(pending);
+
+ spin_lock_bh(&vmci_transport_cleanup_lock);
+ list_replace_init(&vmci_transport_cleanup_list, &pending);
+ spin_unlock_bh(&vmci_transport_cleanup_lock);
+ vmci_transport_free_resources(&pending);
+}
+
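Since the detach callback can now run against a socket that is being destructed, the transport is freed from a work item instead of directly in the destructor. A minimal userspace sketch of the same deferred-cleanup pattern (single-threaded stand-in; the spinlock and schedule_work() steps are only noted in comments):

	#include <stdio.h>
	#include <stdlib.h>

	/* Toy deferred-cleanup list: the destructor only enqueues; a later
	 * "work" pass frees everything outside the destructor's context.
	 */
	struct item {
		struct item *next;
		int id;
	};

	static struct item *cleanup_list;

	static void defer_free(struct item *it)
	{
		/* the kernel takes vmci_transport_cleanup_lock here */
		it->next = cleanup_list;
		cleanup_list = it;
	}

	static void cleanup_work(void)
	{
		struct item *it;

		while ((it = cleanup_list)) {
			cleanup_list = it->next;
			printf("freeing item %d\n", it->id);
			free(it);
		}
	}

	int main(void)
	{
		struct item *a = malloc(sizeof(*a));
		struct item *b = malloc(sizeof(*b));

		if (!a || !b)
			return 1;
		a->id = 1;
		b->id = 2;
		defer_free(a);
		defer_free(b);
		cleanup_work(); /* the kernel runs this via schedule_work() */
		return 0;
	}
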
+static void vmci_transport_destruct(struct vsock_sock *vsk)
+{
+ /* Ensure that the detach callback doesn't use the sk/vsk
+ * we are about to destruct.
+ */
+ spin_lock_bh(&vmci_trans(vsk)->lock);
+ vmci_trans(vsk)->sk = NULL;
+ spin_unlock_bh(&vmci_trans(vsk)->lock);
if (vmci_trans(vsk)->notify_ops)
vmci_trans(vsk)->notify_ops->socket_destruct(vsk);
- kfree(vsk->trans);
+ spin_lock_bh(&vmci_transport_cleanup_lock);
+ list_add(&vmci_trans(vsk)->elem, &vmci_transport_cleanup_list);
+ spin_unlock_bh(&vmci_transport_cleanup_lock);
+ schedule_work(&vmci_transport_cleanup_work);
+
vsk->trans = NULL;
}
@@ -2146,6 +2135,9 @@ module_init(vmci_transport_init);
static void __exit vmci_transport_exit(void)
{
+ cancel_work_sync(&vmci_transport_cleanup_work);
+ vmci_transport_free_resources(&vmci_transport_cleanup_list);
+
if (!vmci_handle_is_invalid(vmci_transport_stream_handle)) {
if (vmci_datagram_destroy_handle(
vmci_transport_stream_handle) != VMCI_SUCCESS)
@@ -2164,6 +2156,7 @@ module_exit(vmci_transport_exit);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
+MODULE_VERSION("1.0.2.0-k");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("vmware_vsock");
MODULE_ALIAS_NETPROTO(PF_VSOCK);
diff --git a/net/vmw_vsock/vmci_transport.h b/net/vmw_vsock/vmci_transport.h
index ce6c9623d..2ad46f396 100644
--- a/net/vmw_vsock/vmci_transport.h
+++ b/net/vmw_vsock/vmci_transport.h
@@ -119,10 +119,12 @@ struct vmci_transport {
u64 queue_pair_size;
u64 queue_pair_min_size;
u64 queue_pair_max_size;
- u32 attach_sub_id;
u32 detach_sub_id;
union vmci_transport_notify notify;
struct vmci_transport_notify_ops *notify_ops;
+ struct list_head elem;
+ struct sock *sk;
+ spinlock_t lock; /* protects sk. */
};
int vmci_transport_register(void);
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index 7d730543f..477364ad7 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -135,8 +135,7 @@ EXPORT_SYMBOL_GPL(wimax_report_rfkill_hw);
* @state: New state of the RF kill switch. %WIMAX_RF_ON radio on,
* %WIMAX_RF_OFF radio off.
*
- * Reports changes in the software RF switch state to the the WiMAX
- * stack.
+ * Reports changes in the software RF switch state to the WiMAX stack.
*
* The main use is during initialization, so the driver can query the
* device for its current software radio kill switch state and feed it
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 2a0bbd228..3893409de 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -407,6 +407,9 @@ use_default_name:
INIT_LIST_HEAD(&rdev->bss_list);
INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done);
INIT_WORK(&rdev->sched_scan_results_wk, __cfg80211_sched_scan_results);
+ INIT_LIST_HEAD(&rdev->mlme_unreg);
+ spin_lock_init(&rdev->mlme_unreg_lock);
+ INIT_WORK(&rdev->mlme_unreg_wk, cfg80211_mlme_unreg_wk);
INIT_DELAYED_WORK(&rdev->dfs_update_channels_wk,
cfg80211_dfs_channels_update_work);
#ifdef CONFIG_CFG80211_WEXT
@@ -802,6 +805,7 @@ void wiphy_unregister(struct wiphy *wiphy)
cancel_delayed_work_sync(&rdev->dfs_update_channels_wk);
flush_work(&rdev->destroy_work);
flush_work(&rdev->sched_scan_stop_wk);
+ flush_work(&rdev->mlme_unreg_wk);
#ifdef CONFIG_PM
if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup)
@@ -855,6 +859,7 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev)
switch (wdev->iftype) {
case NL80211_IFTYPE_P2P_DEVICE:
+ cfg80211_mlme_purge_registrations(wdev);
cfg80211_stop_p2p_device(rdev, wdev);
break;
default:
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 311eef26b..b9d5bc8c1 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -59,6 +59,10 @@ struct cfg80211_registered_device {
struct list_head beacon_registrations;
spinlock_t beacon_registrations_lock;
+ struct list_head mlme_unreg;
+ spinlock_t mlme_unreg_lock;
+ struct work_struct mlme_unreg_wk;
+
/* protected by RTNL only */
int num_running_ifaces;
int num_running_monitor_ifaces;
@@ -348,6 +352,7 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
u16 frame_type, const u8 *match_data,
int match_len);
+void cfg80211_mlme_unreg_wk(struct work_struct *wk);
void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid);
void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev);
int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 7aae329e2..fb44fa3bf 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -2,6 +2,7 @@
* cfg80211 MLME SAP interface
*
* Copyright (c) 2009, Jouni Malinen <j@w1.fi>
+ * Copyright (c) 2015 Intel Deutschland GmbH
*/
#include <linux/kernel.h>
@@ -389,6 +390,7 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
struct cfg80211_mgmt_registration {
struct list_head list;
+ struct wireless_dev *wdev;
u32 nlportid;
@@ -399,6 +401,46 @@ struct cfg80211_mgmt_registration {
u8 match[];
};
+static void
+cfg80211_process_mlme_unregistrations(struct cfg80211_registered_device *rdev)
+{
+ struct cfg80211_mgmt_registration *reg;
+
+ ASSERT_RTNL();
+
+ spin_lock_bh(&rdev->mlme_unreg_lock);
+ while ((reg = list_first_entry_or_null(&rdev->mlme_unreg,
+ struct cfg80211_mgmt_registration,
+ list))) {
+ list_del(&reg->list);
+ spin_unlock_bh(&rdev->mlme_unreg_lock);
+
+ if (rdev->ops->mgmt_frame_register) {
+ u16 frame_type = le16_to_cpu(reg->frame_type);
+
+ rdev_mgmt_frame_register(rdev, reg->wdev,
+ frame_type, false);
+ }
+
+ kfree(reg);
+
+ spin_lock_bh(&rdev->mlme_unreg_lock);
+ }
+ spin_unlock_bh(&rdev->mlme_unreg_lock);
+}
+
+void cfg80211_mlme_unreg_wk(struct work_struct *wk)
+{
+ struct cfg80211_registered_device *rdev;
+
+ rdev = container_of(wk, struct cfg80211_registered_device,
+ mlme_unreg_wk);
+
+ rtnl_lock();
+ cfg80211_process_mlme_unregistrations(rdev);
+ rtnl_unlock();
+}
+
int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
u16 frame_type, const u8 *match_data,
int match_len)
@@ -449,11 +491,18 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
nreg->match_len = match_len;
nreg->nlportid = snd_portid;
nreg->frame_type = cpu_to_le16(frame_type);
+ nreg->wdev = wdev;
list_add(&nreg->list, &wdev->mgmt_registrations);
+ spin_unlock_bh(&wdev->mgmt_registrations_lock);
+
+ /* process all unregistrations to avoid driver confusion */
+ cfg80211_process_mlme_unregistrations(rdev);
if (rdev->ops->mgmt_frame_register)
rdev_mgmt_frame_register(rdev, wdev, frame_type, true);
+ return 0;
+
out:
spin_unlock_bh(&wdev->mgmt_registrations_lock);
@@ -472,15 +521,12 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid)
if (reg->nlportid != nlportid)
continue;
- if (rdev->ops->mgmt_frame_register) {
- u16 frame_type = le16_to_cpu(reg->frame_type);
-
- rdev_mgmt_frame_register(rdev, wdev,
- frame_type, false);
- }
-
list_del(&reg->list);
- kfree(reg);
+ spin_lock(&rdev->mlme_unreg_lock);
+ list_add_tail(&reg->list, &rdev->mlme_unreg);
+ spin_unlock(&rdev->mlme_unreg_lock);
+
+ schedule_work(&rdev->mlme_unreg_wk);
}
spin_unlock_bh(&wdev->mgmt_registrations_lock);
@@ -496,16 +542,15 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid)
void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
{
- struct cfg80211_mgmt_registration *reg, *tmp;
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
spin_lock_bh(&wdev->mgmt_registrations_lock);
-
- list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) {
- list_del(&reg->list);
- kfree(reg);
- }
-
+ spin_lock(&rdev->mlme_unreg_lock);
+ list_splice_tail_init(&wdev->mgmt_registrations, &rdev->mlme_unreg);
+ spin_unlock(&rdev->mlme_unreg_lock);
spin_unlock_bh(&wdev->mgmt_registrations_lock);
+
+ cfg80211_process_mlme_unregistrations(rdev);
}
int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 76b41578a..6a1040daa 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2321,6 +2321,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
rdev->wiphy.frag_threshold = old_frag_threshold;
rdev->wiphy.rts_threshold = old_rts_threshold;
rdev->wiphy.coverage_class = old_coverage_class;
+ return result;
}
}
return 0;
@@ -3408,12 +3409,6 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
wdev->iftype))
return -EINVAL;
- if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
- params.acl = parse_acl_data(&rdev->wiphy, info);
- if (IS_ERR(params.acl))
- return PTR_ERR(params.acl);
- }
-
if (info->attrs[NL80211_ATTR_SMPS_MODE]) {
params.smps_mode =
nla_get_u8(info->attrs[NL80211_ATTR_SMPS_MODE]);
@@ -3437,6 +3432,12 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
params.smps_mode = NL80211_SMPS_OFF;
}
+ if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
+ params.acl = parse_acl_data(&rdev->wiphy, info);
+ if (IS_ERR(params.acl))
+ return PTR_ERR(params.acl);
+ }
+
wdev_lock(wdev);
err = rdev_start_ap(rdev, dev, &params);
if (!err) {
@@ -7390,7 +7391,8 @@ static int nl80211_set_mcast_rate(struct sk_buff *skb, struct genl_info *info)
int err;
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_OCB)
return -EOPNOTSUPP;
if (!rdev->ops->set_mcast_rate)
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index c6e83a746..c23516d0f 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -733,6 +733,8 @@ static inline void
rdev_mgmt_frame_register(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev, u16 frame_type, bool reg)
{
+ might_sleep();
+
trace_rdev_mgmt_frame_register(&rdev->wiphy, wdev , frame_type, reg);
rdev->ops->mgmt_frame_register(&rdev->wiphy, wdev , frame_type, reg);
trace_rdev_return_void(&rdev->wiphy);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index aa2d75482..2510b2314 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1004,7 +1004,7 @@ static u32 map_regdom_flags(u32 rd_flags)
static const struct ieee80211_reg_rule *
freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
- const struct ieee80211_regdomain *regd)
+ const struct ieee80211_regdomain *regd, u32 bw)
{
int i;
bool band_rule_found = false;
@@ -1028,7 +1028,7 @@ freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
if (!band_rule_found)
band_rule_found = freq_in_rule_band(fr, center_freq);
- bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(20));
+ bw_fits = reg_does_bw_fit(fr, center_freq, bw);
if (band_rule_found && bw_fits)
return rr;
@@ -1040,14 +1040,26 @@ freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
return ERR_PTR(-EINVAL);
}
-const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
- u32 center_freq)
+const struct ieee80211_reg_rule *__freq_reg_info(struct wiphy *wiphy,
+ u32 center_freq, u32 min_bw)
{
- const struct ieee80211_regdomain *regd;
+ const struct ieee80211_regdomain *regd = reg_get_regdomain(wiphy);
+ const struct ieee80211_reg_rule *reg_rule = NULL;
+ u32 bw;
- regd = reg_get_regdomain(wiphy);
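+ /* Probe progressively narrower rules: 20, 10, then 5 MHz, down to min_bw */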
+ for (bw = MHZ_TO_KHZ(20); bw >= min_bw; bw = bw / 2) {
+ reg_rule = freq_reg_info_regd(wiphy, center_freq, regd, bw);
+ if (!IS_ERR(reg_rule))
+ return reg_rule;
+ }
- return freq_reg_info_regd(wiphy, center_freq, regd);
+ return reg_rule;
+}
+
+const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
+ u32 center_freq)
+{
+ return __freq_reg_info(wiphy, center_freq, MHZ_TO_KHZ(20));
}
EXPORT_SYMBOL(freq_reg_info);
@@ -1176,8 +1188,20 @@ static void handle_channel(struct wiphy *wiphy,
if (reg_rule->flags & NL80211_RRF_AUTO_BW)
max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
+ /* If we get a reg_rule we can assume that at least 5 MHz fits */
+ if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+ MHZ_TO_KHZ(10)))
+ bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+ if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+ MHZ_TO_KHZ(20)))
+ bw_flags |= IEEE80211_CHAN_NO_20MHZ;
+
+ if (max_bandwidth_khz < MHZ_TO_KHZ(10))
+ bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+ if (max_bandwidth_khz < MHZ_TO_KHZ(20))
+ bw_flags |= IEEE80211_CHAN_NO_20MHZ;
if (max_bandwidth_khz < MHZ_TO_KHZ(40))
- bw_flags = IEEE80211_CHAN_NO_HT40;
+ bw_flags |= IEEE80211_CHAN_NO_HT40;
if (max_bandwidth_khz < MHZ_TO_KHZ(80))
bw_flags |= IEEE80211_CHAN_NO_80MHZ;
if (max_bandwidth_khz < MHZ_TO_KHZ(160))
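/* The one-character change from "=" to "|=" is the substantive fix in
 * this hunk: plain assignment threw away the NO_10MHZ/NO_20MHZ bits just
 * computed above it. A runnable reduction (flag values illustrative):
 */
#include <stdio.h>

#define CHAN_NO_10MHZ (1u << 0)
#define CHAN_NO_20MHZ (1u << 1)
#define CHAN_NO_HT40  (1u << 2)

int main(void)
{
	unsigned int flags = CHAN_NO_10MHZ | CHAN_NO_20MHZ;

	flags = CHAN_NO_HT40;			/* old: earlier bits lost */
	printf("assign: %#x\n", flags);		/* prints 0x4 */

	flags = CHAN_NO_10MHZ | CHAN_NO_20MHZ;
	flags |= CHAN_NO_HT40;			/* fixed: bits accumulate */
	printf("or-in:  %#x\n", flags);		/* prints 0x7 */
	return 0;
}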
@@ -1695,9 +1719,15 @@ static void handle_channel_custom(struct wiphy *wiphy,
const struct ieee80211_power_rule *power_rule = NULL;
const struct ieee80211_freq_range *freq_range = NULL;
u32 max_bandwidth_khz;
+ u32 bw;
- reg_rule = freq_reg_info_regd(wiphy, MHZ_TO_KHZ(chan->center_freq),
- regd);
+ for (bw = MHZ_TO_KHZ(20); bw >= MHZ_TO_KHZ(5); bw = bw / 2) {
+ reg_rule = freq_reg_info_regd(wiphy,
+ MHZ_TO_KHZ(chan->center_freq),
+ regd, bw);
+ if (!IS_ERR(reg_rule))
+ break;
+ }
if (IS_ERR(reg_rule)) {
REG_DBG_PRINT("Disabling freq %d MHz as custom regd has no rule that fits it\n",
@@ -1721,8 +1751,20 @@ static void handle_channel_custom(struct wiphy *wiphy,
if (reg_rule->flags & NL80211_RRF_AUTO_BW)
max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
+ /* If we get a reg_rule we can assume that at least 5 MHz fits */
+ if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+ MHZ_TO_KHZ(10)))
+ bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+ if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+ MHZ_TO_KHZ(20)))
+ bw_flags |= IEEE80211_CHAN_NO_20MHZ;
+
+ if (max_bandwidth_khz < MHZ_TO_KHZ(10))
+ bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+ if (max_bandwidth_khz < MHZ_TO_KHZ(20))
+ bw_flags |= IEEE80211_CHAN_NO_20MHZ;
if (max_bandwidth_khz < MHZ_TO_KHZ(40))
- bw_flags = IEEE80211_CHAN_NO_HT40;
+ bw_flags |= IEEE80211_CHAN_NO_HT40;
if (max_bandwidth_khz < MHZ_TO_KHZ(80))
bw_flags |= IEEE80211_CHAN_NO_80MHZ;
if (max_bandwidth_khz < MHZ_TO_KHZ(160))
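/* handle_channel() and handle_channel_custom() now carry this bandwidth-
 * flag block verbatim; a follow-up could hoist it into a single helper.
 * A hedged sketch (reg_get_bw_flags() is hypothetical; the calls inside
 * are the ones used in the hunks above):
 */
static u32 reg_get_bw_flags(const struct ieee80211_freq_range *freq_range,
			    u32 center_freq_khz, u32 max_bandwidth_khz)
{
	u32 bw_flags = 0;

	if (!reg_does_bw_fit(freq_range, center_freq_khz, MHZ_TO_KHZ(10)) ||
	    max_bandwidth_khz < MHZ_TO_KHZ(10))
		bw_flags |= IEEE80211_CHAN_NO_10MHZ;
	if (!reg_does_bw_fit(freq_range, center_freq_khz, MHZ_TO_KHZ(20)) ||
	    max_bandwidth_khz < MHZ_TO_KHZ(20))
		bw_flags |= IEEE80211_CHAN_NO_20MHZ;
	if (max_bandwidth_khz < MHZ_TO_KHZ(40))
		bw_flags |= IEEE80211_CHAN_NO_HT40;
	if (max_bandwidth_khz < MHZ_TO_KHZ(80))
		bw_flags |= IEEE80211_CHAN_NO_80MHZ;
	if (max_bandwidth_khz < MHZ_TO_KHZ(160))
		bw_flags |= IEEE80211_CHAN_NO_160MHZ;
	return bw_flags;
}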
@@ -2079,10 +2121,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
reg_process_hint_core(reg_request);
return;
case NL80211_REGDOM_SET_BY_USER:
- treatment = reg_process_hint_user(reg_request);
- if (treatment == REG_REQ_IGNORE ||
- treatment == REG_REQ_ALREADY_SET)
- return;
+ reg_process_hint_user(reg_request);
return;
case NL80211_REGDOM_SET_BY_DRIVER:
if (!wiphy)
@@ -2099,7 +2138,9 @@ static void reg_process_hint(struct regulatory_request *reg_request)
goto out_free;
}
- /* This is required so that the orig_* parameters are saved */
+ /* This is required so that the orig_* parameters are saved.
+ * NOTE: treatment must be set for any case that reaches here!
+ */
if (treatment == REG_REQ_ALREADY_SET && wiphy &&
wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
wiphy_update_regulatory(wiphy, reg_request->initiator);
@@ -2584,7 +2625,7 @@ static void restore_regulatory_settings(bool reset_user)
* settings, user regulatory settings takes precedence.
*/
if (is_an_alpha2(alpha2))
- regulatory_hint_user(user_alpha2, NL80211_USER_REG_HINT_USER);
+ regulatory_hint_user(alpha2, NL80211_USER_REG_HINT_USER);
spin_lock(&reg_requests_lock);
list_splice_tail_init(&tmp_reg_req_list, &reg_requests_list);
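/* The restore path validated one buffer (alpha2) but then replayed a
 * different one (user_alpha2) as the hint, a check-vs-use mismatch; the
 * fix replays the value that was actually checked. A runnable reduction
 * (is_an_alpha2() mirrors the intent of the kernel helper):
 */
#include <ctype.h>
#include <stdio.h>

static int is_an_alpha2(const char *a)
{
	return isalpha((unsigned char)a[0]) && isalpha((unsigned char)a[1]);
}

int main(void)
{
	char alpha2[3] = "DE";		/* value we just validated */
	char user_alpha2[3] = "";	/* stale buffer the old code replayed */

	if (is_an_alpha2(alpha2))
		printf("hint '%s', not '%s'\n", alpha2, user_alpha2);
	return 0;
}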
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 42f7c76cf..f07224d8b 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -31,7 +31,7 @@ static struct xfrm_algo_desc aead_list[] = {
.uinfo = {
.aead = {
- .geniv = "seqniv",
+ .geniv = "seqiv",
.icv_truncbits = 64,
}
},
@@ -50,7 +50,7 @@ static struct xfrm_algo_desc aead_list[] = {
.uinfo = {
.aead = {
- .geniv = "seqniv",
+ .geniv = "seqiv",
.icv_truncbits = 96,
}
},
@@ -69,7 +69,7 @@ static struct xfrm_algo_desc aead_list[] = {
.uinfo = {
.aead = {
- .geniv = "seqniv",
+ .geniv = "seqiv",
.icv_truncbits = 128,
}
},
@@ -88,7 +88,7 @@ static struct xfrm_algo_desc aead_list[] = {
.uinfo = {
.aead = {
- .geniv = "seqniv",
+ .geniv = "seqiv",
.icv_truncbits = 64,
}
},
@@ -107,7 +107,7 @@ static struct xfrm_algo_desc aead_list[] = {
.uinfo = {
.aead = {
- .geniv = "seqniv",
+ .geniv = "seqiv",
.icv_truncbits = 96,
}
},
@@ -126,7 +126,7 @@ static struct xfrm_algo_desc aead_list[] = {
.uinfo = {
.aead = {
- .geniv = "seqniv",
+ .geniv = "seqiv",
.icv_truncbits = 128,
}
},
@@ -164,7 +164,7 @@ static struct xfrm_algo_desc aead_list[] = {
.uinfo = {
.aead = {
- .geniv = "seqniv",
+ .geniv = "seqiv",
.icv_truncbits = 128,
}
},
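/* Every AEAD entry's IV generator string is switched from "seqniv" to
 * "seqiv", the sequence-number-based generator; only the template name
 * changes, the truncated-ICV lengths stay as-is. The shape of one entry,
 * reduced to the fields these hunks touch (the struct below is a
 * simplified stand-in, not the kernel's xfrm_algo_desc):
 */
#include <stdio.h>

struct aead_info {
	const char *geniv;		/* IV-generator template name */
	unsigned int icv_truncbits;	/* truncated ICV length in bits */
};

int main(void)
{
	struct aead_info entry = { .geniv = "seqiv", .icv_truncbits = 128 };

	printf("%s/%u\n", entry.geniv, entry.icv_truncbits);
	return 0;
}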
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 18cead764..94af3d065 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -115,7 +115,8 @@ static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
rcu_read_unlock();
}
-static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
+static inline struct dst_entry *__xfrm_dst_lookup(struct net *net,
+ int tos, int oif,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr,
int family)
@@ -127,14 +128,15 @@ static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
if (unlikely(afinfo == NULL))
return ERR_PTR(-EAFNOSUPPORT);
- dst = afinfo->dst_lookup(net, tos, saddr, daddr);
+ dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr);
xfrm_policy_put_afinfo(afinfo);
return dst;
}
-static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
+static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
+ int tos, int oif,
xfrm_address_t *prev_saddr,
xfrm_address_t *prev_daddr,
int family)
@@ -153,7 +155,7 @@ static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
daddr = x->coaddr;
}
- dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);
+ dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family);
if (!IS_ERR(dst)) {
if (prev_saddr != saddr)
@@ -1373,15 +1375,15 @@ int __xfrm_sk_clone_policy(struct sock *sk)
}
static int
-xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
- unsigned short family)
+xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
+ xfrm_address_t *remote, unsigned short family)
{
int err;
struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
if (unlikely(afinfo == NULL))
return -EINVAL;
- err = afinfo->get_saddr(net, local, remote);
+ err = afinfo->get_saddr(net, oif, local, remote);
xfrm_policy_put_afinfo(afinfo);
return err;
}
@@ -1410,7 +1412,9 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
remote = &tmpl->id.daddr;
local = &tmpl->saddr;
if (xfrm_addr_any(local, tmpl->encap_family)) {
- error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
+ error = xfrm_get_saddr(net, fl->flowi_oif,
+ &tmp, remote,
+ tmpl->encap_family);
if (error)
goto fail;
local = &tmp;
@@ -1690,8 +1694,8 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
family = xfrm[i]->props.family;
- dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
- family);
+ dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
+ &saddr, &daddr, family);
err = PTR_ERR(dst);
if (IS_ERR(dst))
goto put_states;
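/* The xfrm_policy.c hunks thread the flow's output interface
 * (fl->flowi_oif) from xfrm_bundle_create() through xfrm_dst_lookup() and
 * xfrm_get_saddr() into the per-family callbacks, so route and source-
 * address lookups can honor an explicitly bound device. The plumbing
 * pattern, reduced to user space with hypothetical names:
 */
#include <stdio.h>

static int dst_lookup(int tos, int oif, const char *daddr)
{
	/* per-family callback: can now scope the lookup to one ifindex */
	printf("route to %s via ifindex %d (tos %d)\n", daddr, oif, tos);
	return 0;
}

static int bundle_create(int flowi_oif)
{
	/* every hop in the chain forwards oif unchanged */
	return dst_lookup(0, flowi_oif, "192.0.2.1");
}

int main(void)
{
	return bundle_create(3);
}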
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index bd16c6c7e..24e06a237 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -925,12 +925,10 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
return err;
if (attrs[XFRMA_ADDRESS_FILTER]) {
- filter = kmalloc(sizeof(*filter), GFP_KERNEL);
+ filter = kmemdup(nla_data(attrs[XFRMA_ADDRESS_FILTER]),
+ sizeof(*filter), GFP_KERNEL);
if (filter == NULL)
return -ENOMEM;
-
- memcpy(filter, nla_data(attrs[XFRMA_ADDRESS_FILTER]),
- sizeof(*filter));
}
if (attrs[XFRMA_PROTO])
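/* kmemdup() replaces the kmalloc()+memcpy() pair with one call of
 * identical semantics, shorter and harder to get out of sync (one length
 * expression instead of two). User-space analogue for comparison:
 */
#include <stdlib.h>
#include <string.h>

static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	int filter[4] = { 1, 2, 3, 4 };
	int *copy = memdup(filter, sizeof(filter));

	if (!copy)
		return 1;
	free(copy);
	return 0;
}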
@@ -1930,8 +1928,10 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
+ struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
+ struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
- if (!lt && !rp && !re)
+ if (!lt && !rp && !re && !et && !rt)
return err;
/* pedantic mode - thou shalt sayeth replaceth */
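/* xfrm_new_ae() used to bail out when only XFRMA_ETIMER_THRESH or
 * XFRMA_REPLAY_THRESH were supplied, silently dropping those updates;
 * the widened guard lets any of the five attributes through. The check,
 * reduced to a runnable form:
 */
#include <stdbool.h>
#include <stdio.h>

static bool has_work(bool lt, bool rp, bool re, bool et, bool rt)
{
	/* proceed only if at least one updatable attribute is present */
	return lt || rp || re || et || rt;
}

int main(void)
{
	/* only a replay threshold: ignored before the fix, handled now */
	printf("%d\n", has_work(false, false, false, false, true));
	return 0;
}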
@@ -2048,7 +2048,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
xfrm_audit_policy_delete(xp, 1, true);
} else {
// reset the timers here?
- WARN(1, "Dont know what to do with soft policy expire\n");
+ WARN(1, "Don't know what to do with soft policy expire\n");
}
km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);