Diffstat (limited to 'net/8021q')
-rw-r--r--   net/8021q/Kconfig         |  40
-rw-r--r--   net/8021q/Makefile        |  11
-rw-r--r--   net/8021q/vlan.c          | 704
-rw-r--r--   net/8021q/vlan.h          | 173
-rw-r--r--   net/8021q/vlan_core.c     | 362
-rw-r--r--   net/8021q/vlan_dev.c      | 804
-rw-r--r--   net/8021q/vlan_gvrp.c     |  70
-rw-r--r--   net/8021q/vlan_mvrp.c     |  76
-rw-r--r--   net/8021q/vlan_netlink.c  | 273
-rw-r--r--   net/8021q/vlanproc.c      | 326
-rw-r--r--   net/8021q/vlanproc.h      |  20
11 files changed, 2859 insertions, 0 deletions
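The patch below wires up two control paths for creating VLAN devices: the rtnetlink interface in vlan_netlink.c and the legacy SIOCSIFVLAN ioctl serviced by vlan_ioctl_handler() in vlan.c. As a quick orientation before the patch body, here is a minimal userspace sketch of the ioctl path; the interface name "eth0" and VID 100 are illustrative only, and the 'ip' utility mentioned in the Kconfig help (or the netlink interface) is the usual way to manage VLANs in practice.

/* Minimal sketch of the legacy ioctl path serviced by vlan_ioctl_handler()
 * in vlan.c below.  "eth0" and VID 100 are illustrative; requires
 * CAP_NET_ADMIN in the owning network namespace.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/if_vlan.h>
#include <linux/sockios.h>

int main(void)
{
	struct vlan_ioctl_args args;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&args, 0, sizeof(args));
	args.cmd = ADD_VLAN_CMD;                       /* handled under rtnl_lock() */
	strncpy(args.device1, "eth0", sizeof(args.device1) - 1);
	args.u.VID = 100;                              /* must be < VLAN_VID_MASK */

	if (ioctl(fd, SIOCSIFVLAN, &args) < 0)         /* dispatched to vlan_ioctl_handler() */
		perror("SIOCSIFVLAN");

	close(fd);
	return 0;
}

With the default name type (VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD, set in vlan_init_net()), a successful call creates an interface named eth0.100.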
diff --git a/net/8021q/Kconfig b/net/8021q/Kconfig new file mode 100644 index 000000000..423201809 --- /dev/null +++ b/net/8021q/Kconfig @@ -0,0 +1,40 @@ +# +# Configuration for 802.1Q VLAN support +# + +config VLAN_8021Q + tristate "802.1Q/802.1ad VLAN Support" + ---help--- + Select this and you will be able to create 802.1Q VLAN interfaces + on your Ethernet interfaces. 802.1Q VLAN supports almost + everything a regular Ethernet interface does, including + firewalling, bridging, and of course IP traffic. You will need + the 'ip' utility in order to effectively use VLANs. + See the VLAN web page for more information: + <http://www.candelatech.com/~greear/vlan.html> + + To compile this code as a module, choose M here: the module + will be called 8021q. + + If unsure, say N. + +config VLAN_8021Q_GVRP + bool "GVRP (GARP VLAN Registration Protocol) support" + depends on VLAN_8021Q + select GARP + help + Select this to enable GVRP end-system support. GVRP is used for + automatic propagation of registered VLANs to switches. + + If unsure, say N. + +config VLAN_8021Q_MVRP + bool "MVRP (Multiple VLAN Registration Protocol) support" + depends on VLAN_8021Q + select MRP + help + Select this to enable MVRP end-system support. MVRP is used for + automatic propagation of registered VLANs to switches; it + supersedes GVRP and is not backwards-compatible. + + If unsure, say N. diff --git a/net/8021q/Makefile b/net/8021q/Makefile new file mode 100644 index 000000000..7bc8db08d --- /dev/null +++ b/net/8021q/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the Linux VLAN layer. +# +obj-$(subst m,y,$(CONFIG_VLAN_8021Q)) += vlan_core.o +obj-$(CONFIG_VLAN_8021Q) += 8021q.o + +8021q-y := vlan.o vlan_dev.o vlan_netlink.o +8021q-$(CONFIG_VLAN_8021Q_GVRP) += vlan_gvrp.o +8021q-$(CONFIG_VLAN_8021Q_MVRP) += vlan_mvrp.o +8021q-$(CONFIG_PROC_FS) += vlanproc.o + diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c new file mode 100644 index 000000000..59555f0f8 --- /dev/null +++ b/net/8021q/vlan.c @@ -0,0 +1,704 @@ +/* + * INET 802.1Q VLAN + * Ethernet-type device handling. + * + * Authors: Ben Greear <greearb@candelatech.com> + * Please send support related email to: netdev@vger.kernel.org + * VLAN Home Page: http://www.candelatech.com/~greear/vlan.html + * + * Fixes: + * Fix for packet capture - Nick Eggleston <nick@dccinc.com>; + * Add HW acceleration hooks - David S. Miller <davem@redhat.com>; + * Correct all the locking - David S. Miller <davem@redhat.com>; + * Use hash table for VLAN groups - David S. Miller <davem@redhat.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/capability.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/rculist.h> +#include <net/p8022.h> +#include <net/arp.h> +#include <linux/rtnetlink.h> +#include <linux/notifier.h> +#include <net/rtnetlink.h> +#include <net/net_namespace.h> +#include <net/netns/generic.h> +#include <asm/uaccess.h> + +#include <linux/if_vlan.h> +#include "vlan.h" +#include "vlanproc.h" + +#define DRV_VERSION "1.8" + +/* Global VLAN variables */ + +int vlan_net_id __read_mostly; + +const char vlan_fullname[] = "802.1Q VLAN Support"; +const char vlan_version[] = DRV_VERSION; + +/* End of global variables definitions. */ + +static int vlan_group_prealloc_vid(struct vlan_group *vg, + __be16 vlan_proto, u16 vlan_id) +{ + struct net_device **array; + unsigned int pidx, vidx; + unsigned int size; + + ASSERT_RTNL(); + + pidx = vlan_proto_idx(vlan_proto); + vidx = vlan_id / VLAN_GROUP_ARRAY_PART_LEN; + array = vg->vlan_devices_arrays[pidx][vidx]; + if (array != NULL) + return 0; + + size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN; + array = kzalloc(size, GFP_KERNEL); + if (array == NULL) + return -ENOBUFS; + + vg->vlan_devices_arrays[pidx][vidx] = array; + return 0; +} + +void unregister_vlan_dev(struct net_device *dev, struct list_head *head) +{ + struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct net_device *real_dev = vlan->real_dev; + struct vlan_info *vlan_info; + struct vlan_group *grp; + u16 vlan_id = vlan->vlan_id; + + ASSERT_RTNL(); + + vlan_info = rtnl_dereference(real_dev->vlan_info); + BUG_ON(!vlan_info); + + grp = &vlan_info->grp; + + grp->nr_vlan_devs--; + + if (vlan->flags & VLAN_FLAG_MVRP) + vlan_mvrp_request_leave(dev); + if (vlan->flags & VLAN_FLAG_GVRP) + vlan_gvrp_request_leave(dev); + + vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL); + + netdev_upper_dev_unlink(real_dev, dev); + /* Because unregister_netdevice_queue() makes sure at least one rcu + * grace period is respected before device freeing, + * we dont need to call synchronize_net() here. + */ + unregister_netdevice_queue(dev, head); + + if (grp->nr_vlan_devs == 0) { + vlan_mvrp_uninit_applicant(real_dev); + vlan_gvrp_uninit_applicant(real_dev); + } + + /* Take it out of our own structures, but be sure to interlock with + * HW accelerating devices or SW vlan input packet processing if + * VLAN is not 0 (leave it there for 802.1p). + */ + if (vlan_id) + vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id); + + /* Get rid of the vlan's reference to real_dev */ + dev_put(real_dev); +} + +int vlan_check_real_dev(struct net_device *real_dev, + __be16 protocol, u16 vlan_id) +{ + const char *name = real_dev->name; + + if (real_dev->features & NETIF_F_VLAN_CHALLENGED) { + pr_info("VLANs not supported on %s\n", name); + return -EOPNOTSUPP; + } + + if (vlan_find_dev(real_dev, protocol, vlan_id) != NULL) + return -EEXIST; + + return 0; +} + +int register_vlan_dev(struct net_device *dev) +{ + struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct net_device *real_dev = vlan->real_dev; + u16 vlan_id = vlan->vlan_id; + struct vlan_info *vlan_info; + struct vlan_group *grp; + int err; + + err = vlan_vid_add(real_dev, vlan->vlan_proto, vlan_id); + if (err) + return err; + + vlan_info = rtnl_dereference(real_dev->vlan_info); + /* vlan_info should be there now. 
vlan_vid_add took care of it */ + BUG_ON(!vlan_info); + + grp = &vlan_info->grp; + if (grp->nr_vlan_devs == 0) { + err = vlan_gvrp_init_applicant(real_dev); + if (err < 0) + goto out_vid_del; + err = vlan_mvrp_init_applicant(real_dev); + if (err < 0) + goto out_uninit_gvrp; + } + + err = vlan_group_prealloc_vid(grp, vlan->vlan_proto, vlan_id); + if (err < 0) + goto out_uninit_mvrp; + + vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1; + err = register_netdevice(dev); + if (err < 0) + goto out_uninit_mvrp; + + err = netdev_upper_dev_link(real_dev, dev); + if (err) + goto out_unregister_netdev; + + /* Account for reference in struct vlan_dev_priv */ + dev_hold(real_dev); + + netif_stacked_transfer_operstate(real_dev, dev); + linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */ + + /* So, got the sucker initialized, now lets place + * it into our local structure. + */ + vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev); + grp->nr_vlan_devs++; + + return 0; + +out_unregister_netdev: + unregister_netdevice(dev); +out_uninit_mvrp: + if (grp->nr_vlan_devs == 0) + vlan_mvrp_uninit_applicant(real_dev); +out_uninit_gvrp: + if (grp->nr_vlan_devs == 0) + vlan_gvrp_uninit_applicant(real_dev); +out_vid_del: + vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id); + return err; +} + +/* Attach a VLAN device to a mac address (ie Ethernet Card). + * Returns 0 if the device was created or a negative error code otherwise. + */ +static int register_vlan_device(struct net_device *real_dev, u16 vlan_id) +{ + struct net_device *new_dev; + struct vlan_dev_priv *vlan; + struct net *net = dev_net(real_dev); + struct vlan_net *vn = net_generic(net, vlan_net_id); + char name[IFNAMSIZ]; + int err; + + if (vlan_id >= VLAN_VID_MASK) + return -ERANGE; + + err = vlan_check_real_dev(real_dev, htons(ETH_P_8021Q), vlan_id); + if (err < 0) + return err; + + /* Gotta set up the fields for the device. */ + switch (vn->name_type) { + case VLAN_NAME_TYPE_RAW_PLUS_VID: + /* name will look like: eth1.0005 */ + snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, vlan_id); + break; + case VLAN_NAME_TYPE_PLUS_VID_NO_PAD: + /* Put our vlan.VID in the name. + * Name will look like: vlan5 + */ + snprintf(name, IFNAMSIZ, "vlan%i", vlan_id); + break; + case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD: + /* Put our vlan.VID in the name. + * Name will look like: eth0.5 + */ + snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, vlan_id); + break; + case VLAN_NAME_TYPE_PLUS_VID: + /* Put our vlan.VID in the name. + * Name will look like: vlan0005 + */ + default: + snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id); + } + + new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name, + NET_NAME_UNKNOWN, vlan_setup); + + if (new_dev == NULL) + return -ENOBUFS; + + dev_net_set(new_dev, net); + /* need 4 bytes for extra VLAN header info, + * hope the underlying device can handle it. 
+ */ + new_dev->mtu = real_dev->mtu; + new_dev->priv_flags |= (real_dev->priv_flags & IFF_UNICAST_FLT); + + vlan = vlan_dev_priv(new_dev); + vlan->vlan_proto = htons(ETH_P_8021Q); + vlan->vlan_id = vlan_id; + vlan->real_dev = real_dev; + vlan->dent = NULL; + vlan->flags = VLAN_FLAG_REORDER_HDR; + + new_dev->rtnl_link_ops = &vlan_link_ops; + err = register_vlan_dev(new_dev); + if (err < 0) + goto out_free_newdev; + + return 0; + +out_free_newdev: + free_netdev(new_dev); + return err; +} + +static void vlan_sync_address(struct net_device *dev, + struct net_device *vlandev) +{ + struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); + + /* May be called without an actual change */ + if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr)) + return; + + /* vlan address was different from the old address and is equal to + * the new address */ + if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) && + ether_addr_equal(vlandev->dev_addr, dev->dev_addr)) + dev_uc_del(dev, vlandev->dev_addr); + + /* vlan address was equal to the old address and is different from + * the new address */ + if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) && + !ether_addr_equal(vlandev->dev_addr, dev->dev_addr)) + dev_uc_add(dev, vlandev->dev_addr); + + ether_addr_copy(vlan->real_dev_addr, dev->dev_addr); +} + +static void vlan_transfer_features(struct net_device *dev, + struct net_device *vlandev) +{ + struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); + + vlandev->gso_max_size = dev->gso_max_size; + + if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto)) + vlandev->hard_header_len = dev->hard_header_len; + else + vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN; + +#if IS_ENABLED(CONFIG_FCOE) + vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid; +#endif + + netdev_update_features(vlandev); +} + +static int __vlan_device_event(struct net_device *dev, unsigned long event) +{ + int err = 0; + + switch (event) { + case NETDEV_CHANGENAME: + vlan_proc_rem_dev(dev); + err = vlan_proc_add_dev(dev); + break; + case NETDEV_REGISTER: + err = vlan_proc_add_dev(dev); + break; + case NETDEV_UNREGISTER: + vlan_proc_rem_dev(dev); + break; + } + + return err; +} + +static int vlan_device_event(struct notifier_block *unused, unsigned long event, + void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct vlan_group *grp; + struct vlan_info *vlan_info; + int i, flgs; + struct net_device *vlandev; + struct vlan_dev_priv *vlan; + bool last = false; + LIST_HEAD(list); + + if (is_vlan_dev(dev)) { + int err = __vlan_device_event(dev, event); + + if (err) + return notifier_from_errno(err); + } + + if ((event == NETDEV_UP) && + (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) { + pr_info("adding VLAN 0 to HW filter on device %s\n", + dev->name); + vlan_vid_add(dev, htons(ETH_P_8021Q), 0); + } + + vlan_info = rtnl_dereference(dev->vlan_info); + if (!vlan_info) + goto out; + grp = &vlan_info->grp; + + /* It is OK that we do not hold the group lock right now, + * as we run under the RTNL lock. 
+ */ + + switch (event) { + case NETDEV_CHANGE: + /* Propagate real device state to vlan devices */ + vlan_group_for_each_dev(grp, i, vlandev) + netif_stacked_transfer_operstate(dev, vlandev); + break; + + case NETDEV_CHANGEADDR: + /* Adjust unicast filters on underlying device */ + vlan_group_for_each_dev(grp, i, vlandev) { + flgs = vlandev->flags; + if (!(flgs & IFF_UP)) + continue; + + vlan_sync_address(dev, vlandev); + } + break; + + case NETDEV_CHANGEMTU: + vlan_group_for_each_dev(grp, i, vlandev) { + if (vlandev->mtu <= dev->mtu) + continue; + + dev_set_mtu(vlandev, dev->mtu); + } + break; + + case NETDEV_FEAT_CHANGE: + /* Propagate device features to underlying device */ + vlan_group_for_each_dev(grp, i, vlandev) + vlan_transfer_features(dev, vlandev); + break; + + case NETDEV_DOWN: { + struct net_device *tmp; + LIST_HEAD(close_list); + + if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) + vlan_vid_del(dev, htons(ETH_P_8021Q), 0); + + /* Put all VLANs for this dev in the down state too. */ + vlan_group_for_each_dev(grp, i, vlandev) { + flgs = vlandev->flags; + if (!(flgs & IFF_UP)) + continue; + + vlan = vlan_dev_priv(vlandev); + if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) + list_add(&vlandev->close_list, &close_list); + } + + dev_close_many(&close_list, false); + + list_for_each_entry_safe(vlandev, tmp, &close_list, close_list) { + netif_stacked_transfer_operstate(dev, vlandev); + list_del_init(&vlandev->close_list); + } + list_del(&close_list); + break; + } + case NETDEV_UP: + /* Put all VLANs for this dev in the up state too. */ + vlan_group_for_each_dev(grp, i, vlandev) { + flgs = dev_get_flags(vlandev); + if (flgs & IFF_UP) + continue; + + vlan = vlan_dev_priv(vlandev); + if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) + dev_change_flags(vlandev, flgs | IFF_UP); + netif_stacked_transfer_operstate(dev, vlandev); + } + break; + + case NETDEV_UNREGISTER: + /* twiddle thumbs on netns device moves */ + if (dev->reg_state != NETREG_UNREGISTERING) + break; + + vlan_group_for_each_dev(grp, i, vlandev) { + /* removal of last vid destroys vlan_info, abort + * afterwards */ + if (vlan_info->nr_vids == 1) + last = true; + + unregister_vlan_dev(vlandev, &list); + if (last) + break; + } + unregister_netdevice_many(&list); + break; + + case NETDEV_PRE_TYPE_CHANGE: + /* Forbid underlaying device to change its type. */ + if (vlan_uses_dev(dev)) + return NOTIFY_BAD; + break; + + case NETDEV_NOTIFY_PEERS: + case NETDEV_BONDING_FAILOVER: + case NETDEV_RESEND_IGMP: + /* Propagate to vlan devices */ + vlan_group_for_each_dev(grp, i, vlandev) + call_netdevice_notifiers(event, vlandev); + break; + } + +out: + return NOTIFY_DONE; +} + +static struct notifier_block vlan_notifier_block __read_mostly = { + .notifier_call = vlan_device_event, +}; + +/* + * VLAN IOCTL handler. + * o execute requested action or pass command to the device driver + * arg is really a struct vlan_ioctl_args __user *. + */ +static int vlan_ioctl_handler(struct net *net, void __user *arg) +{ + int err; + struct vlan_ioctl_args args; + struct net_device *dev = NULL; + + if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args))) + return -EFAULT; + + /* Null terminate this sucker, just in case. 
*/ + args.device1[23] = 0; + args.u.device2[23] = 0; + + rtnl_lock(); + + switch (args.cmd) { + case SET_VLAN_INGRESS_PRIORITY_CMD: + case SET_VLAN_EGRESS_PRIORITY_CMD: + case SET_VLAN_FLAG_CMD: + case ADD_VLAN_CMD: + case DEL_VLAN_CMD: + case GET_VLAN_REALDEV_NAME_CMD: + case GET_VLAN_VID_CMD: + err = -ENODEV; + dev = __dev_get_by_name(net, args.device1); + if (!dev) + goto out; + + err = -EINVAL; + if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev)) + goto out; + } + + switch (args.cmd) { + case SET_VLAN_INGRESS_PRIORITY_CMD: + err = -EPERM; + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) + break; + vlan_dev_set_ingress_priority(dev, + args.u.skb_priority, + args.vlan_qos); + err = 0; + break; + + case SET_VLAN_EGRESS_PRIORITY_CMD: + err = -EPERM; + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) + break; + err = vlan_dev_set_egress_priority(dev, + args.u.skb_priority, + args.vlan_qos); + break; + + case SET_VLAN_FLAG_CMD: + err = -EPERM; + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) + break; + err = vlan_dev_change_flags(dev, + args.vlan_qos ? args.u.flag : 0, + args.u.flag); + break; + + case SET_VLAN_NAME_TYPE_CMD: + err = -EPERM; + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) + break; + if ((args.u.name_type >= 0) && + (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) { + struct vlan_net *vn; + + vn = net_generic(net, vlan_net_id); + vn->name_type = args.u.name_type; + err = 0; + } else { + err = -EINVAL; + } + break; + + case ADD_VLAN_CMD: + err = -EPERM; + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) + break; + err = register_vlan_device(dev, args.u.VID); + break; + + case DEL_VLAN_CMD: + err = -EPERM; + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) + break; + unregister_vlan_dev(dev, NULL); + err = 0; + break; + + case GET_VLAN_REALDEV_NAME_CMD: + err = 0; + vlan_dev_get_realdev_name(dev, args.u.device2); + if (copy_to_user(arg, &args, + sizeof(struct vlan_ioctl_args))) + err = -EFAULT; + break; + + case GET_VLAN_VID_CMD: + err = 0; + args.u.VID = vlan_dev_vlan_id(dev); + if (copy_to_user(arg, &args, + sizeof(struct vlan_ioctl_args))) + err = -EFAULT; + break; + + default: + err = -EOPNOTSUPP; + break; + } +out: + rtnl_unlock(); + return err; +} + +static int __net_init vlan_init_net(struct net *net) +{ + struct vlan_net *vn = net_generic(net, vlan_net_id); + int err; + + vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD; + + err = vlan_proc_init(net); + + return err; +} + +static void __net_exit vlan_exit_net(struct net *net) +{ + vlan_proc_cleanup(net); +} + +static struct pernet_operations vlan_net_ops = { + .init = vlan_init_net, + .exit = vlan_exit_net, + .id = &vlan_net_id, + .size = sizeof(struct vlan_net), +}; + +static int __init vlan_proto_init(void) +{ + int err; + + pr_info("%s v%s\n", vlan_fullname, vlan_version); + + err = register_pernet_subsys(&vlan_net_ops); + if (err < 0) + goto err0; + + err = register_netdevice_notifier(&vlan_notifier_block); + if (err < 0) + goto err2; + + err = vlan_gvrp_init(); + if (err < 0) + goto err3; + + err = vlan_mvrp_init(); + if (err < 0) + goto err4; + + err = vlan_netlink_init(); + if (err < 0) + goto err5; + + vlan_ioctl_set(vlan_ioctl_handler); + return 0; + +err5: + vlan_mvrp_uninit(); +err4: + vlan_gvrp_uninit(); +err3: + unregister_netdevice_notifier(&vlan_notifier_block); +err2: + unregister_pernet_subsys(&vlan_net_ops); +err0: + return err; +} + +static void __exit vlan_cleanup_module(void) +{ + vlan_ioctl_set(NULL); + vlan_netlink_fini(); + + unregister_netdevice_notifier(&vlan_notifier_block); + + 
unregister_pernet_subsys(&vlan_net_ops); + rcu_barrier(); /* Wait for completion of call_rcu()'s */ + + vlan_mvrp_uninit(); + vlan_gvrp_uninit(); +} + +module_init(vlan_proto_init); +module_exit(vlan_cleanup_module); + +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h new file mode 100644 index 000000000..9d010a09a --- /dev/null +++ b/net/8021q/vlan.h @@ -0,0 +1,173 @@ +#ifndef __BEN_VLAN_802_1Q_INC__ +#define __BEN_VLAN_802_1Q_INC__ + +#include <linux/if_vlan.h> +#include <linux/u64_stats_sync.h> +#include <linux/list.h> + +/* if this changes, algorithm will have to be reworked because this + * depends on completely exhausting the VLAN identifier space. Thus + * it gives constant time look-up, but in many cases it wastes memory. + */ +#define VLAN_GROUP_ARRAY_SPLIT_PARTS 8 +#define VLAN_GROUP_ARRAY_PART_LEN (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS) + +enum vlan_protos { + VLAN_PROTO_8021Q = 0, + VLAN_PROTO_8021AD, + VLAN_PROTO_NUM, +}; + +struct vlan_group { + unsigned int nr_vlan_devs; + struct hlist_node hlist; /* linked list */ + struct net_device **vlan_devices_arrays[VLAN_PROTO_NUM] + [VLAN_GROUP_ARRAY_SPLIT_PARTS]; +}; + +struct vlan_info { + struct net_device *real_dev; /* The ethernet(like) device + * the vlan is attached to. + */ + struct vlan_group grp; + struct list_head vid_list; + unsigned int nr_vids; + struct rcu_head rcu; +}; + +static inline unsigned int vlan_proto_idx(__be16 proto) +{ + switch (proto) { + case htons(ETH_P_8021Q): + return VLAN_PROTO_8021Q; + case htons(ETH_P_8021AD): + return VLAN_PROTO_8021AD; + default: + BUG(); + return 0; + } +} + +static inline struct net_device *__vlan_group_get_device(struct vlan_group *vg, + unsigned int pidx, + u16 vlan_id) +{ + struct net_device **array; + + array = vg->vlan_devices_arrays[pidx] + [vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; + return array ? array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL; +} + +static inline struct net_device *vlan_group_get_device(struct vlan_group *vg, + __be16 vlan_proto, + u16 vlan_id) +{ + return __vlan_group_get_device(vg, vlan_proto_idx(vlan_proto), vlan_id); +} + +static inline void vlan_group_set_device(struct vlan_group *vg, + __be16 vlan_proto, u16 vlan_id, + struct net_device *dev) +{ + struct net_device **array; + if (!vg) + return; + array = vg->vlan_devices_arrays[vlan_proto_idx(vlan_proto)] + [vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; + array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev; +} + +/* Must be invoked with rcu_read_lock or with RTNL. 
*/ +static inline struct net_device *vlan_find_dev(struct net_device *real_dev, + __be16 vlan_proto, u16 vlan_id) +{ + struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info); + + if (vlan_info) + return vlan_group_get_device(&vlan_info->grp, + vlan_proto, vlan_id); + + return NULL; +} + +#define vlan_group_for_each_dev(grp, i, dev) \ + for ((i) = 0; i < VLAN_PROTO_NUM * VLAN_N_VID; i++) \ + if (((dev) = __vlan_group_get_device((grp), (i) / VLAN_N_VID, \ + (i) % VLAN_N_VID))) + +/* found in vlan_dev.c */ +void vlan_dev_set_ingress_priority(const struct net_device *dev, + u32 skb_prio, u16 vlan_prio); +int vlan_dev_set_egress_priority(const struct net_device *dev, + u32 skb_prio, u16 vlan_prio); +int vlan_dev_change_flags(const struct net_device *dev, u32 flag, u32 mask); +void vlan_dev_get_realdev_name(const struct net_device *dev, char *result); + +int vlan_check_real_dev(struct net_device *real_dev, + __be16 protocol, u16 vlan_id); +void vlan_setup(struct net_device *dev); +int register_vlan_dev(struct net_device *dev); +void unregister_vlan_dev(struct net_device *dev, struct list_head *head); + +static inline u32 vlan_get_ingress_priority(struct net_device *dev, + u16 vlan_tci) +{ + struct vlan_dev_priv *vip = vlan_dev_priv(dev); + + return vip->ingress_priority_map[(vlan_tci >> VLAN_PRIO_SHIFT) & 0x7]; +} + +#ifdef CONFIG_VLAN_8021Q_GVRP +int vlan_gvrp_request_join(const struct net_device *dev); +void vlan_gvrp_request_leave(const struct net_device *dev); +int vlan_gvrp_init_applicant(struct net_device *dev); +void vlan_gvrp_uninit_applicant(struct net_device *dev); +int vlan_gvrp_init(void); +void vlan_gvrp_uninit(void); +#else +static inline int vlan_gvrp_request_join(const struct net_device *dev) { return 0; } +static inline void vlan_gvrp_request_leave(const struct net_device *dev) {} +static inline int vlan_gvrp_init_applicant(struct net_device *dev) { return 0; } +static inline void vlan_gvrp_uninit_applicant(struct net_device *dev) {} +static inline int vlan_gvrp_init(void) { return 0; } +static inline void vlan_gvrp_uninit(void) {} +#endif + +#ifdef CONFIG_VLAN_8021Q_MVRP +int vlan_mvrp_request_join(const struct net_device *dev); +void vlan_mvrp_request_leave(const struct net_device *dev); +int vlan_mvrp_init_applicant(struct net_device *dev); +void vlan_mvrp_uninit_applicant(struct net_device *dev); +int vlan_mvrp_init(void); +void vlan_mvrp_uninit(void); +#else +static inline int vlan_mvrp_request_join(const struct net_device *dev) { return 0; } +static inline void vlan_mvrp_request_leave(const struct net_device *dev) {} +static inline int vlan_mvrp_init_applicant(struct net_device *dev) { return 0; } +static inline void vlan_mvrp_uninit_applicant(struct net_device *dev) {} +static inline int vlan_mvrp_init(void) { return 0; } +static inline void vlan_mvrp_uninit(void) {} +#endif + +extern const char vlan_fullname[]; +extern const char vlan_version[]; +int vlan_netlink_init(void); +void vlan_netlink_fini(void); + +extern struct rtnl_link_ops vlan_link_ops; + +extern int vlan_net_id; + +struct proc_dir_entry; + +struct vlan_net { + /* /proc/net/vlan */ + struct proc_dir_entry *proc_vlan_dir; + /* /proc/net/vlan/config */ + struct proc_dir_entry *proc_vlan_conf; + /* Determines interface naming scheme. 
*/ + unsigned short name_type; +}; + +#endif /* !(__BEN_VLAN_802_1Q_INC__) */ diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c new file mode 100644 index 000000000..61bf2a06e --- /dev/null +++ b/net/8021q/vlan_core.c @@ -0,0 +1,362 @@ +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/if_vlan.h> +#include <linux/netpoll.h> +#include <linux/export.h> +#include "vlan.h" + +bool vlan_do_receive(struct sk_buff **skbp) +{ + struct sk_buff *skb = *skbp; + __be16 vlan_proto = skb->vlan_proto; + u16 vlan_id = skb_vlan_tag_get_id(skb); + struct net_device *vlan_dev; + struct vlan_pcpu_stats *rx_stats; + + vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id); + if (!vlan_dev) + return false; + + skb = *skbp = skb_share_check(skb, GFP_ATOMIC); + if (unlikely(!skb)) + return false; + + skb->dev = vlan_dev; + if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) { + /* Our lower layer thinks this is not local, let's make sure. + * This allows the VLAN to have a different MAC than the + * underlying device, and still route correctly. */ + if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, vlan_dev->dev_addr)) + skb->pkt_type = PACKET_HOST; + } + + if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) { + unsigned int offset = skb->data - skb_mac_header(skb); + + /* + * vlan_insert_tag expect skb->data pointing to mac header. + * So change skb->data before calling it and change back to + * original position later + */ + skb_push(skb, offset); + skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto, + skb->vlan_tci); + if (!skb) + return false; + skb_pull(skb, offset + VLAN_HLEN); + skb_reset_mac_len(skb); + } + + skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci); + skb->vlan_tci = 0; + + rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats); + + u64_stats_update_begin(&rx_stats->syncp); + rx_stats->rx_packets++; + rx_stats->rx_bytes += skb->len; + if (skb->pkt_type == PACKET_MULTICAST) + rx_stats->rx_multicast++; + u64_stats_update_end(&rx_stats->syncp); + + return true; +} + +/* Must be invoked with rcu_read_lock. */ +struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev, + __be16 vlan_proto, u16 vlan_id) +{ + struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info); + + if (vlan_info) { + return vlan_group_get_device(&vlan_info->grp, + vlan_proto, vlan_id); + } else { + /* + * Lower devices of master uppers (bonding, team) do not have + * grp assigned to themselves. Grp is assigned to upper device + * instead. 
+ */ + struct net_device *upper_dev; + + upper_dev = netdev_master_upper_dev_get_rcu(dev); + if (upper_dev) + return __vlan_find_dev_deep_rcu(upper_dev, + vlan_proto, vlan_id); + } + + return NULL; +} +EXPORT_SYMBOL(__vlan_find_dev_deep_rcu); + +struct net_device *vlan_dev_real_dev(const struct net_device *dev) +{ + struct net_device *ret = vlan_dev_priv(dev)->real_dev; + + while (is_vlan_dev(ret)) + ret = vlan_dev_priv(ret)->real_dev; + + return ret; +} +EXPORT_SYMBOL(vlan_dev_real_dev); + +u16 vlan_dev_vlan_id(const struct net_device *dev) +{ + return vlan_dev_priv(dev)->vlan_id; +} +EXPORT_SYMBOL(vlan_dev_vlan_id); + +__be16 vlan_dev_vlan_proto(const struct net_device *dev) +{ + return vlan_dev_priv(dev)->vlan_proto; +} +EXPORT_SYMBOL(vlan_dev_vlan_proto); + +/* + * vlan info and vid list + */ + +static void vlan_group_free(struct vlan_group *grp) +{ + int i, j; + + for (i = 0; i < VLAN_PROTO_NUM; i++) + for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++) + kfree(grp->vlan_devices_arrays[i][j]); +} + +static void vlan_info_free(struct vlan_info *vlan_info) +{ + vlan_group_free(&vlan_info->grp); + kfree(vlan_info); +} + +static void vlan_info_rcu_free(struct rcu_head *rcu) +{ + vlan_info_free(container_of(rcu, struct vlan_info, rcu)); +} + +static struct vlan_info *vlan_info_alloc(struct net_device *dev) +{ + struct vlan_info *vlan_info; + + vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL); + if (!vlan_info) + return NULL; + + vlan_info->real_dev = dev; + INIT_LIST_HEAD(&vlan_info->vid_list); + return vlan_info; +} + +struct vlan_vid_info { + struct list_head list; + __be16 proto; + u16 vid; + int refcount; +}; + +static bool vlan_hw_filter_capable(const struct net_device *dev, + const struct vlan_vid_info *vid_info) +{ + if (vid_info->proto == htons(ETH_P_8021Q) && + dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) + return true; + if (vid_info->proto == htons(ETH_P_8021AD) && + dev->features & NETIF_F_HW_VLAN_STAG_FILTER) + return true; + return false; +} + +static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info, + __be16 proto, u16 vid) +{ + struct vlan_vid_info *vid_info; + + list_for_each_entry(vid_info, &vlan_info->vid_list, list) { + if (vid_info->proto == proto && vid_info->vid == vid) + return vid_info; + } + return NULL; +} + +static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid) +{ + struct vlan_vid_info *vid_info; + + vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL); + if (!vid_info) + return NULL; + vid_info->proto = proto; + vid_info->vid = vid; + + return vid_info; +} + +static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid, + struct vlan_vid_info **pvid_info) +{ + struct net_device *dev = vlan_info->real_dev; + const struct net_device_ops *ops = dev->netdev_ops; + struct vlan_vid_info *vid_info; + int err; + + vid_info = vlan_vid_info_alloc(proto, vid); + if (!vid_info) + return -ENOMEM; + + if (vlan_hw_filter_capable(dev, vid_info)) { + err = ops->ndo_vlan_rx_add_vid(dev, proto, vid); + if (err) { + kfree(vid_info); + return err; + } + } + list_add(&vid_info->list, &vlan_info->vid_list); + vlan_info->nr_vids++; + *pvid_info = vid_info; + return 0; +} + +int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid) +{ + struct vlan_info *vlan_info; + struct vlan_vid_info *vid_info; + bool vlan_info_created = false; + int err; + + ASSERT_RTNL(); + + vlan_info = rtnl_dereference(dev->vlan_info); + if (!vlan_info) { + vlan_info = vlan_info_alloc(dev); + if (!vlan_info) + return -ENOMEM; + 
vlan_info_created = true; + } + vid_info = vlan_vid_info_get(vlan_info, proto, vid); + if (!vid_info) { + err = __vlan_vid_add(vlan_info, proto, vid, &vid_info); + if (err) + goto out_free_vlan_info; + } + vid_info->refcount++; + + if (vlan_info_created) + rcu_assign_pointer(dev->vlan_info, vlan_info); + + return 0; + +out_free_vlan_info: + if (vlan_info_created) + kfree(vlan_info); + return err; +} +EXPORT_SYMBOL(vlan_vid_add); + +static void __vlan_vid_del(struct vlan_info *vlan_info, + struct vlan_vid_info *vid_info) +{ + struct net_device *dev = vlan_info->real_dev; + const struct net_device_ops *ops = dev->netdev_ops; + __be16 proto = vid_info->proto; + u16 vid = vid_info->vid; + int err; + + if (vlan_hw_filter_capable(dev, vid_info)) { + err = ops->ndo_vlan_rx_kill_vid(dev, proto, vid); + if (err) { + pr_warn("failed to kill vid %04x/%d for device %s\n", + proto, vid, dev->name); + } + } + list_del(&vid_info->list); + kfree(vid_info); + vlan_info->nr_vids--; +} + +void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid) +{ + struct vlan_info *vlan_info; + struct vlan_vid_info *vid_info; + + ASSERT_RTNL(); + + vlan_info = rtnl_dereference(dev->vlan_info); + if (!vlan_info) + return; + + vid_info = vlan_vid_info_get(vlan_info, proto, vid); + if (!vid_info) + return; + vid_info->refcount--; + if (vid_info->refcount == 0) { + __vlan_vid_del(vlan_info, vid_info); + if (vlan_info->nr_vids == 0) { + RCU_INIT_POINTER(dev->vlan_info, NULL); + call_rcu(&vlan_info->rcu, vlan_info_rcu_free); + } + } +} +EXPORT_SYMBOL(vlan_vid_del); + +int vlan_vids_add_by_dev(struct net_device *dev, + const struct net_device *by_dev) +{ + struct vlan_vid_info *vid_info; + struct vlan_info *vlan_info; + int err; + + ASSERT_RTNL(); + + vlan_info = rtnl_dereference(by_dev->vlan_info); + if (!vlan_info) + return 0; + + list_for_each_entry(vid_info, &vlan_info->vid_list, list) { + err = vlan_vid_add(dev, vid_info->proto, vid_info->vid); + if (err) + goto unwind; + } + return 0; + +unwind: + list_for_each_entry_continue_reverse(vid_info, + &vlan_info->vid_list, + list) { + vlan_vid_del(dev, vid_info->proto, vid_info->vid); + } + + return err; +} +EXPORT_SYMBOL(vlan_vids_add_by_dev); + +void vlan_vids_del_by_dev(struct net_device *dev, + const struct net_device *by_dev) +{ + struct vlan_vid_info *vid_info; + struct vlan_info *vlan_info; + + ASSERT_RTNL(); + + vlan_info = rtnl_dereference(by_dev->vlan_info); + if (!vlan_info) + return; + + list_for_each_entry(vid_info, &vlan_info->vid_list, list) + vlan_vid_del(dev, vid_info->proto, vid_info->vid); +} +EXPORT_SYMBOL(vlan_vids_del_by_dev); + +bool vlan_uses_dev(const struct net_device *dev) +{ + struct vlan_info *vlan_info; + + ASSERT_RTNL(); + + vlan_info = rtnl_dereference(dev->vlan_info); + if (!vlan_info) + return false; + return vlan_info->grp.nr_vlan_devs ? true : false; +} +EXPORT_SYMBOL(vlan_uses_dev); diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c new file mode 100644 index 000000000..01d7ba840 --- /dev/null +++ b/net/8021q/vlan_dev.c @@ -0,0 +1,804 @@ +/* -*- linux-c -*- + * INET 802.1Q VLAN + * Ethernet-type device handling. 
+ * + * Authors: Ben Greear <greearb@candelatech.com> + * Please send support related email to: netdev@vger.kernel.org + * VLAN Home Page: http://www.candelatech.com/~greear/vlan.html + * + * Fixes: Mar 22 2001: Martin Bokaemper <mbokaemper@unispherenetworks.com> + * - reset skb->pkt_type on incoming packets when MAC was changed + * - see that changed MAC is saddr for outgoing packets + * Oct 20, 2001: Ard van Breeman: + * - Fix MC-list, finally. + * - Flush MC-list on VLAN destroy. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/net_tstamp.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <net/arp.h> + +#include "vlan.h" +#include "vlanproc.h" +#include <linux/if_vlan.h> +#include <linux/netpoll.h> + +/* + * Create the VLAN header for an arbitrary protocol layer + * + * saddr=NULL means use device source address + * daddr=NULL means leave destination address (eg unresolved arp) + * + * This is called when the SKB is moving down the stack towards the + * physical devices. + */ +static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, + const void *daddr, const void *saddr, + unsigned int len) +{ + struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct vlan_hdr *vhdr; + unsigned int vhdrlen = 0; + u16 vlan_tci = 0; + int rc; + + if (!(vlan->flags & VLAN_FLAG_REORDER_HDR)) { + vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN); + + vlan_tci = vlan->vlan_id; + vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority); + vhdr->h_vlan_TCI = htons(vlan_tci); + + /* + * Set the protocol type. For a packet of type ETH_P_802_3/2 we + * put the length in here instead. + */ + if (type != ETH_P_802_3 && type != ETH_P_802_2) + vhdr->h_vlan_encapsulated_proto = htons(type); + else + vhdr->h_vlan_encapsulated_proto = htons(len); + + skb->protocol = vlan->vlan_proto; + type = ntohs(vlan->vlan_proto); + vhdrlen = VLAN_HLEN; + } + + /* Before delegating work to the lower layer, enter our MAC-address */ + if (saddr == NULL) + saddr = dev->dev_addr; + + /* Now make the underlying real hard header */ + dev = vlan->real_dev; + rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen); + if (rc > 0) + rc += vhdrlen; + return rc; +} + +static inline netdev_tx_t vlan_netpoll_send_skb(struct vlan_dev_priv *vlan, struct sk_buff *skb) +{ +#ifdef CONFIG_NET_POLL_CONTROLLER + if (vlan->netpoll) + netpoll_send_skb(vlan->netpoll, skb); +#else + BUG(); +#endif + return NETDEV_TX_OK; +} + +static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data); + unsigned int len; + int ret; + + /* Handle non-VLAN frames if they are sent to us, for example by DHCP. + * + * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING + * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs... 
+ */ + if (veth->h_vlan_proto != vlan->vlan_proto || + vlan->flags & VLAN_FLAG_REORDER_HDR) { + u16 vlan_tci; + vlan_tci = vlan->vlan_id; + vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority); + __vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci); + } + + skb->dev = vlan->real_dev; + len = skb->len; + if (unlikely(netpoll_tx_running(dev))) + return vlan_netpoll_send_skb(vlan, skb); + + ret = dev_queue_xmit(skb); + + if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { + struct vlan_pcpu_stats *stats; + + stats = this_cpu_ptr(vlan->vlan_pcpu_stats); + u64_stats_update_begin(&stats->syncp); + stats->tx_packets++; + stats->tx_bytes += len; + u64_stats_update_end(&stats->syncp); + } else { + this_cpu_inc(vlan->vlan_pcpu_stats->tx_dropped); + } + + return ret; +} + +static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) +{ + /* TODO: gotta make sure the underlying layer can handle it, + * maybe an IFF_VLAN_CAPABLE flag for devices? + */ + if (vlan_dev_priv(dev)->real_dev->mtu < new_mtu) + return -ERANGE; + + dev->mtu = new_mtu; + + return 0; +} + +void vlan_dev_set_ingress_priority(const struct net_device *dev, + u32 skb_prio, u16 vlan_prio) +{ + struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + + if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio) + vlan->nr_ingress_mappings--; + else if (!vlan->ingress_priority_map[vlan_prio & 0x7] && skb_prio) + vlan->nr_ingress_mappings++; + + vlan->ingress_priority_map[vlan_prio & 0x7] = skb_prio; +} + +int vlan_dev_set_egress_priority(const struct net_device *dev, + u32 skb_prio, u16 vlan_prio) +{ + struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct vlan_priority_tci_mapping *mp = NULL; + struct vlan_priority_tci_mapping *np; + u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK; + + /* See if a priority mapping exists.. */ + mp = vlan->egress_priority_map[skb_prio & 0xF]; + while (mp) { + if (mp->priority == skb_prio) { + if (mp->vlan_qos && !vlan_qos) + vlan->nr_egress_mappings--; + else if (!mp->vlan_qos && vlan_qos) + vlan->nr_egress_mappings++; + mp->vlan_qos = vlan_qos; + return 0; + } + mp = mp->next; + } + + /* Create a new mapping then. */ + mp = vlan->egress_priority_map[skb_prio & 0xF]; + np = kmalloc(sizeof(struct vlan_priority_tci_mapping), GFP_KERNEL); + if (!np) + return -ENOBUFS; + + np->next = mp; + np->priority = skb_prio; + np->vlan_qos = vlan_qos; + /* Before inserting this element in hash table, make sure all its fields + * are committed to memory. + * coupled with smp_rmb() in vlan_dev_get_egress_qos_mask() + */ + smp_wmb(); + vlan->egress_priority_map[skb_prio & 0xF] = np; + if (vlan_qos) + vlan->nr_egress_mappings++; + return 0; +} + +/* Flags are defined in the vlan_flags enum in include/linux/if_vlan.h file. 
*/ +int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask) +{ + struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + u32 old_flags = vlan->flags; + + if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP | + VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP)) + return -EINVAL; + + vlan->flags = (old_flags & ~mask) | (flags & mask); + + if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_GVRP) { + if (vlan->flags & VLAN_FLAG_GVRP) + vlan_gvrp_request_join(dev); + else + vlan_gvrp_request_leave(dev); + } + + if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_MVRP) { + if (vlan->flags & VLAN_FLAG_MVRP) + vlan_mvrp_request_join(dev); + else + vlan_mvrp_request_leave(dev); + } + return 0; +} + +void vlan_dev_get_realdev_name(const struct net_device *dev, char *result) +{ + strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23); +} + +static int vlan_dev_open(struct net_device *dev) +{ + struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct net_device *real_dev = vlan->real_dev; + int err; + + if (!(real_dev->flags & IFF_UP) && + !(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) + return -ENETDOWN; + + if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) { + err = dev_uc_add(real_dev, dev->dev_addr); + if (err < 0) + goto out; + } + + if (dev->flags & IFF_ALLMULTI) { + err = dev_set_allmulti(real_dev, 1); + if (err < 0) + goto del_unicast; + } + if (dev->flags & IFF_PROMISC) { + err = dev_set_promiscuity(real_dev, 1); + if (err < 0) + goto clear_allmulti; + } + + ether_addr_copy(vlan->real_dev_addr, real_dev->dev_addr); + + if (vlan->flags & VLAN_FLAG_GVRP) + vlan_gvrp_request_join(dev); + + if (vlan->flags & VLAN_FLAG_MVRP) + vlan_mvrp_request_join(dev); + + if (netif_carrier_ok(real_dev)) + netif_carrier_on(dev); + return 0; + +clear_allmulti: + if (dev->flags & IFF_ALLMULTI) + dev_set_allmulti(real_dev, -1); +del_unicast: + if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) + dev_uc_del(real_dev, dev->dev_addr); +out: + netif_carrier_off(dev); + return err; +} + +static int vlan_dev_stop(struct net_device *dev) +{ + struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct net_device *real_dev = vlan->real_dev; + + dev_mc_unsync(real_dev, dev); + dev_uc_unsync(real_dev, dev); + if (dev->flags & IFF_ALLMULTI) + dev_set_allmulti(real_dev, -1); + if (dev->flags & IFF_PROMISC) + dev_set_promiscuity(real_dev, -1); + + if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) + dev_uc_del(real_dev, dev->dev_addr); + + netif_carrier_off(dev); + return 0; +} + +static int vlan_dev_set_mac_address(struct net_device *dev, void *p) +{ + struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + struct sockaddr *addr = p; + int err; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (!(dev->flags & IFF_UP)) + goto out; + + if (!ether_addr_equal(addr->sa_data, real_dev->dev_addr)) { + err = dev_uc_add(real_dev, addr->sa_data); + if (err < 0) + return err; + } + + if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) + dev_uc_del(real_dev, dev->dev_addr); + +out: + ether_addr_copy(dev->dev_addr, addr->sa_data); + return 0; +} + +static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + const struct net_device_ops *ops = real_dev->netdev_ops; + struct ifreq ifrr; + int err = -EOPNOTSUPP; + + strncpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ); + ifrr.ifr_ifru = ifr->ifr_ifru; + + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case 
SIOCSMIIREG: + case SIOCSHWTSTAMP: + case SIOCGHWTSTAMP: + if (netif_device_present(real_dev) && ops->ndo_do_ioctl) + err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd); + break; + } + + if (!err) + ifr->ifr_ifru = ifrr.ifr_ifru; + + return err; +} + +static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa) +{ + struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + const struct net_device_ops *ops = real_dev->netdev_ops; + int err = 0; + + if (netif_device_present(real_dev) && ops->ndo_neigh_setup) + err = ops->ndo_neigh_setup(real_dev, pa); + + return err; +} + +#if IS_ENABLED(CONFIG_FCOE) +static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid, + struct scatterlist *sgl, unsigned int sgc) +{ + struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + const struct net_device_ops *ops = real_dev->netdev_ops; + int rc = 0; + + if (ops->ndo_fcoe_ddp_setup) + rc = ops->ndo_fcoe_ddp_setup(real_dev, xid, sgl, sgc); + + return rc; +} + +static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid) +{ + struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + const struct net_device_ops *ops = real_dev->netdev_ops; + int len = 0; + + if (ops->ndo_fcoe_ddp_done) + len = ops->ndo_fcoe_ddp_done(real_dev, xid); + + return len; +} + +static int vlan_dev_fcoe_enable(struct net_device *dev) +{ + struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + const struct net_device_ops *ops = real_dev->netdev_ops; + int rc = -EINVAL; + + if (ops->ndo_fcoe_enable) + rc = ops->ndo_fcoe_enable(real_dev); + return rc; +} + +static int vlan_dev_fcoe_disable(struct net_device *dev) +{ + struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + const struct net_device_ops *ops = real_dev->netdev_ops; + int rc = -EINVAL; + + if (ops->ndo_fcoe_disable) + rc = ops->ndo_fcoe_disable(real_dev); + return rc; +} + +static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) +{ + struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + const struct net_device_ops *ops = real_dev->netdev_ops; + int rc = -EINVAL; + + if (ops->ndo_fcoe_get_wwn) + rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type); + return rc; +} + +static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid, + struct scatterlist *sgl, unsigned int sgc) +{ + struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + const struct net_device_ops *ops = real_dev->netdev_ops; + int rc = 0; + + if (ops->ndo_fcoe_ddp_target) + rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc); + + return rc; +} +#endif + +static void vlan_dev_change_rx_flags(struct net_device *dev, int change) +{ + struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + + if (dev->flags & IFF_UP) { + if (change & IFF_ALLMULTI) + dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); + if (change & IFF_PROMISC) + dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1); + } +} + +static void vlan_dev_set_rx_mode(struct net_device *vlan_dev) +{ + dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev); + dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev); +} + +/* + * vlan network devices have devices nesting below it, and are a special + * "super class" of normal network devices; split their locks off into a + * separate class since they always nest. 
+ */ +static struct lock_class_key vlan_netdev_xmit_lock_key; +static struct lock_class_key vlan_netdev_addr_lock_key; + +static void vlan_dev_set_lockdep_one(struct net_device *dev, + struct netdev_queue *txq, + void *_subclass) +{ + lockdep_set_class_and_subclass(&txq->_xmit_lock, + &vlan_netdev_xmit_lock_key, + *(int *)_subclass); +} + +static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass) +{ + lockdep_set_class_and_subclass(&dev->addr_list_lock, + &vlan_netdev_addr_lock_key, + subclass); + netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass); +} + +static int vlan_dev_get_lock_subclass(struct net_device *dev) +{ + return vlan_dev_priv(dev)->nest_level; +} + +static const struct header_ops vlan_header_ops = { + .create = vlan_dev_hard_header, + .parse = eth_header_parse, +}; + +static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, + const void *daddr, const void *saddr, + unsigned int len) +{ + struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct net_device *real_dev = vlan->real_dev; + + if (saddr == NULL) + saddr = dev->dev_addr; + + return dev_hard_header(skb, real_dev, type, daddr, saddr, len); +} + +static const struct header_ops vlan_passthru_header_ops = { + .create = vlan_passthru_hard_header, + .parse = eth_header_parse, +}; + +static struct device_type vlan_type = { + .name = "vlan", +}; + +static const struct net_device_ops vlan_netdev_ops; + +static int vlan_dev_init(struct net_device *dev) +{ + struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + + netif_carrier_off(dev); + + /* IFF_BROADCAST|IFF_MULTICAST; ??? */ + dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | + IFF_MASTER | IFF_SLAVE); + dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) | + (1<<__LINK_STATE_DORMANT))) | + (1<<__LINK_STATE_PRESENT); + + dev->hw_features = NETIF_F_ALL_CSUM | NETIF_F_SG | + NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | + NETIF_F_HIGHDMA | NETIF_F_SCTP_CSUM | + NETIF_F_ALL_FCOE; + + dev->features |= real_dev->vlan_features | NETIF_F_LLTX | + NETIF_F_GSO_SOFTWARE; + dev->gso_max_size = real_dev->gso_max_size; + if (dev->features & NETIF_F_VLAN_FEATURES) + netdev_warn(real_dev, "VLAN features are set incorrectly. 
Q-in-Q configurations may not work correctly.\n"); + + dev->vlan_features = real_dev->vlan_features & ~NETIF_F_ALL_FCOE; + + /* ipv6 shared card related stuff */ + dev->dev_id = real_dev->dev_id; + + if (is_zero_ether_addr(dev->dev_addr)) + eth_hw_addr_inherit(dev, real_dev); + if (is_zero_ether_addr(dev->broadcast)) + memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); + +#if IS_ENABLED(CONFIG_FCOE) + dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid; +#endif + + dev->needed_headroom = real_dev->needed_headroom; + if (vlan_hw_offload_capable(real_dev->features, + vlan_dev_priv(dev)->vlan_proto)) { + dev->header_ops = &vlan_passthru_header_ops; + dev->hard_header_len = real_dev->hard_header_len; + } else { + dev->header_ops = &vlan_header_ops; + dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN; + } + + dev->netdev_ops = &vlan_netdev_ops; + + SET_NETDEV_DEVTYPE(dev, &vlan_type); + + vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev)); + + vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats); + if (!vlan_dev_priv(dev)->vlan_pcpu_stats) + return -ENOMEM; + + return 0; +} + +static void vlan_dev_uninit(struct net_device *dev) +{ + struct vlan_priority_tci_mapping *pm; + struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + int i; + + for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) { + while ((pm = vlan->egress_priority_map[i]) != NULL) { + vlan->egress_priority_map[i] = pm->next; + kfree(pm); + } + } +} + +static netdev_features_t vlan_dev_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + netdev_features_t old_features = features; + + features = netdev_intersect_features(features, real_dev->vlan_features); + features |= NETIF_F_RXCSUM; + features = netdev_intersect_features(features, real_dev->features); + + features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_GSO_SOFTWARE); + features |= NETIF_F_LLTX; + + return features; +} + +static int vlan_ethtool_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + + return __ethtool_get_settings(vlan->real_dev, cmd); +} + +static void vlan_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, vlan_fullname, sizeof(info->driver)); + strlcpy(info->version, vlan_version, sizeof(info->version)); + strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); +} + +static int vlan_ethtool_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + const struct ethtool_ops *ops = vlan->real_dev->ethtool_ops; + + if (ops->get_ts_info) { + return ops->get_ts_info(vlan->real_dev, info); + } else { + info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info->phc_index = -1; + } + + return 0; +} + +static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct vlan_pcpu_stats *p; + u32 rx_errors = 0, tx_dropped = 0; + int i; + + for_each_possible_cpu(i) { + u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes; + unsigned int start; + + p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i); + do { + start = u64_stats_fetch_begin_irq(&p->syncp); + rxpackets = p->rx_packets; + rxbytes = p->rx_bytes; + rxmulticast = p->rx_multicast; + txpackets = p->tx_packets; + txbytes = p->tx_bytes; + } while 
(u64_stats_fetch_retry_irq(&p->syncp, start)); + + stats->rx_packets += rxpackets; + stats->rx_bytes += rxbytes; + stats->multicast += rxmulticast; + stats->tx_packets += txpackets; + stats->tx_bytes += txbytes; + /* rx_errors & tx_dropped are u32 */ + rx_errors += p->rx_errors; + tx_dropped += p->tx_dropped; + } + stats->rx_errors = rx_errors; + stats->tx_dropped = tx_dropped; + + return stats; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void vlan_dev_poll_controller(struct net_device *dev) +{ + return; +} + +static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo) +{ + struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct net_device *real_dev = vlan->real_dev; + struct netpoll *netpoll; + int err = 0; + + netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL); + err = -ENOMEM; + if (!netpoll) + goto out; + + err = __netpoll_setup(netpoll, real_dev); + if (err) { + kfree(netpoll); + goto out; + } + + vlan->netpoll = netpoll; + +out: + return err; +} + +static void vlan_dev_netpoll_cleanup(struct net_device *dev) +{ + struct vlan_dev_priv *vlan= vlan_dev_priv(dev); + struct netpoll *netpoll = vlan->netpoll; + + if (!netpoll) + return; + + vlan->netpoll = NULL; + + __netpoll_free_async(netpoll); +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +static int vlan_dev_get_iflink(const struct net_device *dev) +{ + struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + + return real_dev->ifindex; +} + +static const struct ethtool_ops vlan_ethtool_ops = { + .get_settings = vlan_ethtool_get_settings, + .get_drvinfo = vlan_ethtool_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ts_info = vlan_ethtool_get_ts_info, +}; + +static const struct net_device_ops vlan_netdev_ops = { + .ndo_change_mtu = vlan_dev_change_mtu, + .ndo_init = vlan_dev_init, + .ndo_uninit = vlan_dev_uninit, + .ndo_open = vlan_dev_open, + .ndo_stop = vlan_dev_stop, + .ndo_start_xmit = vlan_dev_hard_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = vlan_dev_set_mac_address, + .ndo_set_rx_mode = vlan_dev_set_rx_mode, + .ndo_change_rx_flags = vlan_dev_change_rx_flags, + .ndo_do_ioctl = vlan_dev_ioctl, + .ndo_neigh_setup = vlan_dev_neigh_setup, + .ndo_get_stats64 = vlan_dev_get_stats64, +#if IS_ENABLED(CONFIG_FCOE) + .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup, + .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, + .ndo_fcoe_enable = vlan_dev_fcoe_enable, + .ndo_fcoe_disable = vlan_dev_fcoe_disable, + .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn, + .ndo_fcoe_ddp_target = vlan_dev_fcoe_ddp_target, +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = vlan_dev_poll_controller, + .ndo_netpoll_setup = vlan_dev_netpoll_setup, + .ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup, +#endif + .ndo_fix_features = vlan_dev_fix_features, + .ndo_get_lock_subclass = vlan_dev_get_lock_subclass, + .ndo_get_iflink = vlan_dev_get_iflink, +}; + +static void vlan_dev_free(struct net_device *dev) +{ + struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + + free_percpu(vlan->vlan_pcpu_stats); + vlan->vlan_pcpu_stats = NULL; + free_netdev(dev); +} + +void vlan_setup(struct net_device *dev) +{ + ether_setup(dev); + + dev->priv_flags |= IFF_802_1Q_VLAN; + dev->priv_flags &= ~IFF_TX_SKB_SHARING; + netif_keep_dst(dev); + dev->tx_queue_len = 0; + + dev->netdev_ops = &vlan_netdev_ops; + dev->destructor = vlan_dev_free; + dev->ethtool_ops = &vlan_ethtool_ops; + + eth_zero_addr(dev->broadcast); +} diff --git a/net/8021q/vlan_gvrp.c b/net/8021q/vlan_gvrp.c new file mode 100644 index 
000000000..66a80320b --- /dev/null +++ b/net/8021q/vlan_gvrp.c @@ -0,0 +1,70 @@ +/* + * IEEE 802.1Q GARP VLAN Registration Protocol (GVRP) + * + * Copyright (c) 2008 Patrick McHardy <kaber@trash.net> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + */ +#include <linux/types.h> +#include <linux/if_vlan.h> +#include <net/garp.h> +#include "vlan.h" + +#define GARP_GVRP_ADDRESS { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 } + +enum gvrp_attributes { + GVRP_ATTR_INVALID, + GVRP_ATTR_VID, + __GVRP_ATTR_MAX +}; +#define GVRP_ATTR_MAX (__GVRP_ATTR_MAX - 1) + +static struct garp_application vlan_gvrp_app __read_mostly = { + .proto.group_address = GARP_GVRP_ADDRESS, + .maxattr = GVRP_ATTR_MAX, + .type = GARP_APPLICATION_GVRP, +}; + +int vlan_gvrp_request_join(const struct net_device *dev) +{ + const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + __be16 vlan_id = htons(vlan->vlan_id); + + if (vlan->vlan_proto != htons(ETH_P_8021Q)) + return 0; + return garp_request_join(vlan->real_dev, &vlan_gvrp_app, + &vlan_id, sizeof(vlan_id), GVRP_ATTR_VID); +} + +void vlan_gvrp_request_leave(const struct net_device *dev) +{ + const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + __be16 vlan_id = htons(vlan->vlan_id); + + if (vlan->vlan_proto != htons(ETH_P_8021Q)) + return; + garp_request_leave(vlan->real_dev, &vlan_gvrp_app, + &vlan_id, sizeof(vlan_id), GVRP_ATTR_VID); +} + +int vlan_gvrp_init_applicant(struct net_device *dev) +{ + return garp_init_applicant(dev, &vlan_gvrp_app); +} + +void vlan_gvrp_uninit_applicant(struct net_device *dev) +{ + garp_uninit_applicant(dev, &vlan_gvrp_app); +} + +int __init vlan_gvrp_init(void) +{ + return garp_register_application(&vlan_gvrp_app); +} + +void vlan_gvrp_uninit(void) +{ + garp_unregister_application(&vlan_gvrp_app); +} diff --git a/net/8021q/vlan_mvrp.c b/net/8021q/vlan_mvrp.c new file mode 100644 index 000000000..e0fe09180 --- /dev/null +++ b/net/8021q/vlan_mvrp.c @@ -0,0 +1,76 @@ +/* + * IEEE 802.1Q Multiple VLAN Registration Protocol (MVRP) + * + * Copyright (c) 2012 Massachusetts Institute of Technology + * + * Adapted from code in net/8021q/vlan_gvrp.c + * Copyright (c) 2008 Patrick McHardy <kaber@trash.net> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ */ +#include <linux/types.h> +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <net/mrp.h> +#include "vlan.h" + +#define MRP_MVRP_ADDRESS { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 } + +enum mvrp_attributes { + MVRP_ATTR_INVALID, + MVRP_ATTR_VID, + __MVRP_ATTR_MAX +}; +#define MVRP_ATTR_MAX (__MVRP_ATTR_MAX - 1) + +static struct mrp_application vlan_mrp_app __read_mostly = { + .type = MRP_APPLICATION_MVRP, + .maxattr = MVRP_ATTR_MAX, + .pkttype.type = htons(ETH_P_MVRP), + .group_address = MRP_MVRP_ADDRESS, + .version = 0, +}; + +int vlan_mvrp_request_join(const struct net_device *dev) +{ + const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + __be16 vlan_id = htons(vlan->vlan_id); + + if (vlan->vlan_proto != htons(ETH_P_8021Q)) + return 0; + return mrp_request_join(vlan->real_dev, &vlan_mrp_app, + &vlan_id, sizeof(vlan_id), MVRP_ATTR_VID); +} + +void vlan_mvrp_request_leave(const struct net_device *dev) +{ + const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + __be16 vlan_id = htons(vlan->vlan_id); + + if (vlan->vlan_proto != htons(ETH_P_8021Q)) + return; + mrp_request_leave(vlan->real_dev, &vlan_mrp_app, + &vlan_id, sizeof(vlan_id), MVRP_ATTR_VID); +} + +int vlan_mvrp_init_applicant(struct net_device *dev) +{ + return mrp_init_applicant(dev, &vlan_mrp_app); +} + +void vlan_mvrp_uninit_applicant(struct net_device *dev) +{ + mrp_uninit_applicant(dev, &vlan_mrp_app); +} + +int __init vlan_mvrp_init(void) +{ + return mrp_register_application(&vlan_mrp_app); +} + +void vlan_mvrp_uninit(void) +{ + mrp_unregister_application(&vlan_mrp_app); +} diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c new file mode 100644 index 000000000..c92b52f37 --- /dev/null +++ b/net/8021q/vlan_netlink.c @@ -0,0 +1,273 @@ +/* + * VLAN netlink control interface + * + * Copyright (c) 2007 Patrick McHardy <kaber@trash.net> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ */ + +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/if_vlan.h> +#include <linux/module.h> +#include <net/net_namespace.h> +#include <net/netlink.h> +#include <net/rtnetlink.h> +#include "vlan.h" + + +static const struct nla_policy vlan_policy[IFLA_VLAN_MAX + 1] = { + [IFLA_VLAN_ID] = { .type = NLA_U16 }, + [IFLA_VLAN_FLAGS] = { .len = sizeof(struct ifla_vlan_flags) }, + [IFLA_VLAN_EGRESS_QOS] = { .type = NLA_NESTED }, + [IFLA_VLAN_INGRESS_QOS] = { .type = NLA_NESTED }, + [IFLA_VLAN_PROTOCOL] = { .type = NLA_U16 }, +}; + +static const struct nla_policy vlan_map_policy[IFLA_VLAN_QOS_MAX + 1] = { + [IFLA_VLAN_QOS_MAPPING] = { .len = sizeof(struct ifla_vlan_qos_mapping) }, +}; + + +static inline int vlan_validate_qos_map(struct nlattr *attr) +{ + if (!attr) + return 0; + return nla_validate_nested(attr, IFLA_VLAN_QOS_MAX, vlan_map_policy); +} + +static int vlan_validate(struct nlattr *tb[], struct nlattr *data[]) +{ + struct ifla_vlan_flags *flags; + u16 id; + int err; + + if (tb[IFLA_ADDRESS]) { + if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) + return -EINVAL; + if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) + return -EADDRNOTAVAIL; + } + + if (!data) + return -EINVAL; + + if (data[IFLA_VLAN_PROTOCOL]) { + switch (nla_get_be16(data[IFLA_VLAN_PROTOCOL])) { + case htons(ETH_P_8021Q): + case htons(ETH_P_8021AD): + break; + default: + return -EPROTONOSUPPORT; + } + } + + if (data[IFLA_VLAN_ID]) { + id = nla_get_u16(data[IFLA_VLAN_ID]); + if (id >= VLAN_VID_MASK) + return -ERANGE; + } + if (data[IFLA_VLAN_FLAGS]) { + flags = nla_data(data[IFLA_VLAN_FLAGS]); + if ((flags->flags & flags->mask) & + ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP | + VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP)) + return -EINVAL; + } + + err = vlan_validate_qos_map(data[IFLA_VLAN_INGRESS_QOS]); + if (err < 0) + return err; + err = vlan_validate_qos_map(data[IFLA_VLAN_EGRESS_QOS]); + if (err < 0) + return err; + return 0; +} + +static int vlan_changelink(struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]) +{ + struct ifla_vlan_flags *flags; + struct ifla_vlan_qos_mapping *m; + struct nlattr *attr; + int rem; + + if (data[IFLA_VLAN_FLAGS]) { + flags = nla_data(data[IFLA_VLAN_FLAGS]); + vlan_dev_change_flags(dev, flags->flags, flags->mask); + } + if (data[IFLA_VLAN_INGRESS_QOS]) { + nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) { + m = nla_data(attr); + vlan_dev_set_ingress_priority(dev, m->to, m->from); + } + } + if (data[IFLA_VLAN_EGRESS_QOS]) { + nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) { + m = nla_data(attr); + vlan_dev_set_egress_priority(dev, m->from, m->to); + } + } + return 0; +} + +static int vlan_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]) +{ + struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct net_device *real_dev; + __be16 proto; + int err; + + if (!data[IFLA_VLAN_ID]) + return -EINVAL; + + if (!tb[IFLA_LINK]) + return -EINVAL; + real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); + if (!real_dev) + return -ENODEV; + + if (data[IFLA_VLAN_PROTOCOL]) + proto = nla_get_be16(data[IFLA_VLAN_PROTOCOL]); + else + proto = htons(ETH_P_8021Q); + + vlan->vlan_proto = proto; + vlan->vlan_id = nla_get_u16(data[IFLA_VLAN_ID]); + vlan->real_dev = real_dev; + vlan->flags = VLAN_FLAG_REORDER_HDR; + + err = vlan_check_real_dev(real_dev, vlan->vlan_proto, vlan->vlan_id); + if (err < 0) + return err; + + if (!tb[IFLA_MTU]) + dev->mtu = real_dev->mtu; + else if (dev->mtu > 
real_dev->mtu) + return -EINVAL; + + err = vlan_changelink(dev, tb, data); + if (err < 0) + return err; + + return register_vlan_dev(dev); +} + +static inline size_t vlan_qos_map_size(unsigned int n) +{ + if (n == 0) + return 0; + /* IFLA_VLAN_{EGRESS,INGRESS}_QOS + n * IFLA_VLAN_QOS_MAPPING */ + return nla_total_size(sizeof(struct nlattr)) + + nla_total_size(sizeof(struct ifla_vlan_qos_mapping)) * n; +} + +static size_t vlan_get_size(const struct net_device *dev) +{ + struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + + return nla_total_size(2) + /* IFLA_VLAN_PROTOCOL */ + nla_total_size(2) + /* IFLA_VLAN_ID */ + nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */ + vlan_qos_map_size(vlan->nr_ingress_mappings) + + vlan_qos_map_size(vlan->nr_egress_mappings); +} + +static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev) +{ + struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct vlan_priority_tci_mapping *pm; + struct ifla_vlan_flags f; + struct ifla_vlan_qos_mapping m; + struct nlattr *nest; + unsigned int i; + + if (nla_put_be16(skb, IFLA_VLAN_PROTOCOL, vlan->vlan_proto) || + nla_put_u16(skb, IFLA_VLAN_ID, vlan->vlan_id)) + goto nla_put_failure; + if (vlan->flags) { + f.flags = vlan->flags; + f.mask = ~0; + if (nla_put(skb, IFLA_VLAN_FLAGS, sizeof(f), &f)) + goto nla_put_failure; + } + if (vlan->nr_ingress_mappings) { + nest = nla_nest_start(skb, IFLA_VLAN_INGRESS_QOS); + if (nest == NULL) + goto nla_put_failure; + + for (i = 0; i < ARRAY_SIZE(vlan->ingress_priority_map); i++) { + if (!vlan->ingress_priority_map[i]) + continue; + + m.from = i; + m.to = vlan->ingress_priority_map[i]; + if (nla_put(skb, IFLA_VLAN_QOS_MAPPING, + sizeof(m), &m)) + goto nla_put_failure; + } + nla_nest_end(skb, nest); + } + + if (vlan->nr_egress_mappings) { + nest = nla_nest_start(skb, IFLA_VLAN_EGRESS_QOS); + if (nest == NULL) + goto nla_put_failure; + + for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) { + for (pm = vlan->egress_priority_map[i]; pm; + pm = pm->next) { + if (!pm->vlan_qos) + continue; + + m.from = pm->priority; + m.to = (pm->vlan_qos >> 13) & 0x7; + if (nla_put(skb, IFLA_VLAN_QOS_MAPPING, + sizeof(m), &m)) + goto nla_put_failure; + } + } + nla_nest_end(skb, nest); + } + return 0; + +nla_put_failure: + return -EMSGSIZE; +} + +static struct net *vlan_get_link_net(const struct net_device *dev) +{ + struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + + return dev_net(real_dev); +} + +struct rtnl_link_ops vlan_link_ops __read_mostly = { + .kind = "vlan", + .maxtype = IFLA_VLAN_MAX, + .policy = vlan_policy, + .priv_size = sizeof(struct vlan_dev_priv), + .setup = vlan_setup, + .validate = vlan_validate, + .newlink = vlan_newlink, + .changelink = vlan_changelink, + .dellink = unregister_vlan_dev, + .get_size = vlan_get_size, + .fill_info = vlan_fill_info, + .get_link_net = vlan_get_link_net, +}; + +int __init vlan_netlink_init(void) +{ + return rtnl_link_register(&vlan_link_ops); +} + +void __exit vlan_netlink_fini(void) +{ + rtnl_link_unregister(&vlan_link_ops); +} + +MODULE_ALIAS_RTNL_LINK("vlan"); diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c new file mode 100644 index 000000000..ae63cf72a --- /dev/null +++ b/net/8021q/vlanproc.c @@ -0,0 +1,326 @@ +/****************************************************************************** + * vlanproc.c VLAN Module. /proc filesystem interface. + * + * This module is completely hardware-independent and provides + * access to the router using Linux /proc filesystem. 
+ * + * Author: Ben Greear, <greearb@candelatech.com> coppied from wanproc.c + * by: Gene Kozin <genek@compuserve.com> + * + * Copyright: (c) 1998 Ben Greear + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * ============================================================================ + * Jan 20, 1998 Ben Greear Initial Version + *****************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <linux/fs.h> +#include <linux/netdevice.h> +#include <linux/if_vlan.h> +#include <net/net_namespace.h> +#include <net/netns/generic.h> +#include "vlanproc.h" +#include "vlan.h" + +/****** Function Prototypes *************************************************/ + +/* Methods for preparing data for reading proc entries */ +static int vlan_seq_show(struct seq_file *seq, void *v); +static void *vlan_seq_start(struct seq_file *seq, loff_t *pos); +static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos); +static void vlan_seq_stop(struct seq_file *seq, void *); +static int vlandev_seq_show(struct seq_file *seq, void *v); + +/* + * Global Data + */ + + +/* + * Names of the proc directory entries + */ + +static const char name_root[] = "vlan"; +static const char name_conf[] = "config"; + +/* + * Structures for interfacing with the /proc filesystem. + * VLAN creates its own directory /proc/net/vlan with the following + * entries: + * config device status/configuration + * <device> entry for each device + */ + +/* + * Generic /proc/net/vlan/<file> file and inode operations + */ + +static const struct seq_operations vlan_seq_ops = { + .start = vlan_seq_start, + .next = vlan_seq_next, + .stop = vlan_seq_stop, + .show = vlan_seq_show, +}; + +static int vlan_seq_open(struct inode *inode, struct file *file) +{ + return seq_open_net(inode, file, &vlan_seq_ops, + sizeof(struct seq_net_private)); +} + +static const struct file_operations vlan_fops = { + .owner = THIS_MODULE, + .open = vlan_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_net, +}; + +/* + * /proc/net/vlan/<device> file and inode operations + */ + +static int vlandev_seq_open(struct inode *inode, struct file *file) +{ + return single_open(file, vlandev_seq_show, PDE_DATA(inode)); +} + +static const struct file_operations vlandev_fops = { + .owner = THIS_MODULE, + .open = vlandev_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* + * Proc filesystem directory entries. 
+ */ + +/* Strings */ +static const char *const vlan_name_type_str[VLAN_NAME_TYPE_HIGHEST] = { + [VLAN_NAME_TYPE_RAW_PLUS_VID] = "VLAN_NAME_TYPE_RAW_PLUS_VID", + [VLAN_NAME_TYPE_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_PLUS_VID_NO_PAD", + [VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD", + [VLAN_NAME_TYPE_PLUS_VID] = "VLAN_NAME_TYPE_PLUS_VID", +}; +/* + * Interface functions + */ + +/* + * Clean up /proc/net/vlan entries + */ + +void vlan_proc_cleanup(struct net *net) +{ + struct vlan_net *vn = net_generic(net, vlan_net_id); + + if (vn->proc_vlan_conf) + remove_proc_entry(name_conf, vn->proc_vlan_dir); + + if (vn->proc_vlan_dir) + remove_proc_entry(name_root, net->proc_net); + + /* Dynamically added entries should be cleaned up as their vlan_device + * is removed, so we should not have to take care of it here... + */ +} + +/* + * Create /proc/net/vlan entries + */ + +int __net_init vlan_proc_init(struct net *net) +{ + struct vlan_net *vn = net_generic(net, vlan_net_id); + + vn->proc_vlan_dir = proc_net_mkdir(net, name_root, net->proc_net); + if (!vn->proc_vlan_dir) + goto err; + + vn->proc_vlan_conf = proc_create(name_conf, S_IFREG|S_IRUSR|S_IWUSR, + vn->proc_vlan_dir, &vlan_fops); + if (!vn->proc_vlan_conf) + goto err; + return 0; + +err: + pr_err("can't create entry in proc filesystem!\n"); + vlan_proc_cleanup(net); + return -ENOBUFS; +} + +/* + * Add directory entry for VLAN device. + */ + +int vlan_proc_add_dev(struct net_device *vlandev) +{ + struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); + struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id); + + if (!strcmp(vlandev->name, name_conf)) + return -EINVAL; + vlan->dent = + proc_create_data(vlandev->name, S_IFREG|S_IRUSR|S_IWUSR, + vn->proc_vlan_dir, &vlandev_fops, vlandev); + if (!vlan->dent) + return -ENOBUFS; + return 0; +} + +/* + * Delete directory entry for VLAN device. + */ +int vlan_proc_rem_dev(struct net_device *vlandev) +{ + /** NOTE: This will consume the memory pointed to by dent, it seems. 
*/ + proc_remove(vlan_dev_priv(vlandev)->dent); + vlan_dev_priv(vlandev)->dent = NULL; + return 0; +} + +/****** Proc filesystem entry points ****************************************/ + +/* + * The following few functions build the content of /proc/net/vlan/config + */ + +/* start read of /proc/net/vlan/config */ +static void *vlan_seq_start(struct seq_file *seq, loff_t *pos) + __acquires(rcu) +{ + struct net_device *dev; + struct net *net = seq_file_net(seq); + loff_t i = 1; + + rcu_read_lock(); + if (*pos == 0) + return SEQ_START_TOKEN; + + for_each_netdev_rcu(net, dev) { + if (!is_vlan_dev(dev)) + continue; + + if (i++ == *pos) + return dev; + } + + return NULL; +} + +static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct net_device *dev; + struct net *net = seq_file_net(seq); + + ++*pos; + + dev = v; + if (v == SEQ_START_TOKEN) + dev = net_device_entry(&net->dev_base_head); + + for_each_netdev_continue_rcu(net, dev) { + if (!is_vlan_dev(dev)) + continue; + + return dev; + } + + return NULL; +} + +static void vlan_seq_stop(struct seq_file *seq, void *v) + __releases(rcu) +{ + rcu_read_unlock(); +} + +static int vlan_seq_show(struct seq_file *seq, void *v) +{ + struct net *net = seq_file_net(seq); + struct vlan_net *vn = net_generic(net, vlan_net_id); + + if (v == SEQ_START_TOKEN) { + const char *nmtype = NULL; + + seq_puts(seq, "VLAN Dev name | VLAN ID\n"); + + if (vn->name_type < ARRAY_SIZE(vlan_name_type_str)) + nmtype = vlan_name_type_str[vn->name_type]; + + seq_printf(seq, "Name-Type: %s\n", + nmtype ? nmtype : "UNKNOWN"); + } else { + const struct net_device *vlandev = v; + const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); + + seq_printf(seq, "%-15s| %d | %s\n", vlandev->name, + vlan->vlan_id, vlan->real_dev->name); + } + return 0; +} + +static int vlandev_seq_show(struct seq_file *seq, void *offset) +{ + struct net_device *vlandev = (struct net_device *) seq->private; + const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *stats; + static const char fmt64[] = "%30s %12llu\n"; + int i; + + if (!is_vlan_dev(vlandev)) + return 0; + + stats = dev_get_stats(vlandev, &temp); + seq_printf(seq, + "%s VID: %d REORDER_HDR: %i dev->priv_flags: %hx\n", + vlandev->name, vlan->vlan_id, + (int)(vlan->flags & 1), vlandev->priv_flags); + + seq_printf(seq, fmt64, "total frames received", stats->rx_packets); + seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes); + seq_printf(seq, fmt64, "Broadcast/Multicast Rcvd", stats->multicast); + seq_puts(seq, "\n"); + seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets); + seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes); + seq_printf(seq, "Device: %s", vlan->real_dev->name); + /* now show all PRIORITY mappings relating to this VLAN */ + seq_printf(seq, "\nINGRESS priority mappings: " + "0:%u 1:%u 2:%u 3:%u 4:%u 5:%u 6:%u 7:%u\n", + vlan->ingress_priority_map[0], + vlan->ingress_priority_map[1], + vlan->ingress_priority_map[2], + vlan->ingress_priority_map[3], + vlan->ingress_priority_map[4], + vlan->ingress_priority_map[5], + vlan->ingress_priority_map[6], + vlan->ingress_priority_map[7]); + + seq_printf(seq, " EGRESS priority mappings: "); + for (i = 0; i < 16; i++) { + const struct vlan_priority_tci_mapping *mp + = vlan->egress_priority_map[i]; + while (mp) { + seq_printf(seq, "%u:%hu ", + mp->priority, ((mp->vlan_qos >> 13) & 0x7)); + mp = mp->next; + } + } + seq_puts(seq, "\n"); + + return 0; +} diff 
--git a/net/8021q/vlanproc.h b/net/8021q/vlanproc.h new file mode 100644 index 000000000..063f60a3d --- /dev/null +++ b/net/8021q/vlanproc.h @@ -0,0 +1,20 @@ +#ifndef __BEN_VLAN_PROC_INC__ +#define __BEN_VLAN_PROC_INC__ + +#ifdef CONFIG_PROC_FS +struct net; + +int vlan_proc_init(struct net *net); +int vlan_proc_rem_dev(struct net_device *vlandev); +int vlan_proc_add_dev(struct net_device *vlandev); +void vlan_proc_cleanup(struct net *net); + +#else /* No CONFIG_PROC_FS */ + +#define vlan_proc_init(net) (0) +#define vlan_proc_cleanup(net) do {} while (0) +#define vlan_proc_add_dev(dev) ({(void)(dev), 0; }) +#define vlan_proc_rem_dev(dev) ({(void)(dev), 0; }) +#endif + +#endif /* !(__BEN_VLAN_PROC_INC__) */
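
The vlan_dev_get_stats64() fragment at the top of this hunk folds per-CPU counters into a single struct rtnl_link_stats64 using the u64_stats sequence-counter protocol: each CPU's counters are snapshotted inside a begin/retry pair so that a concurrent writer forces a re-read instead of a torn 64-bit value. A condensed reader-side sketch of that pattern follows; the vlan_pcpu_stats fields come from the code above, while the helper name fold_rx_stats is made up for illustration.

	static void fold_rx_stats(struct net_device *dev,
				  struct rtnl_link_stats64 *stats)
	{
		int cpu;

		for_each_possible_cpu(cpu) {
			const struct vlan_pcpu_stats *p;
			u64 rxpackets, rxbytes;
			unsigned int start;

			p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, cpu);
			do {
				/* retry if a writer updated p->syncp meanwhile */
				start = u64_stats_fetch_begin_irq(&p->syncp);
				rxpackets = p->rx_packets;
				rxbytes   = p->rx_bytes;
			} while (u64_stats_fetch_retry_irq(&p->syncp, start));

			stats->rx_packets += rxpackets;
			stats->rx_bytes   += rxbytes;
		}
	}

Only the two rx counters are shown here; the real function also accumulates multicast and tx counters, plus the u32 rx_errors and tx_dropped fields that need no sequence protection.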
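
vlan_validate() above gates every netlink request: the protocol must be 802.1Q or 802.1ad, the VLAN ID must be below VLAN_VID_MASK, and only the four defined flag bits may appear in the requested mask. The following standalone sketch mirrors the range and flag checks; the numeric constants stand in for the kernel's VLAN_* macros and flag values and are assumptions for illustration only.

	#include <stdbool.h>
	#include <stdint.h>

	#define VID_LIMIT     0x0fff                  /* usable IDs are 0..4094 */
	#define ALLOWED_FLAGS (0x1 | 0x2 | 0x4 | 0x8) /* REORDER_HDR, GVRP,
	                                               * LOOSE_BINDING, MVRP */

	/* Returns true when an (id, flags, mask) triple would pass the
	 * checks mirrored from vlan_validate(). */
	static bool vlan_args_ok(uint16_t id, uint32_t flags, uint32_t mask)
	{
		if (id >= VID_LIMIT)
			return false;
		if ((flags & mask) & ~ALLOWED_FLAGS)
			return false;
		return true;
	}

	int main(void)
	{
		return vlan_args_ok(100, 0x1, 0x1) ? 0 : 1;
	}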
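
vlan_newlink() refuses to create a device unless the request names a lower device (IFLA_LINK) and a VLAN ID (IFLA_VLAN_ID); the protocol defaults to 802.1Q when IFLA_VLAN_PROTOCOL is absent, and an explicit MTU may not exceed the real device's. In practice this path is driven by iproute2, for example "ip link add link eth0 name eth0.100 type vlan id 100", where eth0 and eth0.100 are placeholder names. Once such a device exists, the /proc/net/vlan/config file built by vlan_seq_show() above looks roughly like the sample below; this is an illustrative reconstruction from the format strings, assuming the default name type.

	VLAN Dev name | VLAN ID
	Name-Type: VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD
	eth0.100       | 100 | eth0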
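
The egress priority map stores, per skb->priority value, a ready-to-use TCI fragment whose PCP field occupies bits 15..13; that is why both vlan_fill_info() and vlandev_seq_show() recover the 0..7 priority with (vlan_qos >> 13) & 0x7. The self-contained sketch below shows the encoding; the encoder direction is an assumption consistent with that decode (the kernel's own encoder lives in vlan_dev.c, outside this hunk), and the names are illustrative.

	#include <stdint.h>
	#include <stdio.h>

	#define PCP_SHIFT 13      /* PCP sits in TCI bits 15..13 */
	#define PCP_MASK  0xe000

	/* Build the stored vlan_qos value from a 0..7 PCP. */
	static uint16_t pcp_to_vlan_qos(uint16_t pcp)
	{
		return (uint16_t)((pcp << PCP_SHIFT) & PCP_MASK);
	}

	/* Same decode as used in vlan_fill_info() and vlandev_seq_show(). */
	static uint8_t vlan_qos_to_pcp(uint16_t vlan_qos)
	{
		return (vlan_qos >> PCP_SHIFT) & 0x7;
	}

	int main(void)
	{
		uint16_t qos = pcp_to_vlan_qos(5);
		printf("pcp 5 -> vlan_qos 0x%04x -> pcp %u\n",
		       qos, vlan_qos_to_pcp(qos));
		return 0;
	}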
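
When CONFIG_PROC_FS is disabled, vlanproc.h swaps the four functions for macros built on GCC statement expressions, so each stub still evaluates (and thus "uses") its argument and still yields 0 for callers that check the return value. A tiny standalone illustration of the idiom follows; the macro and argument are made up for the example, and it relies on the GNU C statement-expression extension (gcc/clang).

	#include <stdio.h>

	/* A statement expression takes the value of its last expression (0 here);
	 * the (void) cast consumes the argument without side effects. */
	#define stub_add_dev(dev) ({ (void)(dev); 0; })

	int main(void)
	{
		int err = stub_add_dev("eth0.100");   /* placeholder argument */
		printf("stub returned %d\n", err);
		return 0;
	}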