|  |  |  |
|---|---|---|
| author | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2015-08-05 17:04:01 -0300 |
| committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2015-08-05 17:04:01 -0300 |
| commit | 57f0f512b273f60d52568b8c6b77e17f5636edc0 (patch) | |
| tree | 5e910f0e82173f4ef4f51111366a3f1299037a7b | /include/linux/if_vlan.h |
Initial import
Diffstat (limited to 'include/linux/if_vlan.h')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | include/linux/if_vlan.h | 631 |

1 file changed, 631 insertions, 0 deletions
```diff
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
new file mode 100644
index 000000000..920e4457c
--- /dev/null
+++ b/include/linux/if_vlan.h
@@ -0,0 +1,631 @@
+/*
+ * VLAN		An implementation of 802.1Q VLAN tagging.
+ *
+ * Authors:	Ben Greear <greearb@candelatech.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#ifndef _LINUX_IF_VLAN_H_
+#define _LINUX_IF_VLAN_H_
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/bug.h>
+#include <uapi/linux/if_vlan.h>
+
+#define VLAN_HLEN	4		/* The additional bytes required by VLAN
+					 * (in addition to the Ethernet header)
+					 */
+#define VLAN_ETH_HLEN	18		/* Total octets in header. */
+#define VLAN_ETH_ZLEN	64		/* Min. octets in frame sans FCS */
+
+/*
+ * According to 802.3ac, the packet can be 4 bytes longer. --Klika Jan
+ */
+#define VLAN_ETH_DATA_LEN	1500	/* Max. octets in payload */
+#define VLAN_ETH_FRAME_LEN	1518	/* Max. octets in frame sans FCS */
+
+/*
+ * struct vlan_hdr - vlan header
+ * @h_vlan_TCI: priority and VLAN ID
+ * @h_vlan_encapsulated_proto: packet type ID or len
+ */
+struct vlan_hdr {
+	__be16 h_vlan_TCI;
+	__be16 h_vlan_encapsulated_proto;
+};
+
+/**
+ * struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr)
+ * @h_dest: destination ethernet address
+ * @h_source: source ethernet address
+ * @h_vlan_proto: ethernet protocol
+ * @h_vlan_TCI: priority and VLAN ID
+ * @h_vlan_encapsulated_proto: packet type ID or len
+ */
+struct vlan_ethhdr {
+	unsigned char h_dest[ETH_ALEN];
+	unsigned char h_source[ETH_ALEN];
+	__be16 h_vlan_proto;
+	__be16 h_vlan_TCI;
+	__be16 h_vlan_encapsulated_proto;
+};
+
+#include <linux/skbuff.h>
+
+static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
+{
+	return (struct vlan_ethhdr *)skb_mac_header(skb);
+}
+
+#define VLAN_PRIO_MASK		0xe000 /* Priority Code Point */
+#define VLAN_PRIO_SHIFT		13
+#define VLAN_CFI_MASK		0x1000 /* Canonical Format Indicator */
+#define VLAN_TAG_PRESENT	VLAN_CFI_MASK
+#define VLAN_VID_MASK		0x0fff /* VLAN Identifier */
+#define VLAN_N_VID		4096
+
+/* found in socket.c */
+extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
+
+static inline bool is_vlan_dev(struct net_device *dev)
+{
+	return dev->priv_flags & IFF_802_1Q_VLAN;
+}
+
+#define skb_vlan_tag_present(__skb)	((__skb)->vlan_tci & VLAN_TAG_PRESENT)
+#define skb_vlan_tag_get(__skb)	((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
+#define skb_vlan_tag_get_id(__skb)	((__skb)->vlan_tci & VLAN_VID_MASK)
+
+/**
+ * struct vlan_pcpu_stats - VLAN percpu rx/tx stats
+ * @rx_packets: number of received packets
+ * @rx_bytes: number of received bytes
+ * @rx_multicast: number of received multicast packets
+ * @tx_packets: number of transmitted packets
+ * @tx_bytes: number of transmitted bytes
+ * @syncp: synchronization point for 64bit counters
+ * @rx_errors: number of rx errors
+ * @tx_dropped: number of tx drops
+ */
+struct vlan_pcpu_stats {
+	u64 rx_packets;
+	u64 rx_bytes;
+	u64 rx_multicast;
+	u64 tx_packets;
+	u64 tx_bytes;
+	struct u64_stats_sync syncp;
+	u32 rx_errors;
+	u32 tx_dropped;
+};
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+
+extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
+					       __be16 vlan_proto, u16 vlan_id);
+extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
+extern u16 vlan_dev_vlan_id(const struct net_device *dev);
+extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);
+
+/**
+ * struct vlan_priority_tci_mapping - vlan egress priority mappings
+ * @priority: skb priority
+ * @vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
+ * @next: pointer to next struct
+ */
+struct vlan_priority_tci_mapping {
+	u32 priority;
+	u16 vlan_qos;
+	struct vlan_priority_tci_mapping *next;
+};
+
+struct proc_dir_entry;
+struct netpoll;
+
+/**
+ * struct vlan_dev_priv - VLAN private device data
+ * @nr_ingress_mappings: number of ingress priority mappings
+ * @ingress_priority_map: ingress priority mappings
+ * @nr_egress_mappings: number of egress priority mappings
+ * @egress_priority_map: hash of egress priority mappings
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_id: VLAN identifier
+ * @flags: device flags
+ * @real_dev: underlying netdevice
+ * @real_dev_addr: address of underlying netdevice
+ * @dent: proc dir entry
+ * @vlan_pcpu_stats: ptr to percpu rx stats
+ */
+struct vlan_dev_priv {
+	unsigned int nr_ingress_mappings;
+	u32 ingress_priority_map[8];
+	unsigned int nr_egress_mappings;
+	struct vlan_priority_tci_mapping *egress_priority_map[16];
+
+	__be16 vlan_proto;
+	u16 vlan_id;
+	u16 flags;
+
+	struct net_device *real_dev;
+	unsigned char real_dev_addr[ETH_ALEN];
+
+	struct proc_dir_entry *dent;
+	struct vlan_pcpu_stats __percpu *vlan_pcpu_stats;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	struct netpoll *netpoll;
+#endif
+	unsigned int nest_level;
+};
+
+static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
+{
+	return netdev_priv(dev);
+}
+
+static inline u16
+vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio)
+{
+	struct vlan_priority_tci_mapping *mp;
+
+	smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */
+
+	mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)];
+	while (mp) {
+		if (mp->priority == skprio) {
+			return mp->vlan_qos; /* This should already be shifted
+					      * to mask correctly with the
+					      * VLAN's TCI */
+		}
+		mp = mp->next;
+	}
+	return 0;
+}
+
+extern bool vlan_do_receive(struct sk_buff **skb);
+
+extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid);
+extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid);
+
+extern int vlan_vids_add_by_dev(struct net_device *dev,
+				const struct net_device *by_dev);
+extern void vlan_vids_del_by_dev(struct net_device *dev,
+				 const struct net_device *by_dev);
+
+extern bool vlan_uses_dev(const struct net_device *dev);
+
+static inline int vlan_get_encap_level(struct net_device *dev)
+{
+	BUG_ON(!is_vlan_dev(dev));
+	return vlan_dev_priv(dev)->nest_level;
+}
+#else
+static inline struct net_device *
+__vlan_find_dev_deep_rcu(struct net_device *real_dev,
+			 __be16 vlan_proto, u16 vlan_id)
+{
+	return NULL;
+}
+
+static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
+{
+	BUG();
+	return NULL;
+}
+
+static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
+{
+	BUG();
+	return 0;
+}
+
+static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev)
+{
+	BUG();
+	return 0;
+}
+
+static inline u16 vlan_dev_get_egress_qos_mask(struct net_device *dev,
+					       u32 skprio)
+{
+	return 0;
+}
+
+static inline bool vlan_do_receive(struct sk_buff **skb)
+{
+	return false;
+}
+
+static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
+{
+	return 0;
+}
+
+static inline void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
+{
+}
+
+static inline int vlan_vids_add_by_dev(struct net_device *dev,
+				       const struct net_device *by_dev)
+{
+	return 0;
+}
+
+static inline void vlan_vids_del_by_dev(struct net_device *dev,
+					const struct net_device *by_dev)
+{
+}
+
+static inline bool vlan_uses_dev(const struct net_device *dev)
+{
+	return false;
+}
+static inline int vlan_get_encap_level(struct net_device *dev)
+{
+	BUG();
+	return 0;
+}
+#endif
+
+static inline bool vlan_hw_offload_capable(netdev_features_t features,
+					   __be16 proto)
+{
+	if (proto == htons(ETH_P_8021Q) && features & NETIF_F_HW_VLAN_CTAG_TX)
+		return true;
+	if (proto == htons(ETH_P_8021AD) && features & NETIF_F_HW_VLAN_STAG_TX)
+		return true;
+	return false;
+}
+
+/**
+ * __vlan_insert_tag - regular VLAN tag inserting
+ * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_tci: VLAN TCI to insert
+ *
+ * Inserts the VLAN tag into @skb as part of the payload
+ * Returns error if skb_cow_head fails.
+ *
+ * Does not change skb->protocol so this function can be used during receive.
+ */
+static inline int __vlan_insert_tag(struct sk_buff *skb,
+				    __be16 vlan_proto, u16 vlan_tci)
+{
+	struct vlan_ethhdr *veth;
+
+	if (skb_cow_head(skb, VLAN_HLEN) < 0)
+		return -ENOMEM;
+
+	veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
+
+	/* Move the mac addresses to the beginning of the new header. */
+	memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);
+	skb->mac_header -= VLAN_HLEN;
+
+	/* first, the ethernet type */
+	veth->h_vlan_proto = vlan_proto;
+
+	/* now, the TCI */
+	veth->h_vlan_TCI = htons(vlan_tci);
+
+	return 0;
+}
+
+/**
+ * vlan_insert_tag - regular VLAN tag inserting
+ * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_tci: VLAN TCI to insert
+ *
+ * Inserts the VLAN tag into @skb as part of the payload
+ * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ *
+ * Does not change skb->protocol so this function can be used during receive.
+ */
+static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
+					      __be16 vlan_proto, u16 vlan_tci)
+{
+	int err;
+
+	err = __vlan_insert_tag(skb, vlan_proto, vlan_tci);
+	if (err) {
+		dev_kfree_skb_any(skb);
+		return NULL;
+	}
+	return skb;
+}
+
+/**
+ * vlan_insert_tag_set_proto - regular VLAN tag inserting
+ * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_tci: VLAN TCI to insert
+ *
+ * Inserts the VLAN tag into @skb as part of the payload
+ * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ */
+static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
+							__be16 vlan_proto,
+							u16 vlan_tci)
+{
+	skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
+	if (skb)
+		skb->protocol = vlan_proto;
+	return skb;
+}
+
+/*
+ * __vlan_hwaccel_push_inside - pushes vlan tag to the payload
+ * @skb: skbuff to tag
+ *
+ * Pushes the VLAN tag from @skb->vlan_tci inside to the payload.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ */
+static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
+{
+	skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
+					skb_vlan_tag_get(skb));
+	if (likely(skb))
+		skb->vlan_tci = 0;
+	return skb;
+}
+/*
+ * vlan_hwaccel_push_inside - pushes vlan tag to the payload
+ * @skb: skbuff to tag
+ *
+ * Checks if a tag is present in @skb->vlan_tci and if it is, it pushes the
+ * VLAN tag from @skb->vlan_tci inside to the payload.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ */
+static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb)
+{
+	if (skb_vlan_tag_present(skb))
+		skb = __vlan_hwaccel_push_inside(skb);
+	return skb;
+}
+
+/**
+ * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
+ * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_tci: VLAN TCI to insert
+ *
+ * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
+ */
+static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
+					  __be16 vlan_proto, u16 vlan_tci)
+{
+	skb->vlan_proto = vlan_proto;
+	skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
+}
+
+/**
+ * __vlan_get_tag - get the VLAN ID that is part of the payload
+ * @skb: skbuff to query
+ * @vlan_tci: buffer to store value
+ *
+ * Returns error if the skb is not of VLAN type
+ */
+static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
+{
+	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
+
+	if (veth->h_vlan_proto != htons(ETH_P_8021Q) &&
+	    veth->h_vlan_proto != htons(ETH_P_8021AD))
+		return -EINVAL;
+
+	*vlan_tci = ntohs(veth->h_vlan_TCI);
+	return 0;
+}
+
+/**
+ * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[]
+ * @skb: skbuff to query
+ * @vlan_tci: buffer to store value
+ *
+ * Returns error if @skb->vlan_tci is not set correctly
+ */
+static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
+					 u16 *vlan_tci)
+{
+	if (skb_vlan_tag_present(skb)) {
+		*vlan_tci = skb_vlan_tag_get(skb);
+		return 0;
+	} else {
+		*vlan_tci = 0;
+		return -EINVAL;
+	}
+}
+
+#define HAVE_VLAN_GET_TAG
+
+/**
+ * vlan_get_tag - get the VLAN ID from the skb
+ * @skb: skbuff to query
+ * @vlan_tci: buffer to store value
+ *
+ * Returns error if the skb is not VLAN tagged
+ */
+static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
+{
+	if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
+		return __vlan_hwaccel_get_tag(skb, vlan_tci);
+	} else {
+		return __vlan_get_tag(skb, vlan_tci);
+	}
+}
+
+/**
+ * __vlan_get_protocol - get protocol EtherType.
+ * @skb: skbuff to query
+ * @type: first vlan protocol
+ * @depth: buffer to store length of eth and vlan tags in bytes
+ *
+ * Returns the EtherType of the packet, regardless of whether it is
+ * vlan encapsulated (normal or hardware accelerated) or not.
+ */
+static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
+					 int *depth)
+{
+	unsigned int vlan_depth = skb->mac_len;
+
+	/* if type is 802.1Q/AD then the header should already be
+	 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
+	 * ETH_HLEN otherwise
+	 */
+	if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
+		if (vlan_depth) {
+			if (WARN_ON(vlan_depth < VLAN_HLEN))
+				return 0;
+			vlan_depth -= VLAN_HLEN;
+		} else {
+			vlan_depth = ETH_HLEN;
+		}
+		do {
+			struct vlan_hdr *vh;
+
+			if (unlikely(!pskb_may_pull(skb,
+						    vlan_depth + VLAN_HLEN)))
+				return 0;
+
+			vh = (struct vlan_hdr *)(skb->data + vlan_depth);
+			type = vh->h_vlan_encapsulated_proto;
+			vlan_depth += VLAN_HLEN;
+		} while (type == htons(ETH_P_8021Q) ||
+			 type == htons(ETH_P_8021AD));
+	}
+
+	if (depth)
+		*depth = vlan_depth;
+
+	return type;
+}
+
+/**
+ * vlan_get_protocol - get protocol EtherType.
+ * @skb: skbuff to query
+ *
+ * Returns the EtherType of the packet, regardless of whether it is
+ * vlan encapsulated (normal or hardware accelerated) or not.
+ */
+static inline __be16 vlan_get_protocol(struct sk_buff *skb)
+{
+	return __vlan_get_protocol(skb, skb->protocol, NULL);
+}
+
+static inline void vlan_set_encap_proto(struct sk_buff *skb,
+					struct vlan_hdr *vhdr)
+{
+	__be16 proto;
+	unsigned short *rawp;
+
+	/*
+	 * Was a VLAN packet, grab the encapsulated protocol, which the layer
+	 * three protocols care about.
+	 */
+
+	proto = vhdr->h_vlan_encapsulated_proto;
+	if (ntohs(proto) >= ETH_P_802_3_MIN) {
+		skb->protocol = proto;
+		return;
+	}
+
+	rawp = (unsigned short *)(vhdr + 1);
+	if (*rawp == 0xFFFF)
+		/*
+		 * This is a magic hack to spot IPX packets. Older Novell
+		 * breaks the protocol design and runs IPX over 802.3 without
+		 * an 802.2 LLC layer. We look for FFFF which isn't a used
+		 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
+		 * but does for the rest.
+		 */
+		skb->protocol = htons(ETH_P_802_3);
+	else
+		/*
+		 * Real 802.2 LLC
+		 */
+		skb->protocol = htons(ETH_P_802_2);
+}
+
+/**
+ * skb_vlan_tagged - check if skb is vlan tagged.
+ * @skb: skbuff to query
+ *
+ * Returns true if the skb is tagged, regardless of whether it is hardware
+ * accelerated or not.
+ */
+static inline bool skb_vlan_tagged(const struct sk_buff *skb)
+{
+	if (!skb_vlan_tag_present(skb) &&
+	    likely(skb->protocol != htons(ETH_P_8021Q) &&
+		   skb->protocol != htons(ETH_P_8021AD)))
+		return false;
+
+	return true;
+}
+
+/**
+ * skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers.
+ * @skb: skbuff to query
+ *
+ * Returns true if the skb is tagged with multiple vlan headers, regardless
+ * of whether it is hardware accelerated or not.
+ */
+static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
+{
+	__be16 protocol = skb->protocol;
+
+	if (!skb_vlan_tag_present(skb)) {
+		struct vlan_ethhdr *veh;
+
+		if (likely(protocol != htons(ETH_P_8021Q) &&
+			   protocol != htons(ETH_P_8021AD)))
+			return false;
+
+		veh = (struct vlan_ethhdr *)skb->data;
+		protocol = veh->h_vlan_encapsulated_proto;
+	}
+
+	if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD))
+		return false;
+
+	return true;
+}
+
+/**
+ * vlan_features_check - drop unsafe features for skb with multiple tags.
+ * @skb: skbuff to query
+ * @features: features to be checked
+ *
+ * Returns features without unsafe ones if the skb has multiple tags.
+ */
+static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
+						    netdev_features_t features)
+{
+	if (skb_vlan_tagged_multi(skb))
+		features = netdev_intersect_features(features,
+						     NETIF_F_SG |
+						     NETIF_F_HIGHDMA |
+						     NETIF_F_FRAGLIST |
+						     NETIF_F_GEN_CSUM |
+						     NETIF_F_HW_VLAN_CTAG_TX |
+						     NETIF_F_HW_VLAN_STAG_TX);
+
+	return features;
+}
+
+#endif /* !(_LINUX_IF_VLAN_H_) */
```
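For context, here is a minimal sketch of how a driver's transmit path might consume the helpers in this header when the NIC cannot insert the 802.1Q tag in hardware. This example is not part of the commit above; the function name `example_xmit` and the surrounding driver glue are hypothetical, and only `skb_vlan_tag_present()`, `vlan_hw_offload_capable()` and `vlan_hwaccel_push_inside()` are taken from the header.

```c
/* Hypothetical illustration, not part of this commit: push an out-of-band
 * VLAN tag into the payload before handing the frame to hardware that
 * lacks NETIF_F_HW_VLAN_CTAG_TX/STAG_TX offload.
 */
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* skb->vlan_tci carries the tag out of band when VLAN_TAG_PRESENT
	 * is set; if this device cannot offload insertion for the given
	 * protocol, fold the tag back into the packet data.
	 */
	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(dev->features, skb->vlan_proto)) {
		skb = vlan_hwaccel_push_inside(skb);
		if (unlikely(!skb))
			return NETDEV_TX_OK;	/* skb already freed on error */
	}

	/* ... hand the (now in-band tagged) frame to the hardware ... */
	dev_kfree_skb_any(skb);		/* placeholder for the real DMA path */
	return NETDEV_TX_OK;
}
```

Note that `vlan_hwaccel_push_inside()` follows the skb_unshare() convention documented in the header: on failure the skb is already freed and NULL is returned, so the caller only needs to check the return value.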