| author | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2015-08-05 17:04:01 -0300 |
|---|---|---|
| committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2015-08-05 17:04:01 -0300 |
| commit | 57f0f512b273f60d52568b8c6b77e17f5636edc0 (patch) | |
| tree | 5e910f0e82173f4ef4f51111366a3f1299037a7b | /drivers/net/ethernet/mellanox |
Initial import
Diffstat (limited to 'drivers/net/ethernet/mellanox')
62 files changed, 44920 insertions, 0 deletions
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig
new file mode 100644
index 000000000..8cf7563a8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/Kconfig
@@ -0,0 +1,24 @@
+#
+# Mellanox driver configuration
+#
+
+config NET_VENDOR_MELLANOX
+	bool "Mellanox devices"
+	default y
+	depends on PCI
+	---help---
+	  If you have a network (Ethernet) card belonging to this class, say Y
+	  and read the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Mellanox cards. If you say Y, you will be asked
+	  for your specific card in the following questions.
+
+if NET_VENDOR_MELLANOX
+
+source "drivers/net/ethernet/mellanox/mlx4/Kconfig"
+source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig"
+
+endif # NET_VENDOR_MELLANOX
diff --git a/drivers/net/ethernet/mellanox/Makefile b/drivers/net/ethernet/mellanox/Makefile
new file mode 100644
index 000000000..38fe32ef5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Mellanox device drivers.
+#
+
+obj-$(CONFIG_MLX4_CORE) += mlx4/
+obj-$(CONFIG_MLX5_CORE) += mlx5/core/
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
new file mode 100644
index 000000000..1486ce902
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -0,0 +1,46 @@
+#
+# Mellanox driver configuration
+#
+
+config MLX4_EN
+	tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
+	depends on PCI
+	select MLX4_CORE
+	select PTP_1588_CLOCK
+	---help---
+	  This driver supports Mellanox Technologies ConnectX Ethernet
+	  devices.
+
+config MLX4_EN_DCB
+	bool "Data Center Bridging (DCB) Support"
+	default y
+	depends on MLX4_EN && DCB
+	---help---
+	  Say Y here if you want to use Data Center Bridging (DCB) in the
+	  driver.
+	  If set to N, will not be able to configure QoS and ratelimit attributes.
+	  This flag is depended on the kernel's DCB support.
+
+	  If unsure, set to Y
+
+config MLX4_EN_VXLAN
+	bool "VXLAN offloads Support"
+	default y
+	depends on MLX4_EN && VXLAN && !(MLX4_EN=y && VXLAN=m)
+	---help---
+	  Say Y here if you want to use VXLAN offloads in the driver.
+
+config MLX4_CORE
+	tristate
+	depends on PCI
+	default n
+
+config MLX4_DEBUG
+	bool "Verbose debugging output" if (MLX4_CORE && EXPERT)
+	depends on MLX4_CORE
+	default y
+	---help---
+	  This option causes debugging code to be compiled into the
+	  mlx4_core driver. The output can be turned on via the
+	  debug_level module parameter (which can also be set after
+	  the driver is loaded through sysfs).
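The MLX4_DEBUG help text above refers to a debug_level module parameter that can also be changed through sysfs after the driver is loaded. As a rough, stand-alone sketch of how such a runtime-tunable debug knob is commonly wired up in a kernel module (this is illustrative only, not the mlx4_core implementation; the CONFIG_EXAMPLE_DEBUG symbol, the example_dbg() macro, and the module name are made up for this example):

```c
/* Illustrative sketch only -- not the mlx4_core code. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

/* Writable at runtime via /sys/module/<module>/parameters/debug_level
 * because of the 0644 permission bits. Defaults to off.
 */
static int debug_level;
module_param_named(debug_level, debug_level, int, 0644);
MODULE_PARM_DESC(debug_level,
		 "Enable verbose debug output if non-zero (default 0)");

/* Verbose prints compile away entirely unless the (hypothetical) Kconfig
 * debug option is enabled, and are further gated on debug_level at runtime.
 */
#ifdef CONFIG_EXAMPLE_DEBUG
#define example_dbg(fmt, ...)						\
	do {								\
		if (debug_level)					\
			pr_info("example: " fmt, ##__VA_ARGS__);	\
	} while (0)
#else
#define example_dbg(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif

static int __init example_init(void)
{
	example_dbg("loaded with debug_level=%d\n", debug_level);
	return 0;
}

static void __exit example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("Dual BSD/GPL");
```

With 0644 permissions the parameter shows up under /sys/module/<module>/parameters/debug_level, so the extra output can be toggled at runtime without reloading the module, which matches the behaviour the MLX4_DEBUG help text describes.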
diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
new file mode 100644
index 000000000..c82217e0d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/Makefile
@@ -0,0 +1,11 @@
+obj-$(CONFIG_MLX4_CORE)		+= mlx4_core.o
+
+mlx4_core-y :=	alloc.o catas.o cmd.o cq.o eq.o fw.o fw_qos.o icm.o intf.o \
+		main.o mcg.o mr.o pd.o port.o profile.o qp.o reset.o sense.o \
+		srq.o resource_tracker.o
+
+obj-$(CONFIG_MLX4_EN)		+= mlx4_en.o
+
+mlx4_en-y :=	en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
+		en_resources.o en_netdev.o en_selftest.o en_clock.o
+mlx4_en-$(CONFIG_MLX4_EN_DCB) += en_dcb_nl.o
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
new file mode 100644
index 000000000..0c51c69f8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -0,0 +1,835 @@
+/*
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */ + +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/export.h> +#include <linux/bitmap.h> +#include <linux/dma-mapping.h> +#include <linux/vmalloc.h> + +#include "mlx4.h" + +u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap) +{ + u32 obj; + + spin_lock(&bitmap->lock); + + obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last); + if (obj >= bitmap->max) { + bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) + & bitmap->mask; + obj = find_first_zero_bit(bitmap->table, bitmap->max); + } + + if (obj < bitmap->max) { + set_bit(obj, bitmap->table); + bitmap->last = (obj + 1); + if (bitmap->last == bitmap->max) + bitmap->last = 0; + obj |= bitmap->top; + } else + obj = -1; + + if (obj != -1) + --bitmap->avail; + + spin_unlock(&bitmap->lock); + + return obj; +} + +void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr) +{ + mlx4_bitmap_free_range(bitmap, obj, 1, use_rr); +} + +static unsigned long find_aligned_range(unsigned long *bitmap, + u32 start, u32 nbits, + int len, int align, u32 skip_mask) +{ + unsigned long end, i; + +again: + start = ALIGN(start, align); + + while ((start < nbits) && (test_bit(start, bitmap) || + (start & skip_mask))) + start += align; + + if (start >= nbits) + return -1; + + end = start+len; + if (end > nbits) + return -1; + + for (i = start + 1; i < end; i++) { + if (test_bit(i, bitmap) || ((u32)i & skip_mask)) { + start = i + 1; + goto again; + } + } + + return start; +} + +u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, + int align, u32 skip_mask) +{ + u32 obj; + + if (likely(cnt == 1 && align == 1 && !skip_mask)) + return mlx4_bitmap_alloc(bitmap); + + spin_lock(&bitmap->lock); + + obj = find_aligned_range(bitmap->table, bitmap->last, + bitmap->max, cnt, align, skip_mask); + if (obj >= bitmap->max) { + bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) + & bitmap->mask; + obj = find_aligned_range(bitmap->table, 0, bitmap->max, + cnt, align, skip_mask); + } + + if (obj < bitmap->max) { + bitmap_set(bitmap->table, obj, cnt); + if (obj == bitmap->last) { + bitmap->last = (obj + cnt); + if (bitmap->last >= bitmap->max) + bitmap->last = 0; + } + obj |= bitmap->top; + } else + obj = -1; + + if (obj != -1) + bitmap->avail -= cnt; + + spin_unlock(&bitmap->lock); + + return obj; +} + +u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap) +{ + return bitmap->avail; +} + +static u32 mlx4_bitmap_masked_value(struct mlx4_bitmap *bitmap, u32 obj) +{ + return obj & (bitmap->max + bitmap->reserved_top - 1); +} + +void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt, + int use_rr) +{ + obj &= bitmap->max + bitmap->reserved_top - 1; + + spin_lock(&bitmap->lock); + if (!use_rr) { + bitmap->last = min(bitmap->last, obj); + bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) + & bitmap->mask; + } + bitmap_clear(bitmap->table, obj, cnt); + bitmap->avail += cnt; + spin_unlock(&bitmap->lock); +} + +int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, + u32 reserved_bot, u32 reserved_top) +{ + /* num must be a power of 2 */ + if (num != roundup_pow_of_two(num)) + return -EINVAL; + + bitmap->last = 0; + bitmap->top = 0; + bitmap->max = num - reserved_top; + bitmap->mask = mask; + bitmap->reserved_top = reserved_top; + bitmap->avail = num - reserved_top - reserved_bot; + bitmap->effective_len = bitmap->avail; + spin_lock_init(&bitmap->lock); + bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) * + sizeof (long), GFP_KERNEL); + if 
(!bitmap->table) + return -ENOMEM; + + bitmap_set(bitmap->table, 0, reserved_bot); + + return 0; +} + +void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap) +{ + kfree(bitmap->table); +} + +struct mlx4_zone_allocator { + struct list_head entries; + struct list_head prios; + u32 last_uid; + u32 mask; + /* protect the zone_allocator from concurrent accesses */ + spinlock_t lock; + enum mlx4_zone_alloc_flags flags; +}; + +struct mlx4_zone_entry { + struct list_head list; + struct list_head prio_list; + u32 uid; + struct mlx4_zone_allocator *allocator; + struct mlx4_bitmap *bitmap; + int use_rr; + int priority; + int offset; + enum mlx4_zone_flags flags; +}; + +struct mlx4_zone_allocator *mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags) +{ + struct mlx4_zone_allocator *zones = kmalloc(sizeof(*zones), GFP_KERNEL); + + if (NULL == zones) + return NULL; + + INIT_LIST_HEAD(&zones->entries); + INIT_LIST_HEAD(&zones->prios); + spin_lock_init(&zones->lock); + zones->last_uid = 0; + zones->mask = 0; + zones->flags = flags; + + return zones; +} + +int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc, + struct mlx4_bitmap *bitmap, + u32 flags, + int priority, + int offset, + u32 *puid) +{ + u32 mask = mlx4_bitmap_masked_value(bitmap, (u32)-1); + struct mlx4_zone_entry *it; + struct mlx4_zone_entry *zone = kmalloc(sizeof(*zone), GFP_KERNEL); + + if (NULL == zone) + return -ENOMEM; + + zone->flags = flags; + zone->bitmap = bitmap; + zone->use_rr = (flags & MLX4_ZONE_USE_RR) ? MLX4_USE_RR : 0; + zone->priority = priority; + zone->offset = offset; + + spin_lock(&zone_alloc->lock); + + zone->uid = zone_alloc->last_uid++; + zone->allocator = zone_alloc; + + if (zone_alloc->mask < mask) + zone_alloc->mask = mask; + + list_for_each_entry(it, &zone_alloc->prios, prio_list) + if (it->priority >= priority) + break; + + if (&it->prio_list == &zone_alloc->prios || it->priority > priority) + list_add_tail(&zone->prio_list, &it->prio_list); + list_add_tail(&zone->list, &it->list); + + spin_unlock(&zone_alloc->lock); + + *puid = zone->uid; + + return 0; +} + +/* Should be called under a lock */ +static int __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry) +{ + struct mlx4_zone_allocator *zone_alloc = entry->allocator; + + if (!list_empty(&entry->prio_list)) { + /* Check if we need to add an alternative node to the prio list */ + if (!list_is_last(&entry->list, &zone_alloc->entries)) { + struct mlx4_zone_entry *next = list_first_entry(&entry->list, + typeof(*next), + list); + + if (next->priority == entry->priority) + list_add_tail(&next->prio_list, &entry->prio_list); + } + + list_del(&entry->prio_list); + } + + list_del(&entry->list); + + if (zone_alloc->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP) { + u32 mask = 0; + struct mlx4_zone_entry *it; + + list_for_each_entry(it, &zone_alloc->prios, prio_list) { + u32 cur_mask = mlx4_bitmap_masked_value(it->bitmap, (u32)-1); + + if (mask < cur_mask) + mask = cur_mask; + } + zone_alloc->mask = mask; + } + + return 0; +} + +void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc) +{ + struct mlx4_zone_entry *zone, *tmp; + + spin_lock(&zone_alloc->lock); + + list_for_each_entry_safe(zone, tmp, &zone_alloc->entries, list) { + list_del(&zone->list); + list_del(&zone->prio_list); + kfree(zone); + } + + spin_unlock(&zone_alloc->lock); + kfree(zone_alloc); +} + +/* Should be called under a lock */ +static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count, + int align, u32 skip_mask, u32 *puid) +{ + u32 uid; + u32 res; + 
struct mlx4_zone_allocator *zone_alloc = zone->allocator; + struct mlx4_zone_entry *curr_node; + + res = mlx4_bitmap_alloc_range(zone->bitmap, count, + align, skip_mask); + + if (res != (u32)-1) { + res += zone->offset; + uid = zone->uid; + goto out; + } + + list_for_each_entry(curr_node, &zone_alloc->prios, prio_list) { + if (unlikely(curr_node->priority == zone->priority)) + break; + } + + if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO) { + struct mlx4_zone_entry *it = curr_node; + + list_for_each_entry_continue_reverse(it, &zone_alloc->entries, list) { + res = mlx4_bitmap_alloc_range(it->bitmap, count, + align, skip_mask); + if (res != (u32)-1) { + res += it->offset; + uid = it->uid; + goto out; + } + } + } + + if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO) { + struct mlx4_zone_entry *it = curr_node; + + list_for_each_entry_from(it, &zone_alloc->entries, list) { + if (unlikely(it == zone)) + continue; + + if (unlikely(it->priority != curr_node->priority)) + break; + + res = mlx4_bitmap_alloc_range(it->bitmap, count, + align, skip_mask); + if (res != (u32)-1) { + res += it->offset; + uid = it->uid; + goto out; + } + } + } + + if (zone->flags & MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO) { + if (list_is_last(&curr_node->prio_list, &zone_alloc->prios)) + goto out; + + curr_node = list_first_entry(&curr_node->prio_list, + typeof(*curr_node), + prio_list); + + list_for_each_entry_from(curr_node, &zone_alloc->entries, list) { + res = mlx4_bitmap_alloc_range(curr_node->bitmap, count, + align, skip_mask); + if (res != (u32)-1) { + res += curr_node->offset; + uid = curr_node->uid; + goto out; + } + } + } + +out: + if (NULL != puid && res != (u32)-1) + *puid = uid; + return res; +} + +/* Should be called under a lock */ +static void __mlx4_free_from_zone(struct mlx4_zone_entry *zone, u32 obj, + u32 count) +{ + mlx4_bitmap_free_range(zone->bitmap, obj - zone->offset, count, zone->use_rr); +} + +/* Should be called under a lock */ +static struct mlx4_zone_entry *__mlx4_find_zone_by_uid( + struct mlx4_zone_allocator *zones, u32 uid) +{ + struct mlx4_zone_entry *zone; + + list_for_each_entry(zone, &zones->entries, list) { + if (zone->uid == uid) + return zone; + } + + return NULL; +} + +struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 uid) +{ + struct mlx4_zone_entry *zone; + struct mlx4_bitmap *bitmap; + + spin_lock(&zones->lock); + + zone = __mlx4_find_zone_by_uid(zones, uid); + + bitmap = zone == NULL ? NULL : zone->bitmap; + + spin_unlock(&zones->lock); + + return bitmap; +} + +int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid) +{ + struct mlx4_zone_entry *zone; + int res; + + spin_lock(&zones->lock); + + zone = __mlx4_find_zone_by_uid(zones, uid); + + if (NULL == zone) { + res = -1; + goto out; + } + + res = __mlx4_zone_remove_one_entry(zone); + +out: + spin_unlock(&zones->lock); + kfree(zone); + + return res; +} + +/* Should be called under a lock */ +static struct mlx4_zone_entry *__mlx4_find_zone_by_uid_unique( + struct mlx4_zone_allocator *zones, u32 obj) +{ + struct mlx4_zone_entry *zone, *zone_candidate = NULL; + u32 dist = (u32)-1; + + /* Search for the smallest zone that this obj could be + * allocated from. This is done in order to handle + * situations when small bitmaps are allocated from bigger + * bitmaps (and the allocated space is marked as reserved in + * the bigger bitmap. 
+ */ + list_for_each_entry(zone, &zones->entries, list) { + if (obj >= zone->offset) { + u32 mobj = (obj - zone->offset) & zones->mask; + + if (mobj < zone->bitmap->max) { + u32 curr_dist = zone->bitmap->effective_len; + + if (curr_dist < dist) { + dist = curr_dist; + zone_candidate = zone; + } + } + } + } + + return zone_candidate; +} + +u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count, + int align, u32 skip_mask, u32 *puid) +{ + struct mlx4_zone_entry *zone; + int res = -1; + + spin_lock(&zones->lock); + + zone = __mlx4_find_zone_by_uid(zones, uid); + + if (NULL == zone) + goto out; + + res = __mlx4_alloc_from_zone(zone, count, align, skip_mask, puid); + +out: + spin_unlock(&zones->lock); + + return res; +} + +u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones, u32 uid, u32 obj, u32 count) +{ + struct mlx4_zone_entry *zone; + int res = 0; + + spin_lock(&zones->lock); + + zone = __mlx4_find_zone_by_uid(zones, uid); + + if (NULL == zone) { + res = -1; + goto out; + } + + __mlx4_free_from_zone(zone, obj, count); + +out: + spin_unlock(&zones->lock); + + return res; +} + +u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count) +{ + struct mlx4_zone_entry *zone; + int res; + + if (!(zones->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP)) + return -EFAULT; + + spin_lock(&zones->lock); + + zone = __mlx4_find_zone_by_uid_unique(zones, obj); + + if (NULL == zone) { + res = -1; + goto out; + } + + __mlx4_free_from_zone(zone, obj, count); + res = 0; + +out: + spin_unlock(&zones->lock); + + return res; +} +/* + * Handling for queue buffers -- we allocate a bunch of memory and + * register it in a memory region at HCA virtual address 0. If the + * requested size is > max_direct, we split the allocation into + * multiple pages, so we don't require too much contiguous memory. 
+ */ + +int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, + struct mlx4_buf *buf, gfp_t gfp) +{ + dma_addr_t t; + + if (size <= max_direct) { + buf->nbufs = 1; + buf->npages = 1; + buf->page_shift = get_order(size) + PAGE_SHIFT; + buf->direct.buf = dma_alloc_coherent(&dev->persist->pdev->dev, + size, &t, gfp); + if (!buf->direct.buf) + return -ENOMEM; + + buf->direct.map = t; + + while (t & ((1 << buf->page_shift) - 1)) { + --buf->page_shift; + buf->npages *= 2; + } + + memset(buf->direct.buf, 0, size); + } else { + int i; + + buf->direct.buf = NULL; + buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; + buf->npages = buf->nbufs; + buf->page_shift = PAGE_SHIFT; + buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), + gfp); + if (!buf->page_list) + return -ENOMEM; + + for (i = 0; i < buf->nbufs; ++i) { + buf->page_list[i].buf = + dma_alloc_coherent(&dev->persist->pdev->dev, + PAGE_SIZE, + &t, gfp); + if (!buf->page_list[i].buf) + goto err_free; + + buf->page_list[i].map = t; + + memset(buf->page_list[i].buf, 0, PAGE_SIZE); + } + + if (BITS_PER_LONG == 64) { + struct page **pages; + pages = kmalloc(sizeof *pages * buf->nbufs, gfp); + if (!pages) + goto err_free; + for (i = 0; i < buf->nbufs; ++i) + pages[i] = virt_to_page(buf->page_list[i].buf); + buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); + kfree(pages); + if (!buf->direct.buf) + goto err_free; + } + } + + return 0; + +err_free: + mlx4_buf_free(dev, size, buf); + + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(mlx4_buf_alloc); + +void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf) +{ + int i; + + if (buf->nbufs == 1) + dma_free_coherent(&dev->persist->pdev->dev, size, + buf->direct.buf, + buf->direct.map); + else { + if (BITS_PER_LONG == 64) + vunmap(buf->direct.buf); + + for (i = 0; i < buf->nbufs; ++i) + if (buf->page_list[i].buf) + dma_free_coherent(&dev->persist->pdev->dev, + PAGE_SIZE, + buf->page_list[i].buf, + buf->page_list[i].map); + kfree(buf->page_list); + } +} +EXPORT_SYMBOL_GPL(mlx4_buf_free); + +static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device, + gfp_t gfp) +{ + struct mlx4_db_pgdir *pgdir; + + pgdir = kzalloc(sizeof *pgdir, gfp); + if (!pgdir) + return NULL; + + bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2); + pgdir->bits[0] = pgdir->order0; + pgdir->bits[1] = pgdir->order1; + pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE, + &pgdir->db_dma, gfp); + if (!pgdir->db_page) { + kfree(pgdir); + return NULL; + } + + return pgdir; +} + +static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir, + struct mlx4_db *db, int order) +{ + int o; + int i; + + for (o = order; o <= 1; ++o) { + i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o); + if (i < MLX4_DB_PER_PAGE >> o) + goto found; + } + + return -ENOMEM; + +found: + clear_bit(i, pgdir->bits[o]); + + i <<= o; + + if (o > order) + set_bit(i ^ 1, pgdir->bits[order]); + + db->u.pgdir = pgdir; + db->index = i; + db->db = pgdir->db_page + db->index; + db->dma = pgdir->db_dma + db->index * 4; + db->order = order; + + return 0; +} + +int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_db_pgdir *pgdir; + int ret = 0; + + mutex_lock(&priv->pgdir_mutex); + + list_for_each_entry(pgdir, &priv->pgdir_list, list) + if (!mlx4_alloc_db_from_pgdir(pgdir, db, order)) + goto out; + + pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev, gfp); + if (!pgdir) { + ret = -ENOMEM; + goto out; + } + + 
list_add(&pgdir->list, &priv->pgdir_list); + + /* This should never fail -- we just allocated an empty page: */ + WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order)); + +out: + mutex_unlock(&priv->pgdir_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(mlx4_db_alloc); + +void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int o; + int i; + + mutex_lock(&priv->pgdir_mutex); + + o = db->order; + i = db->index; + + if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) { + clear_bit(i ^ 1, db->u.pgdir->order0); + ++o; + } + i >>= o; + set_bit(i, db->u.pgdir->bits[o]); + + if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) { + dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, + db->u.pgdir->db_page, db->u.pgdir->db_dma); + list_del(&db->u.pgdir->list); + kfree(db->u.pgdir); + } + + mutex_unlock(&priv->pgdir_mutex); +} +EXPORT_SYMBOL_GPL(mlx4_db_free); + +int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, + int size, int max_direct) +{ + int err; + + err = mlx4_db_alloc(dev, &wqres->db, 1, GFP_KERNEL); + if (err) + return err; + + *wqres->db.db = 0; + + err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf, GFP_KERNEL); + if (err) + goto err_db; + + err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift, + &wqres->mtt); + if (err) + goto err_buf; + + err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf, GFP_KERNEL); + if (err) + goto err_mtt; + + return 0; + +err_mtt: + mlx4_mtt_cleanup(dev, &wqres->mtt); +err_buf: + mlx4_buf_free(dev, size, &wqres->buf); +err_db: + mlx4_db_free(dev, &wqres->db); + + return err; +} +EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res); + +void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, + int size) +{ + mlx4_mtt_cleanup(dev, &wqres->mtt); + mlx4_buf_free(dev, size, &wqres->buf); + mlx4_db_free(dev, &wqres->db); +} +EXPORT_SYMBOL_GPL(mlx4_free_hwq_res); diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c new file mode 100644 index 000000000..715de8aff --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -0,0 +1,327 @@ +/* + * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/workqueue.h> +#include <linux/module.h> + +#include "mlx4.h" + +enum { + MLX4_CATAS_POLL_INTERVAL = 5 * HZ, +}; + + + +int mlx4_internal_err_reset = 1; +module_param_named(internal_err_reset, mlx4_internal_err_reset, int, 0644); +MODULE_PARM_DESC(internal_err_reset, + "Reset device on internal errors if non-zero (default 1)"); + +static int read_vendor_id(struct mlx4_dev *dev) +{ + u16 vendor_id = 0; + int ret; + + ret = pci_read_config_word(dev->persist->pdev, 0, &vendor_id); + if (ret) { + mlx4_err(dev, "Failed to read vendor ID, ret=%d\n", ret); + return ret; + } + + if (vendor_id == 0xffff) { + mlx4_err(dev, "PCI can't be accessed to read vendor id\n"); + return -EINVAL; + } + + return 0; +} + +static int mlx4_reset_master(struct mlx4_dev *dev) +{ + int err = 0; + + if (mlx4_is_master(dev)) + mlx4_report_internal_err_comm_event(dev); + + if (!pci_channel_offline(dev->persist->pdev)) { + err = read_vendor_id(dev); + /* If PCI can't be accessed to read vendor ID we assume that its + * link was disabled and chip was already reset. + */ + if (err) + return 0; + + err = mlx4_reset(dev); + if (err) + mlx4_err(dev, "Fail to reset HCA\n"); + } + + return err; +} + +static int mlx4_reset_slave(struct mlx4_dev *dev) +{ +#define COM_CHAN_RST_REQ_OFFSET 0x10 +#define COM_CHAN_RST_ACK_OFFSET 0x08 + + u32 comm_flags; + u32 rst_req; + u32 rst_ack; + unsigned long end; + struct mlx4_priv *priv = mlx4_priv(dev); + + if (pci_channel_offline(dev->persist->pdev)) + return 0; + + comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm + + MLX4_COMM_CHAN_FLAGS)); + if (comm_flags == 0xffffffff) { + mlx4_err(dev, "VF reset is not needed\n"); + return 0; + } + + if (!(dev->caps.vf_caps & MLX4_VF_CAP_FLAG_RESET)) { + mlx4_err(dev, "VF reset is not supported\n"); + return -EOPNOTSUPP; + } + + rst_req = (comm_flags & (u32)(1 << COM_CHAN_RST_REQ_OFFSET)) >> + COM_CHAN_RST_REQ_OFFSET; + rst_ack = (comm_flags & (u32)(1 << COM_CHAN_RST_ACK_OFFSET)) >> + COM_CHAN_RST_ACK_OFFSET; + if (rst_req != rst_ack) { + mlx4_err(dev, "Communication channel isn't sync, fail to send reset\n"); + return -EIO; + } + + rst_req ^= 1; + mlx4_warn(dev, "VF is sending reset request to Firmware\n"); + comm_flags = rst_req << COM_CHAN_RST_REQ_OFFSET; + __raw_writel((__force u32)cpu_to_be32(comm_flags), + (__iomem char *)priv->mfunc.comm + MLX4_COMM_CHAN_FLAGS); + /* Make sure that our comm channel write doesn't + * get mixed in with writes from another CPU. + */ + mmiowb(); + + end = msecs_to_jiffies(MLX4_COMM_TIME) + jiffies; + while (time_before(jiffies, end)) { + comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm + + MLX4_COMM_CHAN_FLAGS)); + rst_ack = (comm_flags & (u32)(1 << COM_CHAN_RST_ACK_OFFSET)) >> + COM_CHAN_RST_ACK_OFFSET; + + /* Reading rst_req again since the communication channel can + * be reset at any time by the PF and all its bits will be + * set to zero. 
+ */ + rst_req = (comm_flags & (u32)(1 << COM_CHAN_RST_REQ_OFFSET)) >> + COM_CHAN_RST_REQ_OFFSET; + + if (rst_ack == rst_req) { + mlx4_warn(dev, "VF Reset succeed\n"); + return 0; + } + cond_resched(); + } + mlx4_err(dev, "Fail to send reset over the communication channel\n"); + return -ETIMEDOUT; +} + +static int mlx4_comm_internal_err(u32 slave_read) +{ + return (u32)COMM_CHAN_EVENT_INTERNAL_ERR == + (slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0; +} + +void mlx4_enter_error_state(struct mlx4_dev_persistent *persist) +{ + int err; + struct mlx4_dev *dev; + + if (!mlx4_internal_err_reset) + return; + + mutex_lock(&persist->device_state_mutex); + if (persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) + goto out; + + dev = persist->dev; + mlx4_err(dev, "device is going to be reset\n"); + if (mlx4_is_slave(dev)) + err = mlx4_reset_slave(dev); + else + err = mlx4_reset_master(dev); + BUG_ON(err != 0); + + dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR; + mlx4_err(dev, "device was reset successfully\n"); + mutex_unlock(&persist->device_state_mutex); + + /* At that step HW was already reset, now notify clients */ + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0); + mlx4_cmd_wake_completions(dev); + return; + +out: + mutex_unlock(&persist->device_state_mutex); +} + +static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist) +{ + int err = 0; + + mlx4_enter_error_state(persist); + mutex_lock(&persist->interface_state_mutex); + if (persist->interface_state & MLX4_INTERFACE_STATE_UP && + !(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) { + err = mlx4_restart_one(persist->pdev); + mlx4_info(persist->dev, "mlx4_restart_one was ended, ret=%d\n", + err); + } + mutex_unlock(&persist->interface_state_mutex); +} + +static void dump_err_buf(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + int i; + + mlx4_err(dev, "Internal error detected:\n"); + for (i = 0; i < priv->fw.catas_size; ++i) + mlx4_err(dev, " buf[%02x]: %08x\n", + i, swab32(readl(priv->catas_err.map + i))); +} + +static void poll_catas(unsigned long dev_ptr) +{ + struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr; + struct mlx4_priv *priv = mlx4_priv(dev); + u32 slave_read; + + if (mlx4_is_slave(dev)) { + slave_read = swab32(readl(&priv->mfunc.comm->slave_read)); + if (mlx4_comm_internal_err(slave_read)) { + mlx4_warn(dev, "Internal error detected on the communication channel\n"); + goto internal_err; + } + } else if (readl(priv->catas_err.map)) { + dump_err_buf(dev); + goto internal_err; + } + + if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) { + mlx4_warn(dev, "Internal error mark was detected on device\n"); + goto internal_err; + } + + mod_timer(&priv->catas_err.timer, + round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL)); + return; + +internal_err: + if (mlx4_internal_err_reset) + queue_work(dev->persist->catas_wq, &dev->persist->catas_work); +} + +static void catas_reset(struct work_struct *work) +{ + struct mlx4_dev_persistent *persist = + container_of(work, struct mlx4_dev_persistent, + catas_work); + + mlx4_handle_error_state(persist); +} + +void mlx4_start_catas_poll(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + phys_addr_t addr; + + INIT_LIST_HEAD(&priv->catas_err.list); + init_timer(&priv->catas_err.timer); + priv->catas_err.map = NULL; + + if (!mlx4_is_slave(dev)) { + addr = pci_resource_start(dev->persist->pdev, + priv->fw.catas_bar) + + priv->fw.catas_offset; + + priv->catas_err.map = ioremap(addr, 
priv->fw.catas_size * 4); + if (!priv->catas_err.map) { + mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n", + (unsigned long long)addr); + return; + } + } + + priv->catas_err.timer.data = (unsigned long) dev; + priv->catas_err.timer.function = poll_catas; + priv->catas_err.timer.expires = + round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL); + add_timer(&priv->catas_err.timer); +} + +void mlx4_stop_catas_poll(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + del_timer_sync(&priv->catas_err.timer); + + if (priv->catas_err.map) { + iounmap(priv->catas_err.map); + priv->catas_err.map = NULL; + } + + if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION) + flush_workqueue(dev->persist->catas_wq); +} + +int mlx4_catas_init(struct mlx4_dev *dev) +{ + INIT_WORK(&dev->persist->catas_work, catas_reset); + dev->persist->catas_wq = create_singlethread_workqueue("mlx4_health"); + if (!dev->persist->catas_wq) + return -ENOMEM; + + return 0; +} + +void mlx4_catas_end(struct mlx4_dev *dev) +{ + if (dev->persist->catas_wq) { + destroy_workqueue(dev->persist->catas_wq); + dev->persist->catas_wq = NULL; + } +} diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c new file mode 100644 index 000000000..529ef0594 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -0,0 +1,3212 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. + * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/export.h> +#include <linux/pci.h> +#include <linux/errno.h> + +#include <linux/mlx4/cmd.h> +#include <linux/mlx4/device.h> +#include <linux/semaphore.h> +#include <rdma/ib_smi.h> +#include <linux/delay.h> + +#include <asm/io.h> + +#include "mlx4.h" +#include "fw.h" +#include "fw_qos.h" + +#define CMD_POLL_TOKEN 0xffff +#define INBOX_MASK 0xffffffffffffff00ULL + +#define CMD_CHAN_VER 1 +#define CMD_CHAN_IF_REV 1 + +enum { + /* command completed successfully: */ + CMD_STAT_OK = 0x00, + /* Internal error (such as a bus error) occurred while processing command: */ + CMD_STAT_INTERNAL_ERR = 0x01, + /* Operation/command not supported or opcode modifier not supported: */ + CMD_STAT_BAD_OP = 0x02, + /* Parameter not supported or parameter out of range: */ + CMD_STAT_BAD_PARAM = 0x03, + /* System not enabled or bad system state: */ + CMD_STAT_BAD_SYS_STATE = 0x04, + /* Attempt to access reserved or unallocaterd resource: */ + CMD_STAT_BAD_RESOURCE = 0x05, + /* Requested resource is currently executing a command, or is otherwise busy: */ + CMD_STAT_RESOURCE_BUSY = 0x06, + /* Required capability exceeds device limits: */ + CMD_STAT_EXCEED_LIM = 0x08, + /* Resource is not in the appropriate state or ownership: */ + CMD_STAT_BAD_RES_STATE = 0x09, + /* Index out of range: */ + CMD_STAT_BAD_INDEX = 0x0a, + /* FW image corrupted: */ + CMD_STAT_BAD_NVMEM = 0x0b, + /* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */ + CMD_STAT_ICM_ERROR = 0x0c, + /* Attempt to modify a QP/EE which is not in the presumed state: */ + CMD_STAT_BAD_QP_STATE = 0x10, + /* Bad segment parameters (Address/Size): */ + CMD_STAT_BAD_SEG_PARAM = 0x20, + /* Memory Region has Memory Windows bound to: */ + CMD_STAT_REG_BOUND = 0x21, + /* HCA local attached memory not present: */ + CMD_STAT_LAM_NOT_PRE = 0x22, + /* Bad management packet (silently discarded): */ + CMD_STAT_BAD_PKT = 0x30, + /* More outstanding CQEs in CQ than new CQ size: */ + CMD_STAT_BAD_SIZE = 0x40, + /* Multi Function device support required: */ + CMD_STAT_MULTI_FUNC_REQ = 0x50, +}; + +enum { + HCR_IN_PARAM_OFFSET = 0x00, + HCR_IN_MODIFIER_OFFSET = 0x08, + HCR_OUT_PARAM_OFFSET = 0x0c, + HCR_TOKEN_OFFSET = 0x14, + HCR_STATUS_OFFSET = 0x18, + + HCR_OPMOD_SHIFT = 12, + HCR_T_BIT = 21, + HCR_E_BIT = 22, + HCR_GO_BIT = 23 +}; + +enum { + GO_BIT_TIMEOUT_MSECS = 10000 +}; + +enum mlx4_vlan_transition { + MLX4_VLAN_TRANSITION_VST_VST = 0, + MLX4_VLAN_TRANSITION_VST_VGT = 1, + MLX4_VLAN_TRANSITION_VGT_VST = 2, + MLX4_VLAN_TRANSITION_VGT_VGT = 3, +}; + + +struct mlx4_cmd_context { + struct completion done; + int result; + int next; + u64 out_param; + u16 token; + u8 fw_status; +}; + +static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr_cmd *in_vhcr); + +static int mlx4_status_to_errno(u8 status) +{ + static const int trans_table[] = { + [CMD_STAT_INTERNAL_ERR] = -EIO, + [CMD_STAT_BAD_OP] = -EPERM, + [CMD_STAT_BAD_PARAM] = -EINVAL, + [CMD_STAT_BAD_SYS_STATE] = -ENXIO, + [CMD_STAT_BAD_RESOURCE] = -EBADF, + [CMD_STAT_RESOURCE_BUSY] = -EBUSY, + [CMD_STAT_EXCEED_LIM] = -ENOMEM, + [CMD_STAT_BAD_RES_STATE] = -EBADF, + [CMD_STAT_BAD_INDEX] = -EBADF, + [CMD_STAT_BAD_NVMEM] = -EFAULT, + [CMD_STAT_ICM_ERROR] = -ENFILE, + [CMD_STAT_BAD_QP_STATE] = -EINVAL, + [CMD_STAT_BAD_SEG_PARAM] = -EFAULT, + [CMD_STAT_REG_BOUND] = -EBUSY, + [CMD_STAT_LAM_NOT_PRE] = -EAGAIN, + [CMD_STAT_BAD_PKT] = -EINVAL, + [CMD_STAT_BAD_SIZE] = -ENOMEM, + 
[CMD_STAT_MULTI_FUNC_REQ] = -EACCES, + }; + + if (status >= ARRAY_SIZE(trans_table) || + (status != CMD_STAT_OK && trans_table[status] == 0)) + return -EIO; + + return trans_table[status]; +} + +static u8 mlx4_errno_to_status(int errno) +{ + switch (errno) { + case -EPERM: + return CMD_STAT_BAD_OP; + case -EINVAL: + return CMD_STAT_BAD_PARAM; + case -ENXIO: + return CMD_STAT_BAD_SYS_STATE; + case -EBUSY: + return CMD_STAT_RESOURCE_BUSY; + case -ENOMEM: + return CMD_STAT_EXCEED_LIM; + case -ENFILE: + return CMD_STAT_ICM_ERROR; + default: + return CMD_STAT_INTERNAL_ERR; + } +} + +static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op, + u8 op_modifier) +{ + switch (op) { + case MLX4_CMD_UNMAP_ICM: + case MLX4_CMD_UNMAP_ICM_AUX: + case MLX4_CMD_UNMAP_FA: + case MLX4_CMD_2RST_QP: + case MLX4_CMD_HW2SW_EQ: + case MLX4_CMD_HW2SW_CQ: + case MLX4_CMD_HW2SW_SRQ: + case MLX4_CMD_HW2SW_MPT: + case MLX4_CMD_CLOSE_HCA: + case MLX4_QP_FLOW_STEERING_DETACH: + case MLX4_CMD_FREE_RES: + case MLX4_CMD_CLOSE_PORT: + return CMD_STAT_OK; + + case MLX4_CMD_QP_ATTACH: + /* On Detach case return success */ + if (op_modifier == 0) + return CMD_STAT_OK; + return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR); + + default: + return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR); + } +} + +static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status) +{ + /* Any error during the closing commands below is considered fatal */ + if (op == MLX4_CMD_CLOSE_HCA || + op == MLX4_CMD_HW2SW_EQ || + op == MLX4_CMD_HW2SW_CQ || + op == MLX4_CMD_2RST_QP || + op == MLX4_CMD_HW2SW_SRQ || + op == MLX4_CMD_SYNC_TPT || + op == MLX4_CMD_UNMAP_ICM || + op == MLX4_CMD_UNMAP_ICM_AUX || + op == MLX4_CMD_UNMAP_FA) + return 1; + /* Error on MLX4_CMD_HW2SW_MPT is fatal except when fw status equals + * CMD_STAT_REG_BOUND. + * This status indicates that memory region has memory windows bound to it + * which may result from invalid user space usage and is not fatal. + */ + if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND) + return 1; + return 0; +} + +static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier, + int err) +{ + /* Only if reset flow is really active return code is based on + * command, otherwise current error code is returned. + */ + if (mlx4_internal_err_reset) { + mlx4_enter_error_state(dev->persist); + err = mlx4_internal_err_ret_value(dev, op, op_modifier); + } + + return err; +} + +static int comm_pending(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + u32 status = readl(&priv->mfunc.comm->slave_read); + + return (swab32(status) >> 31) != priv->cmd.comm_toggle; +} + +static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + u32 val; + + /* To avoid writing to unknown addresses after the device state was + * changed to internal error and the function was rest, + * check the INTERNAL_ERROR flag which is updated under + * device_state_mutex lock. 
+ */ + mutex_lock(&dev->persist->device_state_mutex); + + if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) { + mutex_unlock(&dev->persist->device_state_mutex); + return -EIO; + } + + priv->cmd.comm_toggle ^= 1; + val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31); + __raw_writel((__force u32) cpu_to_be32(val), + &priv->mfunc.comm->slave_write); + mmiowb(); + mutex_unlock(&dev->persist->device_state_mutex); + return 0; +} + +static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param, + unsigned long timeout) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + unsigned long end; + int err = 0; + int ret_from_pending = 0; + + /* First, verify that the master reports correct status */ + if (comm_pending(dev)) { + mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n", + priv->cmd.comm_toggle, cmd); + return -EAGAIN; + } + + /* Write command */ + down(&priv->cmd.poll_sem); + if (mlx4_comm_cmd_post(dev, cmd, param)) { + /* Only in case the device state is INTERNAL_ERROR, + * mlx4_comm_cmd_post returns with an error + */ + err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR); + goto out; + } + + end = msecs_to_jiffies(timeout) + jiffies; + while (comm_pending(dev) && time_before(jiffies, end)) + cond_resched(); + ret_from_pending = comm_pending(dev); + if (ret_from_pending) { + /* check if the slave is trying to boot in the middle of + * FLR process. The only non-zero result in the RESET command + * is MLX4_DELAY_RESET_SLAVE*/ + if ((MLX4_COMM_CMD_RESET == cmd)) { + err = MLX4_DELAY_RESET_SLAVE; + goto out; + } else { + mlx4_warn(dev, "Communication channel command 0x%x timed out\n", + cmd); + err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR); + } + } + + if (err) + mlx4_enter_error_state(dev->persist); +out: + up(&priv->cmd.poll_sem); + return err; +} + +static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd, + u16 param, u16 op, unsigned long timeout) +{ + struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; + struct mlx4_cmd_context *context; + unsigned long end; + int err = 0; + + down(&cmd->event_sem); + + spin_lock(&cmd->context_lock); + BUG_ON(cmd->free_head < 0); + context = &cmd->context[cmd->free_head]; + context->token += cmd->token_mask + 1; + cmd->free_head = context->next; + spin_unlock(&cmd->context_lock); + + reinit_completion(&context->done); + + if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) { + /* Only in case the device state is INTERNAL_ERROR, + * mlx4_comm_cmd_post returns with an error + */ + err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR); + goto out; + } + + if (!wait_for_completion_timeout(&context->done, + msecs_to_jiffies(timeout))) { + mlx4_warn(dev, "communication channel command 0x%x (op=0x%x) timed out\n", + vhcr_cmd, op); + goto out_reset; + } + + err = context->result; + if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) { + mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", + vhcr_cmd, context->fw_status); + if (mlx4_closing_cmd_fatal_error(op, context->fw_status)) + goto out_reset; + } + + /* wait for comm channel ready + * this is necessary for prevention the race + * when switching between event to polling mode + * Skipping this section in case the device is in FATAL_ERROR state, + * In this state, no commands are sent via the comm channel until + * the device has returned from reset. 
+ */ + if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) { + end = msecs_to_jiffies(timeout) + jiffies; + while (comm_pending(dev) && time_before(jiffies, end)) + cond_resched(); + } + goto out; + +out_reset: + err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR); + mlx4_enter_error_state(dev->persist); +out: + spin_lock(&cmd->context_lock); + context->next = cmd->free_head; + cmd->free_head = context - cmd->context; + spin_unlock(&cmd->context_lock); + + up(&cmd->event_sem); + return err; +} + +int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param, + u16 op, unsigned long timeout) +{ + if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) + return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR); + + if (mlx4_priv(dev)->cmd.use_events) + return mlx4_comm_cmd_wait(dev, cmd, param, op, timeout); + return mlx4_comm_cmd_poll(dev, cmd, param, timeout); +} + +static int cmd_pending(struct mlx4_dev *dev) +{ + u32 status; + + if (pci_channel_offline(dev->persist->pdev)) + return -EIO; + + status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET); + + return (status & swab32(1 << HCR_GO_BIT)) || + (mlx4_priv(dev)->cmd.toggle == + !!(status & swab32(1 << HCR_T_BIT))); +} + +static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param, + u32 in_modifier, u8 op_modifier, u16 op, u16 token, + int event) +{ + struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; + u32 __iomem *hcr = cmd->hcr; + int ret = -EIO; + unsigned long end; + + mutex_lock(&dev->persist->device_state_mutex); + /* To avoid writing to unknown addresses after the device state was + * changed to internal error and the chip was reset, + * check the INTERNAL_ERROR flag which is updated under + * device_state_mutex lock. + */ + if (pci_channel_offline(dev->persist->pdev) || + (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) { + /* + * Device is going through error recovery + * and cannot accept commands. + */ + goto out; + } + + end = jiffies; + if (event) + end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS); + + while (cmd_pending(dev)) { + if (pci_channel_offline(dev->persist->pdev)) { + /* + * Device is going through error recovery + * and cannot accept commands. + */ + goto out; + } + + if (time_after_eq(jiffies, end)) { + mlx4_err(dev, "%s:cmd_pending failed\n", __func__); + goto out; + } + cond_resched(); + } + + /* + * We use writel (instead of something like memcpy_toio) + * because writes of less than 32 bits to the HCR don't work + * (and some architectures such as ia64 implement memcpy_toio + * in terms of writeb). + */ + __raw_writel((__force u32) cpu_to_be32(in_param >> 32), hcr + 0); + __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), hcr + 1); + __raw_writel((__force u32) cpu_to_be32(in_modifier), hcr + 2); + __raw_writel((__force u32) cpu_to_be32(out_param >> 32), hcr + 3); + __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4); + __raw_writel((__force u32) cpu_to_be32(token << 16), hcr + 5); + + /* __raw_writel may not order writes. */ + wmb(); + + __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) | + (cmd->toggle << HCR_T_BIT) | + (event ? (1 << HCR_E_BIT) : 0) | + (op_modifier << HCR_OPMOD_SHIFT) | + op), hcr + 6); + + /* + * Make sure that our HCR writes don't get mixed in with + * writes from another CPU starting a FW command. 
+ */ + mmiowb(); + + cmd->toggle = cmd->toggle ^ 1; + + ret = 0; + +out: + if (ret) + mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n", + op, ret, in_param, in_modifier, op_modifier); + mutex_unlock(&dev->persist->device_state_mutex); + + return ret; +} + +static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, + int out_is_imm, u32 in_modifier, u8 op_modifier, + u16 op, unsigned long timeout) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr; + int ret; + + mutex_lock(&priv->cmd.slave_cmd_mutex); + + vhcr->in_param = cpu_to_be64(in_param); + vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0; + vhcr->in_modifier = cpu_to_be32(in_modifier); + vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff)); + vhcr->token = cpu_to_be16(CMD_POLL_TOKEN); + vhcr->status = 0; + vhcr->flags = !!(priv->cmd.use_events) << 6; + + if (mlx4_is_master(dev)) { + ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr); + if (!ret) { + if (out_is_imm) { + if (out_param) + *out_param = + be64_to_cpu(vhcr->out_param); + else { + mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n", + op); + vhcr->status = CMD_STAT_BAD_PARAM; + } + } + ret = mlx4_status_to_errno(vhcr->status); + } + if (ret && + dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) + ret = mlx4_internal_err_ret_value(dev, op, op_modifier); + } else { + ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, op, + MLX4_COMM_TIME + timeout); + if (!ret) { + if (out_is_imm) { + if (out_param) + *out_param = + be64_to_cpu(vhcr->out_param); + else { + mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n", + op); + vhcr->status = CMD_STAT_BAD_PARAM; + } + } + ret = mlx4_status_to_errno(vhcr->status); + } else { + if (dev->persist->state & + MLX4_DEVICE_STATE_INTERNAL_ERROR) + ret = mlx4_internal_err_ret_value(dev, op, + op_modifier); + else + mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", op); + } + } + + mutex_unlock(&priv->cmd.slave_cmd_mutex); + return ret; +} + +static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param, + int out_is_imm, u32 in_modifier, u8 op_modifier, + u16 op, unsigned long timeout) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + void __iomem *hcr = priv->cmd.hcr; + int err = 0; + unsigned long end; + u32 stat; + + down(&priv->cmd.poll_sem); + + if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) { + /* + * Device is going through error recovery + * and cannot accept commands. + */ + err = mlx4_internal_err_ret_value(dev, op, op_modifier); + goto out; + } + + if (out_is_imm && !out_param) { + mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n", + op); + err = -EINVAL; + goto out; + } + + err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0, + in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0); + if (err) + goto out_reset; + + end = msecs_to_jiffies(timeout) + jiffies; + while (cmd_pending(dev) && time_before(jiffies, end)) { + if (pci_channel_offline(dev->persist->pdev)) { + /* + * Device is going through error recovery + * and cannot accept commands. 
+ */ + err = -EIO; + goto out_reset; + } + + if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) { + err = mlx4_internal_err_ret_value(dev, op, op_modifier); + goto out; + } + + cond_resched(); + } + + if (cmd_pending(dev)) { + mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", + op); + err = -EIO; + goto out_reset; + } + + if (out_is_imm) + *out_param = + (u64) be32_to_cpu((__force __be32) + __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 | + (u64) be32_to_cpu((__force __be32) + __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4)); + stat = be32_to_cpu((__force __be32) + __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24; + err = mlx4_status_to_errno(stat); + if (err) { + mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", + op, stat); + if (mlx4_closing_cmd_fatal_error(op, stat)) + goto out_reset; + goto out; + } + +out_reset: + if (err) + err = mlx4_cmd_reset_flow(dev, op, op_modifier, err); +out: + up(&priv->cmd.poll_sem); + return err; +} + +void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cmd_context *context = + &priv->cmd.context[token & priv->cmd.token_mask]; + + /* previously timed out command completing at long last */ + if (token != context->token) + return; + + context->fw_status = status; + context->result = mlx4_status_to_errno(status); + context->out_param = out_param; + + complete(&context->done); +} + +static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, + int out_is_imm, u32 in_modifier, u8 op_modifier, + u16 op, unsigned long timeout) +{ + struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; + struct mlx4_cmd_context *context; + int err = 0; + + down(&cmd->event_sem); + + spin_lock(&cmd->context_lock); + BUG_ON(cmd->free_head < 0); + context = &cmd->context[cmd->free_head]; + context->token += cmd->token_mask + 1; + cmd->free_head = context->next; + spin_unlock(&cmd->context_lock); + + if (out_is_imm && !out_param) { + mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n", + op); + err = -EINVAL; + goto out; + } + + reinit_completion(&context->done); + + err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0, + in_modifier, op_modifier, op, context->token, 1); + if (err) + goto out_reset; + + if (!wait_for_completion_timeout(&context->done, + msecs_to_jiffies(timeout))) { + mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", + op); + if (op == MLX4_CMD_NOP) { + err = -EBUSY; + goto out; + } else { + err = -EIO; + goto out_reset; + } + } + + err = context->result; + if (err) { + /* Since we do not want to have this error message always + * displayed at driver start when there are ConnectX2 HCAs + * on the host, we deprecate the error message for this + * specific command/input_mod/opcode_mod/fw-status to be debug. 
+ */ + if (op == MLX4_CMD_SET_PORT && + (in_modifier == 1 || in_modifier == 2) && + op_modifier == MLX4_SET_PORT_IB_OPCODE && + context->fw_status == CMD_STAT_BAD_SIZE) + mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n", + op, context->fw_status); + else + mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", + op, context->fw_status); + if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) + err = mlx4_internal_err_ret_value(dev, op, op_modifier); + else if (mlx4_closing_cmd_fatal_error(op, context->fw_status)) + goto out_reset; + + goto out; + } + + if (out_is_imm) + *out_param = context->out_param; + +out_reset: + if (err) + err = mlx4_cmd_reset_flow(dev, op, op_modifier, err); +out: + spin_lock(&cmd->context_lock); + context->next = cmd->free_head; + cmd->free_head = context - cmd->context; + spin_unlock(&cmd->context_lock); + + up(&cmd->event_sem); + return err; +} + +int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, + int out_is_imm, u32 in_modifier, u8 op_modifier, + u16 op, unsigned long timeout, int native) +{ + if (pci_channel_offline(dev->persist->pdev)) + return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO); + + if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) { + if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) + return mlx4_internal_err_ret_value(dev, op, + op_modifier); + if (mlx4_priv(dev)->cmd.use_events) + return mlx4_cmd_wait(dev, in_param, out_param, + out_is_imm, in_modifier, + op_modifier, op, timeout); + else + return mlx4_cmd_poll(dev, in_param, out_param, + out_is_imm, in_modifier, + op_modifier, op, timeout); + } + return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm, + in_modifier, op_modifier, op, timeout); +} +EXPORT_SYMBOL_GPL(__mlx4_cmd); + + +int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev) +{ + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); +} + +static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr, + int slave, u64 slave_addr, + int size, int is_read) +{ + u64 in_param; + u64 out_param; + + if ((slave_addr & 0xfff) | (master_addr & 0xfff) | + (slave & ~0x7f) | (size & 0xff)) { + mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n", + slave_addr, master_addr, slave, size); + return -EINVAL; + } + + if (is_read) { + in_param = (u64) slave | slave_addr; + out_param = (u64) dev->caps.function | master_addr; + } else { + in_param = (u64) dev->caps.function | master_addr; + out_param = (u64) slave | slave_addr; + } + + return mlx4_cmd_imm(dev, in_param, &out_param, size, 0, + MLX4_CMD_ACCESS_MEM, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); +} + +static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox) +{ + struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf); + struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf); + int err; + int i; + + if (index & 0x1f) + return -EINVAL; + + in_mad->attr_mod = cpu_to_be32(index / 32); + + err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3, + MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (err) + return err; + + for (i = 0; i < 32; ++i) + pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]); + + return err; +} + +static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox) +{ + int i; + int err; + + for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) { + 
err = query_pkey_block(dev, port, i, table + i, inbox, outbox); + if (err) + return err; + } + + return 0; +} +#define PORT_CAPABILITY_LOCATION_IN_SMP 20 +#define PORT_STATE_OFFSET 32 + +static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf) +{ + if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP) + return IB_PORT_ACTIVE; + else + return IB_PORT_DOWN; +} + +static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct ib_smp *smp = inbox->buf; + u32 index; + u8 port; + u8 opcode_modifier; + u16 *table; + int err; + int vidx, pidx; + int network_view; + struct mlx4_priv *priv = mlx4_priv(dev); + struct ib_smp *outsmp = outbox->buf; + __be16 *outtab = (__be16 *)(outsmp->data); + __be32 slave_cap_mask; + __be64 slave_node_guid; + + port = vhcr->in_modifier; + + /* network-view bit is for driver use only, and should not be passed to FW */ + opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */ + network_view = !!(vhcr->op_modifier & 0x8); + + if (smp->base_version == 1 && + smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED && + smp->class_version == 1) { + /* host view is paravirtualized */ + if (!network_view && smp->method == IB_MGMT_METHOD_GET) { + if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) { + index = be32_to_cpu(smp->attr_mod); + if (port < 1 || port > dev->caps.num_ports) + return -EINVAL; + table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1, + sizeof(*table) * 32, GFP_KERNEL); + + if (!table) + return -ENOMEM; + /* need to get the full pkey table because the paravirtualized + * pkeys may be scattered among several pkey blocks. + */ + err = get_full_pkey_table(dev, port, table, inbox, outbox); + if (!err) { + for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) { + pidx = priv->virt2phys_pkey[slave][port - 1][vidx]; + outtab[vidx % 32] = cpu_to_be16(table[pidx]); + } + } + kfree(table); + return err; + } + if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) { + /*get the slave specific caps:*/ + /*do the command */ + err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, + vhcr->in_modifier, opcode_modifier, + vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); + /* modify the response for slaves */ + if (!err && slave != mlx4_master_func_num(dev)) { + u8 *state = outsmp->data + PORT_STATE_OFFSET; + + *state = (*state & 0xf0) | vf_port_state(dev, port, slave); + slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port]; + memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4); + } + return err; + } + if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) { + __be64 guid = mlx4_get_admin_guid(dev, slave, + port); + + /* set the PF admin guid to the FW/HW burned + * GUID, if it wasn't yet set + */ + if (slave == 0 && guid == 0) { + smp->attr_mod = 0; + err = mlx4_cmd_box(dev, + inbox->dma, + outbox->dma, + vhcr->in_modifier, + opcode_modifier, + vhcr->op, + MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (err) + return err; + mlx4_set_admin_guid(dev, + *(__be64 *)outsmp-> + data, slave, port); + } else { + memcpy(outsmp->data, &guid, 8); + } + + /* clean all other gids */ + memset(outsmp->data + 8, 0, 56); + return 0; + } + if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) { + err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, + vhcr->in_modifier, opcode_modifier, + vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); + if (!err) { + slave_node_guid = mlx4_get_slave_node_guid(dev, slave); + 
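+					/* Paravirtualize the NodeInfo response: overwrite the
+					 * node GUID reported by the firmware with this
+					 * slave's per-function GUID.
+					 */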
memcpy(outsmp->data + 12, &slave_node_guid, 8); + } + return err; + } + } + } + + /* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs. + * These are the MADs used by ib verbs (such as ib_query_gids). + */ + if (slave != mlx4_master_func_num(dev) && + !mlx4_vf_smi_enabled(dev, slave, port)) { + if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED && + smp->method == IB_MGMT_METHOD_GET) || network_view) { + mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n", + slave, smp->method, smp->mgmt_class, + network_view ? "Network" : "Host", + be16_to_cpu(smp->attr_id)); + return -EPERM; + } + } + + return mlx4_cmd_box(dev, inbox->dma, outbox->dma, + vhcr->in_modifier, opcode_modifier, + vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); +} + +static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + return -EPERM; +} + +int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + u64 in_param; + u64 out_param; + int err; + + in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param; + out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param; + if (cmd->encode_slave_id) { + in_param &= 0xffffffffffffff00ll; + in_param |= slave; + } + + err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm, + vhcr->in_modifier, vhcr->op_modifier, vhcr->op, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + + if (cmd->out_is_imm) + vhcr->out_param = out_param; + + return err; +} + +static struct mlx4_cmd_info cmd_info[] = { + { + .opcode = MLX4_CMD_QUERY_FW, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QUERY_FW_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_HCA, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = NULL + }, + { + .opcode = MLX4_CMD_QUERY_DEV_CAP, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QUERY_DEV_CAP_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_FUNC_CAP, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QUERY_FUNC_CAP_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_ADAPTER, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = NULL + }, + { + .opcode = MLX4_CMD_INIT_PORT, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_INIT_PORT_wrapper + }, + { + .opcode = MLX4_CMD_CLOSE_PORT, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CLOSE_PORT_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_PORT, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QUERY_PORT_wrapper + }, + { + .opcode = MLX4_CMD_SET_PORT, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_SET_PORT_wrapper + }, + { + .opcode = MLX4_CMD_MAP_EQ, + .has_inbox = false, 
+ .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_MAP_EQ_wrapper + }, + { + .opcode = MLX4_CMD_SW2HW_EQ, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = true, + .verify = NULL, + .wrapper = mlx4_SW2HW_EQ_wrapper + }, + { + .opcode = MLX4_CMD_HW_HEALTH_CHECK, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = NULL + }, + { + .opcode = MLX4_CMD_NOP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = NULL + }, + { + .opcode = MLX4_CMD_CONFIG_DEV, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CONFIG_DEV_wrapper + }, + { + .opcode = MLX4_CMD_ALLOC_RES, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = true, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_ALLOC_RES_wrapper + }, + { + .opcode = MLX4_CMD_FREE_RES, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_FREE_RES_wrapper + }, + { + .opcode = MLX4_CMD_SW2HW_MPT, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = true, + .verify = NULL, + .wrapper = mlx4_SW2HW_MPT_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_MPT, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QUERY_MPT_wrapper + }, + { + .opcode = MLX4_CMD_HW2SW_MPT, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_HW2SW_MPT_wrapper + }, + { + .opcode = MLX4_CMD_READ_MTT, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = NULL + }, + { + .opcode = MLX4_CMD_WRITE_MTT, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_WRITE_MTT_wrapper + }, + { + .opcode = MLX4_CMD_SYNC_TPT, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = NULL + }, + { + .opcode = MLX4_CMD_HW2SW_EQ, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = true, + .verify = NULL, + .wrapper = mlx4_HW2SW_EQ_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_EQ, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = true, + .verify = NULL, + .wrapper = mlx4_QUERY_EQ_wrapper + }, + { + .opcode = MLX4_CMD_SW2HW_CQ, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = true, + .verify = NULL, + .wrapper = mlx4_SW2HW_CQ_wrapper + }, + { + .opcode = MLX4_CMD_HW2SW_CQ, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_HW2SW_CQ_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_CQ, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QUERY_CQ_wrapper + }, + { + .opcode = MLX4_CMD_MODIFY_CQ, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = true, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_MODIFY_CQ_wrapper + }, + { + .opcode = MLX4_CMD_SW2HW_SRQ, + .has_inbox = true, + .has_outbox = false, + 
.out_is_imm = false, + .encode_slave_id = true, + .verify = NULL, + .wrapper = mlx4_SW2HW_SRQ_wrapper + }, + { + .opcode = MLX4_CMD_HW2SW_SRQ, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_HW2SW_SRQ_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_SRQ, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QUERY_SRQ_wrapper + }, + { + .opcode = MLX4_CMD_ARM_SRQ, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_ARM_SRQ_wrapper + }, + { + .opcode = MLX4_CMD_RST2INIT_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = true, + .verify = NULL, + .wrapper = mlx4_RST2INIT_QP_wrapper + }, + { + .opcode = MLX4_CMD_INIT2INIT_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_INIT2INIT_QP_wrapper + }, + { + .opcode = MLX4_CMD_INIT2RTR_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_INIT2RTR_QP_wrapper + }, + { + .opcode = MLX4_CMD_RTR2RTS_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_RTR2RTS_QP_wrapper + }, + { + .opcode = MLX4_CMD_RTS2RTS_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_RTS2RTS_QP_wrapper + }, + { + .opcode = MLX4_CMD_SQERR2RTS_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_SQERR2RTS_QP_wrapper + }, + { + .opcode = MLX4_CMD_2ERR_QP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_GEN_QP_wrapper + }, + { + .opcode = MLX4_CMD_RTS2SQD_QP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_GEN_QP_wrapper + }, + { + .opcode = MLX4_CMD_SQD2SQD_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_SQD2SQD_QP_wrapper + }, + { + .opcode = MLX4_CMD_SQD2RTS_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_SQD2RTS_QP_wrapper + }, + { + .opcode = MLX4_CMD_2RST_QP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_2RST_QP_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_QP, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_GEN_QP_wrapper + }, + { + .opcode = MLX4_CMD_SUSPEND_QP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_GEN_QP_wrapper + }, + { + .opcode = MLX4_CMD_UNSUSPEND_QP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_GEN_QP_wrapper + }, + { + .opcode = MLX4_CMD_UPDATE_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_UPDATE_QP_wrapper + }, + { + .opcode 
= MLX4_CMD_GET_OP_REQ, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CMD_EPERM_wrapper, + }, + { + .opcode = MLX4_CMD_ALLOCATE_VPP, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CMD_EPERM_wrapper, + }, + { + .opcode = MLX4_CMD_SET_VPORT_QOS, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CMD_EPERM_wrapper, + }, + { + .opcode = MLX4_CMD_CONF_SPECIAL_QP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, /* XXX verify: only demux can do this */ + .wrapper = NULL + }, + { + .opcode = MLX4_CMD_MAD_IFC, + .has_inbox = true, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_MAD_IFC_wrapper + }, + { + .opcode = MLX4_CMD_MAD_DEMUX, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CMD_EPERM_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_IF_STAT, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QUERY_IF_STAT_wrapper + }, + { + .opcode = MLX4_CMD_ACCESS_REG, + .has_inbox = true, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_ACCESS_REG_wrapper, + }, + { + .opcode = MLX4_CMD_CONGESTION_CTRL_OPCODE, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CMD_EPERM_wrapper, + }, + /* Native multicast commands are not available for guests */ + { + .opcode = MLX4_CMD_QP_ATTACH, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QP_ATTACH_wrapper + }, + { + .opcode = MLX4_CMD_PROMISC, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_PROMISC_wrapper + }, + /* Ethernet specific commands */ + { + .opcode = MLX4_CMD_SET_VLAN_FLTR, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_SET_VLAN_FLTR_wrapper + }, + { + .opcode = MLX4_CMD_SET_MCAST_FLTR, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_SET_MCAST_FLTR_wrapper + }, + { + .opcode = MLX4_CMD_DUMP_ETH_STATS, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_DUMP_ETH_STATS_wrapper + }, + { + .opcode = MLX4_CMD_INFORM_FLR_DONE, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = NULL + }, + /* flow steering commands */ + { + .opcode = MLX4_QP_FLOW_STEERING_ATTACH, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = true, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper + }, + { + .opcode = MLX4_QP_FLOW_STEERING_DETACH, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper + }, + { + .opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE, + .has_inbox = false, + 
.has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CMD_EPERM_wrapper + }, + { + .opcode = MLX4_CMD_VIRT_PORT_MAP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CMD_EPERM_wrapper + }, +}; + +static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr_cmd *in_vhcr) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cmd_info *cmd = NULL; + struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr; + struct mlx4_vhcr *vhcr; + struct mlx4_cmd_mailbox *inbox = NULL; + struct mlx4_cmd_mailbox *outbox = NULL; + u64 in_param; + u64 out_param; + int ret = 0; + int i; + int err = 0; + + /* Create sw representation of Virtual HCR */ + vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL); + if (!vhcr) + return -ENOMEM; + + /* DMA in the vHCR */ + if (!in_vhcr) { + ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave, + priv->mfunc.master.slave_state[slave].vhcr_dma, + ALIGN(sizeof(struct mlx4_vhcr_cmd), + MLX4_ACCESS_MEM_ALIGN), 1); + if (ret) { + if (!(dev->persist->state & + MLX4_DEVICE_STATE_INTERNAL_ERROR)) + mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n", + __func__, ret); + kfree(vhcr); + return ret; + } + } + + /* Fill SW VHCR fields */ + vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param); + vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param); + vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier); + vhcr->token = be16_to_cpu(vhcr_cmd->token); + vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff; + vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12); + vhcr->e_bit = vhcr_cmd->flags & (1 << 6); + + /* Lookup command */ + for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) { + if (vhcr->op == cmd_info[i].opcode) { + cmd = &cmd_info[i]; + break; + } + } + if (!cmd) { + mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n", + vhcr->op, slave); + vhcr_cmd->status = CMD_STAT_BAD_PARAM; + goto out_status; + } + + /* Read inbox */ + if (cmd->has_inbox) { + vhcr->in_param &= INBOX_MASK; + inbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(inbox)) { + vhcr_cmd->status = CMD_STAT_BAD_SIZE; + inbox = NULL; + goto out_status; + } + + ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave, + vhcr->in_param, + MLX4_MAILBOX_SIZE, 1); + if (ret) { + if (!(dev->persist->state & + MLX4_DEVICE_STATE_INTERNAL_ERROR)) + mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n", + __func__, cmd->opcode); + vhcr_cmd->status = CMD_STAT_INTERNAL_ERR; + goto out_status; + } + } + + /* Apply permission and bound checks if applicable */ + if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) { + mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n", + vhcr->op, slave, vhcr->in_modifier); + vhcr_cmd->status = CMD_STAT_BAD_OP; + goto out_status; + } + + /* Allocate outbox */ + if (cmd->has_outbox) { + outbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(outbox)) { + vhcr_cmd->status = CMD_STAT_BAD_SIZE; + outbox = NULL; + goto out_status; + } + } + + /* Execute the command! */ + if (cmd->wrapper) { + err = cmd->wrapper(dev, slave, vhcr, inbox, outbox, + cmd); + if (cmd->out_is_imm) + vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param); + } else { + in_param = cmd->has_inbox ? (u64) inbox->dma : + vhcr->in_param; + out_param = cmd->has_outbox ? 
(u64) outbox->dma : + vhcr->out_param; + err = __mlx4_cmd(dev, in_param, &out_param, + cmd->out_is_imm, vhcr->in_modifier, + vhcr->op_modifier, vhcr->op, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + if (cmd->out_is_imm) { + vhcr->out_param = out_param; + vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param); + } + } + + if (err) { + if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) + mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n", + vhcr->op, slave, vhcr->errno, err); + vhcr_cmd->status = mlx4_errno_to_status(err); + goto out_status; + } + + + /* Write outbox if command completed successfully */ + if (cmd->has_outbox && !vhcr_cmd->status) { + ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave, + vhcr->out_param, + MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED); + if (ret) { + /* If we failed to write back the outbox after the + *command was successfully executed, we must fail this + * slave, as it is now in undefined state */ + if (!(dev->persist->state & + MLX4_DEVICE_STATE_INTERNAL_ERROR)) + mlx4_err(dev, "%s:Failed writing outbox\n", __func__); + goto out; + } + } + +out_status: + /* DMA back vhcr result */ + if (!in_vhcr) { + ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave, + priv->mfunc.master.slave_state[slave].vhcr_dma, + ALIGN(sizeof(struct mlx4_vhcr), + MLX4_ACCESS_MEM_ALIGN), + MLX4_CMD_WRAPPED); + if (ret) + mlx4_err(dev, "%s:Failed writing vhcr result\n", + __func__); + else if (vhcr->e_bit && + mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe)) + mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n", + slave); + } + +out: + kfree(vhcr); + mlx4_free_cmd_mailbox(dev, inbox); + mlx4_free_cmd_mailbox(dev, outbox); + return ret; +} + +static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, + int slave, int port) +{ + struct mlx4_vport_oper_state *vp_oper; + struct mlx4_vport_state *vp_admin; + struct mlx4_vf_immed_vlan_work *work; + struct mlx4_dev *dev = &(priv->dev); + int err; + int admin_vlan_ix = NO_INDX; + + vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; + + if (vp_oper->state.default_vlan == vp_admin->default_vlan && + vp_oper->state.default_qos == vp_admin->default_qos && + vp_oper->state.link_state == vp_admin->link_state && + vp_oper->state.qos_vport == vp_admin->qos_vport) + return 0; + + if (!(priv->mfunc.master.slave_state[slave].active && + dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) { + /* even if the UPDATE_QP command isn't supported, we still want + * to set this VF link according to the admin directive + */ + vp_oper->state.link_state = vp_admin->link_state; + return -1; + } + + mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n", + slave, port); + mlx4_dbg(dev, "vlan %d QoS %d link down %d\n", + vp_admin->default_vlan, vp_admin->default_qos, + vp_admin->link_state); + + work = kzalloc(sizeof(*work), GFP_KERNEL); + if (!work) + return -ENOMEM; + + if (vp_oper->state.default_vlan != vp_admin->default_vlan) { + if (MLX4_VGT != vp_admin->default_vlan) { + err = __mlx4_register_vlan(&priv->dev, port, + vp_admin->default_vlan, + &admin_vlan_ix); + if (err) { + kfree(work); + mlx4_warn(&priv->dev, + "No vlan resources slave %d, port %d\n", + slave, port); + return err; + } + } else { + admin_vlan_ix = NO_INDX; + } + work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN; + mlx4_dbg(&priv->dev, + "alloc vlan %d idx %d slave %d port %d\n", + (int)(vp_admin->default_vlan), + admin_vlan_ix, slave, port); + } + + 
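+	/* From here on the operational state is switched over to the admin
+	 * values; the per-QP UPDATE_QP pass itself is deferred to the work
+	 * item queued at the end of this function.
+	 */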
/* save original vlan ix and vlan id */ + work->orig_vlan_id = vp_oper->state.default_vlan; + work->orig_vlan_ix = vp_oper->vlan_idx; + + /* handle new qos */ + if (vp_oper->state.default_qos != vp_admin->default_qos) + work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS; + + if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN) + vp_oper->vlan_idx = admin_vlan_ix; + + vp_oper->state.default_vlan = vp_admin->default_vlan; + vp_oper->state.default_qos = vp_admin->default_qos; + vp_oper->state.link_state = vp_admin->link_state; + vp_oper->state.qos_vport = vp_admin->qos_vport; + + if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE) + work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE; + + /* iterate over QPs owned by this slave, using UPDATE_QP */ + work->port = port; + work->slave = slave; + work->qos = vp_oper->state.default_qos; + work->qos_vport = vp_oper->state.qos_vport; + work->vlan_id = vp_oper->state.default_vlan; + work->vlan_ix = vp_oper->vlan_idx; + work->priv = priv; + INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler); + queue_work(priv->mfunc.master.comm_wq, &work->work); + + return 0; +} + +static void mlx4_set_default_port_qos(struct mlx4_dev *dev, int port) +{ + struct mlx4_qos_manager *port_qos_ctl; + struct mlx4_priv *priv = mlx4_priv(dev); + + port_qos_ctl = &priv->mfunc.master.qos_ctl[port]; + bitmap_zero(port_qos_ctl->priority_bm, MLX4_NUM_UP); + + /* Enable only default prio at PF init routine */ + set_bit(MLX4_DEFAULT_QOS_PRIO, port_qos_ctl->priority_bm); +} + +static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port) +{ + int i; + int err; + int num_vfs; + u16 availible_vpp; + u8 vpp_param[MLX4_NUM_UP]; + struct mlx4_qos_manager *port_qos; + struct mlx4_priv *priv = mlx4_priv(dev); + + err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_param); + if (err) { + mlx4_info(dev, "Failed query availible VPPs\n"); + return; + } + + port_qos = &priv->mfunc.master.qos_ctl[port]; + num_vfs = (availible_vpp / + bitmap_weight(port_qos->priority_bm, MLX4_NUM_UP)); + + for (i = 0; i < MLX4_NUM_UP; i++) { + if (test_bit(i, port_qos->priority_bm)) + vpp_param[i] = num_vfs; + } + + err = mlx4_ALLOCATE_VPP_set(dev, port, vpp_param); + if (err) { + mlx4_info(dev, "Failed allocating VPPs\n"); + return; + } + + /* Query actual allocated VPP, just to make sure */ + err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_param); + if (err) { + mlx4_info(dev, "Failed query availible VPPs\n"); + return; + } + + port_qos->num_of_qos_vfs = num_vfs; + mlx4_dbg(dev, "Port %d Availible VPPs %d\n", port, availible_vpp); + + for (i = 0; i < MLX4_NUM_UP; i++) + mlx4_dbg(dev, "Port %d UP %d Allocated %d VPPs\n", port, i, + vpp_param[i]); +} + +static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave) +{ + int port, err; + struct mlx4_vport_state *vp_admin; + struct mlx4_vport_oper_state *vp_oper; + struct mlx4_active_ports actv_ports = mlx4_get_active_ports( + &priv->dev, slave); + int min_port = find_first_bit(actv_ports.ports, + priv->dev.caps.num_ports) + 1; + int max_port = min_port - 1 + + bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports); + + for (port = min_port; port <= max_port; port++) { + if (!test_bit(port - 1, actv_ports.ports)) + continue; + priv->mfunc.master.vf_oper[slave].smi_enabled[port] = + priv->mfunc.master.vf_admin[slave].enable_smi[port]; + vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; + vp_oper->state = *vp_admin; + if (MLX4_VGT != vp_admin->default_vlan) { + 
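+			/* VST mode: reserve a VLAN table entry for the VF's
+			 * default VLAN before its resources are brought up.
+			 */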
err = __mlx4_register_vlan(&priv->dev, port, + vp_admin->default_vlan, &(vp_oper->vlan_idx)); + if (err) { + vp_oper->vlan_idx = NO_INDX; + mlx4_warn(&priv->dev, + "No vlan resources slave %d, port %d\n", + slave, port); + return err; + } + mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n", + (int)(vp_oper->state.default_vlan), + vp_oper->vlan_idx, slave, port); + } + if (vp_admin->spoofchk) { + vp_oper->mac_idx = __mlx4_register_mac(&priv->dev, + port, + vp_admin->mac); + if (0 > vp_oper->mac_idx) { + err = vp_oper->mac_idx; + vp_oper->mac_idx = NO_INDX; + mlx4_warn(&priv->dev, + "No mac resources slave %d, port %d\n", + slave, port); + return err; + } + mlx4_dbg(&priv->dev, "alloc mac %llx idx %d slave %d port %d\n", + vp_oper->state.mac, vp_oper->mac_idx, slave, port); + } + } + return 0; +} + +static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave) +{ + int port; + struct mlx4_vport_oper_state *vp_oper; + struct mlx4_active_ports actv_ports = mlx4_get_active_ports( + &priv->dev, slave); + int min_port = find_first_bit(actv_ports.ports, + priv->dev.caps.num_ports) + 1; + int max_port = min_port - 1 + + bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports); + + + for (port = min_port; port <= max_port; port++) { + if (!test_bit(port - 1, actv_ports.ports)) + continue; + priv->mfunc.master.vf_oper[slave].smi_enabled[port] = + MLX4_VF_SMI_DISABLED; + vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + if (NO_INDX != vp_oper->vlan_idx) { + __mlx4_unregister_vlan(&priv->dev, + port, vp_oper->state.default_vlan); + vp_oper->vlan_idx = NO_INDX; + } + if (NO_INDX != vp_oper->mac_idx) { + __mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac); + vp_oper->mac_idx = NO_INDX; + } + } + return; +} + +static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, + u16 param, u8 toggle) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; + u32 reply; + u8 is_going_down = 0; + int i; + unsigned long flags; + + slave_state[slave].comm_toggle ^= 1; + reply = (u32) slave_state[slave].comm_toggle << 31; + if (toggle != slave_state[slave].comm_toggle) { + mlx4_warn(dev, "Incorrect toggle %d from slave %d. 
*** MASTER STATE COMPROMISED ***\n", + toggle, slave); + goto reset_slave; + } + if (cmd == MLX4_COMM_CMD_RESET) { + mlx4_warn(dev, "Received reset from slave:%d\n", slave); + slave_state[slave].active = false; + slave_state[slave].old_vlan_api = false; + mlx4_master_deactivate_admin_state(priv, slave); + for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) { + slave_state[slave].event_eq[i].eqn = -1; + slave_state[slave].event_eq[i].token = 0; + } + /*check if we are in the middle of FLR process, + if so return "retry" status to the slave*/ + if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) + goto inform_slave_state; + + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave); + + /* write the version in the event field */ + reply |= mlx4_comm_get_version(); + + goto reset_slave; + } + /*command from slave in the middle of FLR*/ + if (cmd != MLX4_COMM_CMD_RESET && + MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) { + mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n", + slave, cmd); + return; + } + + switch (cmd) { + case MLX4_COMM_CMD_VHCR0: + if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET) + goto reset_slave; + slave_state[slave].vhcr_dma = ((u64) param) << 48; + priv->mfunc.master.slave_state[slave].cookie = 0; + break; + case MLX4_COMM_CMD_VHCR1: + if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0) + goto reset_slave; + slave_state[slave].vhcr_dma |= ((u64) param) << 32; + break; + case MLX4_COMM_CMD_VHCR2: + if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1) + goto reset_slave; + slave_state[slave].vhcr_dma |= ((u64) param) << 16; + break; + case MLX4_COMM_CMD_VHCR_EN: + if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2) + goto reset_slave; + slave_state[slave].vhcr_dma |= param; + if (mlx4_master_activate_admin_state(priv, slave)) + goto reset_slave; + slave_state[slave].active = true; + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave); + break; + case MLX4_COMM_CMD_VHCR_POST: + if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) && + (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) { + mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n", + slave, cmd, slave_state[slave].last_cmd); + goto reset_slave; + } + + mutex_lock(&priv->cmd.slave_cmd_mutex); + if (mlx4_master_process_vhcr(dev, slave, NULL)) { + mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n", + slave); + mutex_unlock(&priv->cmd.slave_cmd_mutex); + goto reset_slave; + } + mutex_unlock(&priv->cmd.slave_cmd_mutex); + break; + default: + mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave); + goto reset_slave; + } + spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); + if (!slave_state[slave].is_slave_going_down) + slave_state[slave].last_cmd = cmd; + else + is_going_down = 1; + spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); + if (is_going_down) { + mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n", + cmd, slave); + return; + } + __raw_writel((__force u32) cpu_to_be32(reply), + &priv->mfunc.comm[slave].slave_read); + mmiowb(); + + return; + +reset_slave: + /* cleanup any slave resources */ + if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP) + mlx4_delete_all_resources_for_slave(dev, slave); + + if (cmd != MLX4_COMM_CMD_RESET) { + mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n", + slave, cmd); + /* Turn on internal error letting slave reset itself immeditaly, + * otherwise it 
might take till timeout on command is passed + */ + reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR); + } + + spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); + if (!slave_state[slave].is_slave_going_down) + slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET; + spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); + /*with slave in the middle of flr, no need to clean resources again.*/ +inform_slave_state: + memset(&slave_state[slave].event_eq, 0, + sizeof(struct mlx4_slave_event_eq_info)); + __raw_writel((__force u32) cpu_to_be32(reply), + &priv->mfunc.comm[slave].slave_read); + wmb(); +} + +/* master command processing */ +void mlx4_master_comm_channel(struct work_struct *work) +{ + struct mlx4_mfunc_master_ctx *master = + container_of(work, + struct mlx4_mfunc_master_ctx, + comm_work); + struct mlx4_mfunc *mfunc = + container_of(master, struct mlx4_mfunc, master); + struct mlx4_priv *priv = + container_of(mfunc, struct mlx4_priv, mfunc); + struct mlx4_dev *dev = &priv->dev; + __be32 *bit_vec; + u32 comm_cmd; + u32 vec; + int i, j, slave; + int toggle; + int served = 0; + int reported = 0; + u32 slt; + + bit_vec = master->comm_arm_bit_vector; + for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) { + vec = be32_to_cpu(bit_vec[i]); + for (j = 0; j < 32; j++) { + if (!(vec & (1 << j))) + continue; + ++reported; + slave = (i * 32) + j; + comm_cmd = swab32(readl( + &mfunc->comm[slave].slave_write)); + slt = swab32(readl(&mfunc->comm[slave].slave_read)) + >> 31; + toggle = comm_cmd >> 31; + if (toggle != slt) { + if (master->slave_state[slave].comm_toggle + != slt) { + pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n", + slave, slt, + master->slave_state[slave].comm_toggle); + master->slave_state[slave].comm_toggle = + slt; + } + mlx4_master_do_cmd(dev, slave, + comm_cmd >> 16 & 0xff, + comm_cmd & 0xffff, toggle); + ++served; + } + } + } + + if (reported && reported != served) + mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n", + reported, served); + + if (mlx4_ARM_COMM_CHANNEL(dev)) + mlx4_warn(dev, "Failed to arm comm channel events\n"); +} + +static int sync_toggles(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + u32 wr_toggle; + u32 rd_toggle; + unsigned long end; + + wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)); + if (wr_toggle == 0xffffffff) + end = jiffies + msecs_to_jiffies(30000); + else + end = jiffies + msecs_to_jiffies(5000); + + while (time_before(jiffies, end)) { + rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)); + if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) { + /* PCI might be offline */ + msleep(100); + wr_toggle = swab32(readl(&priv->mfunc.comm-> + slave_write)); + continue; + } + + if (rd_toggle >> 31 == wr_toggle >> 31) { + priv->cmd.comm_toggle = rd_toggle >> 31; + return 0; + } + + cond_resched(); + } + + /* + * we could reach here if for example the previous VM using this + * function misbehaved and left the channel with unsynced state. 
We + * should fix this here and give this VM a chance to use a properly + * synced channel + */ + mlx4_warn(dev, "recovering from previously mis-behaved VM\n"); + __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read); + __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write); + priv->cmd.comm_toggle = 0; + + return 0; +} + +int mlx4_multi_func_init(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *s_state; + int i, j, err, port; + + if (mlx4_is_master(dev)) + priv->mfunc.comm = + ioremap(pci_resource_start(dev->persist->pdev, + priv->fw.comm_bar) + + priv->fw.comm_base, MLX4_COMM_PAGESIZE); + else + priv->mfunc.comm = + ioremap(pci_resource_start(dev->persist->pdev, 2) + + MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE); + if (!priv->mfunc.comm) { + mlx4_err(dev, "Couldn't map communication vector\n"); + goto err_vhcr; + } + + if (mlx4_is_master(dev)) { + struct mlx4_vf_oper_state *vf_oper; + struct mlx4_vf_admin_state *vf_admin; + + priv->mfunc.master.slave_state = + kzalloc(dev->num_slaves * + sizeof(struct mlx4_slave_state), GFP_KERNEL); + if (!priv->mfunc.master.slave_state) + goto err_comm; + + priv->mfunc.master.vf_admin = + kzalloc(dev->num_slaves * + sizeof(struct mlx4_vf_admin_state), GFP_KERNEL); + if (!priv->mfunc.master.vf_admin) + goto err_comm_admin; + + priv->mfunc.master.vf_oper = + kzalloc(dev->num_slaves * + sizeof(struct mlx4_vf_oper_state), GFP_KERNEL); + if (!priv->mfunc.master.vf_oper) + goto err_comm_oper; + + for (i = 0; i < dev->num_slaves; ++i) { + vf_admin = &priv->mfunc.master.vf_admin[i]; + vf_oper = &priv->mfunc.master.vf_oper[i]; + s_state = &priv->mfunc.master.slave_state[i]; + s_state->last_cmd = MLX4_COMM_CMD_RESET; + mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]); + for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j) + s_state->event_eq[j].eqn = -1; + __raw_writel((__force u32) 0, + &priv->mfunc.comm[i].slave_write); + __raw_writel((__force u32) 0, + &priv->mfunc.comm[i].slave_read); + mmiowb(); + for (port = 1; port <= MLX4_MAX_PORTS; port++) { + struct mlx4_vport_state *admin_vport; + struct mlx4_vport_state *oper_vport; + + s_state->vlan_filter[port] = + kzalloc(sizeof(struct mlx4_vlan_fltr), + GFP_KERNEL); + if (!s_state->vlan_filter[port]) { + if (--port) + kfree(s_state->vlan_filter[port]); + goto err_slaves; + } + + admin_vport = &vf_admin->vport[port]; + oper_vport = &vf_oper->vport[port].state; + INIT_LIST_HEAD(&s_state->mcast_filters[port]); + admin_vport->default_vlan = MLX4_VGT; + oper_vport->default_vlan = MLX4_VGT; + admin_vport->qos_vport = + MLX4_VPP_DEFAULT_VPORT; + oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT; + vf_oper->vport[port].vlan_idx = NO_INDX; + vf_oper->vport[port].mac_idx = NO_INDX; + mlx4_set_random_admin_guid(dev, i, port); + } + spin_lock_init(&s_state->lock); + } + + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP) { + for (port = 1; port <= dev->caps.num_ports; port++) { + if (mlx4_is_eth(dev, port)) { + mlx4_set_default_port_qos(dev, port); + mlx4_allocate_port_vpps(dev, port); + } + } + } + + memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size); + priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD; + INIT_WORK(&priv->mfunc.master.comm_work, + mlx4_master_comm_channel); + INIT_WORK(&priv->mfunc.master.slave_event_work, + mlx4_gen_slave_eqe); + INIT_WORK(&priv->mfunc.master.slave_flr_event_work, + mlx4_master_handle_slave_flr); + spin_lock_init(&priv->mfunc.master.slave_state_lock); + spin_lock_init(&priv->mfunc.master.slave_eq.event_lock); + 
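+		/* Single-threaded workqueue: comm channel work queued here
+		 * (such as the per-VF vlan/QoS updates) is serialized.
+		 */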
priv->mfunc.master.comm_wq = + create_singlethread_workqueue("mlx4_comm"); + if (!priv->mfunc.master.comm_wq) + goto err_slaves; + + if (mlx4_init_resource_tracker(dev)) + goto err_thread; + + } else { + err = sync_toggles(dev); + if (err) { + mlx4_err(dev, "Couldn't sync toggles\n"); + goto err_comm; + } + } + return 0; + +err_thread: + flush_workqueue(priv->mfunc.master.comm_wq); + destroy_workqueue(priv->mfunc.master.comm_wq); +err_slaves: + while (--i) { + for (port = 1; port <= MLX4_MAX_PORTS; port++) + kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]); + } + kfree(priv->mfunc.master.vf_oper); +err_comm_oper: + kfree(priv->mfunc.master.vf_admin); +err_comm_admin: + kfree(priv->mfunc.master.slave_state); +err_comm: + iounmap(priv->mfunc.comm); +err_vhcr: + dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, + priv->mfunc.vhcr, + priv->mfunc.vhcr_dma); + priv->mfunc.vhcr = NULL; + return -ENOMEM; +} + +int mlx4_cmd_init(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int flags = 0; + + if (!priv->cmd.initialized) { + mutex_init(&priv->cmd.slave_cmd_mutex); + sema_init(&priv->cmd.poll_sem, 1); + priv->cmd.use_events = 0; + priv->cmd.toggle = 1; + priv->cmd.initialized = 1; + flags |= MLX4_CMD_CLEANUP_STRUCT; + } + + if (!mlx4_is_slave(dev) && !priv->cmd.hcr) { + priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev, + 0) + MLX4_HCR_BASE, MLX4_HCR_SIZE); + if (!priv->cmd.hcr) { + mlx4_err(dev, "Couldn't map command register\n"); + goto err; + } + flags |= MLX4_CMD_CLEANUP_HCR; + } + + if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) { + priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev, + PAGE_SIZE, + &priv->mfunc.vhcr_dma, + GFP_KERNEL); + if (!priv->mfunc.vhcr) + goto err; + + flags |= MLX4_CMD_CLEANUP_VHCR; + } + + if (!priv->cmd.pool) { + priv->cmd.pool = pci_pool_create("mlx4_cmd", + dev->persist->pdev, + MLX4_MAILBOX_SIZE, + MLX4_MAILBOX_SIZE, 0); + if (!priv->cmd.pool) + goto err; + + flags |= MLX4_CMD_CLEANUP_POOL; + } + + return 0; + +err: + mlx4_cmd_cleanup(dev, flags); + return -ENOMEM; +} + +void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int slave; + u32 slave_read; + + /* Report an internal error event to all + * communication channels. + */ + for (slave = 0; slave < dev->num_slaves; slave++) { + slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read)); + slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR; + __raw_writel((__force u32)cpu_to_be32(slave_read), + &priv->mfunc.comm[slave].slave_read); + /* Make sure that our comm channel write doesn't + * get mixed in with writes from another CPU. 
+ */ + mmiowb(); + } +} + +void mlx4_multi_func_cleanup(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i, port; + + if (mlx4_is_master(dev)) { + flush_workqueue(priv->mfunc.master.comm_wq); + destroy_workqueue(priv->mfunc.master.comm_wq); + for (i = 0; i < dev->num_slaves; i++) { + for (port = 1; port <= MLX4_MAX_PORTS; port++) + kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]); + } + kfree(priv->mfunc.master.slave_state); + kfree(priv->mfunc.master.vf_admin); + kfree(priv->mfunc.master.vf_oper); + dev->num_slaves = 0; + } + + iounmap(priv->mfunc.comm); +} + +void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) { + pci_pool_destroy(priv->cmd.pool); + priv->cmd.pool = NULL; + } + + if (!mlx4_is_slave(dev) && priv->cmd.hcr && + (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) { + iounmap(priv->cmd.hcr); + priv->cmd.hcr = NULL; + } + if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr && + (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) { + dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, + priv->mfunc.vhcr, priv->mfunc.vhcr_dma); + priv->mfunc.vhcr = NULL; + } + if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT)) + priv->cmd.initialized = 0; +} + +/* + * Switch to using events to issue FW commands (can only be called + * after event queue for command events has been initialized). + */ +int mlx4_cmd_use_events(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i; + int err = 0; + + priv->cmd.context = kmalloc(priv->cmd.max_cmds * + sizeof (struct mlx4_cmd_context), + GFP_KERNEL); + if (!priv->cmd.context) + return -ENOMEM; + + for (i = 0; i < priv->cmd.max_cmds; ++i) { + priv->cmd.context[i].token = i; + priv->cmd.context[i].next = i + 1; + /* To support fatal error flow, initialize all + * cmd contexts to allow simulating completions + * with complete() at any time. 
+ */ + init_completion(&priv->cmd.context[i].done); + } + + priv->cmd.context[priv->cmd.max_cmds - 1].next = -1; + priv->cmd.free_head = 0; + + sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds); + spin_lock_init(&priv->cmd.context_lock); + + for (priv->cmd.token_mask = 1; + priv->cmd.token_mask < priv->cmd.max_cmds; + priv->cmd.token_mask <<= 1) + ; /* nothing */ + --priv->cmd.token_mask; + + down(&priv->cmd.poll_sem); + priv->cmd.use_events = 1; + + return err; +} + +/* + * Switch back to polling (used when shutting down the device) + */ +void mlx4_cmd_use_polling(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i; + + priv->cmd.use_events = 0; + + for (i = 0; i < priv->cmd.max_cmds; ++i) + down(&priv->cmd.event_sem); + + kfree(priv->cmd.context); + + up(&priv->cmd.poll_sem); +} + +struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev) +{ + struct mlx4_cmd_mailbox *mailbox; + + mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL); + if (!mailbox) + return ERR_PTR(-ENOMEM); + + mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL, + &mailbox->dma); + if (!mailbox->buf) { + kfree(mailbox); + return ERR_PTR(-ENOMEM); + } + + memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE); + + return mailbox; +} +EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox); + +void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, + struct mlx4_cmd_mailbox *mailbox) +{ + if (!mailbox) + return; + + pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma); + kfree(mailbox); +} +EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox); + +u32 mlx4_comm_get_version(void) +{ + return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER; +} + +static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf) +{ + if ((vf < 0) || (vf >= dev->persist->num_vfs)) { + mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n", + vf, dev->persist->num_vfs); + return -EINVAL; + } + + return vf+1; +} + +int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave) +{ + if (slave < 1 || slave > dev->persist->num_vfs) { + mlx4_err(dev, + "Bad slave number:%d (number of activated slaves: %lu)\n", + slave, dev->num_slaves); + return -EINVAL; + } + return slave - 1; +} + +void mlx4_cmd_wake_completions(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cmd_context *context; + int i; + + spin_lock(&priv->cmd.context_lock); + if (priv->cmd.context) { + for (i = 0; i < priv->cmd.max_cmds; ++i) { + context = &priv->cmd.context[i]; + context->fw_status = CMD_STAT_INTERNAL_ERR; + context->result = + mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR); + complete(&context->done); + } + } + spin_unlock(&priv->cmd.context_lock); +} + +struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave) +{ + struct mlx4_active_ports actv_ports; + int vf; + + bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS); + + if (slave == 0) { + bitmap_fill(actv_ports.ports, dev->caps.num_ports); + return actv_ports; + } + + vf = mlx4_get_vf_indx(dev, slave); + if (vf < 0) + return actv_ports; + + bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1, + min((int)dev->dev_vfs[mlx4_get_vf_indx(dev, slave)].n_ports, + dev->caps.num_ports)); + + return actv_ports; +} +EXPORT_SYMBOL_GPL(mlx4_get_active_ports); + +int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port) +{ + unsigned n; + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); + unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports); + + if (port <= 0 || port > m) + return -EINVAL; + + n = 
find_first_bit(actv_ports.ports, dev->caps.num_ports); + if (port <= n) + port = n + 1; + + return port; +} +EXPORT_SYMBOL_GPL(mlx4_slave_convert_port); + +int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port) +{ + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); + if (test_bit(port - 1, actv_ports.ports)) + return port - + find_first_bit(actv_ports.ports, dev->caps.num_ports); + + return -1; +} +EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port); + +struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev, + int port) +{ + unsigned i; + struct mlx4_slaves_pport slaves_pport; + + bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX); + + if (port <= 0 || port > dev->caps.num_ports) + return slaves_pport; + + for (i = 0; i < dev->persist->num_vfs + 1; i++) { + struct mlx4_active_ports actv_ports = + mlx4_get_active_ports(dev, i); + if (test_bit(port - 1, actv_ports.ports)) + set_bit(i, slaves_pport.slaves); + } + + return slaves_pport; +} +EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport); + +struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv( + struct mlx4_dev *dev, + const struct mlx4_active_ports *crit_ports) +{ + unsigned i; + struct mlx4_slaves_pport slaves_pport; + + bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX); + + for (i = 0; i < dev->persist->num_vfs + 1; i++) { + struct mlx4_active_ports actv_ports = + mlx4_get_active_ports(dev, i); + if (bitmap_equal(crit_ports->ports, actv_ports.ports, + dev->caps.num_ports)) + set_bit(i, slaves_pport.slaves); + } + + return slaves_pport; +} +EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv); + +static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port) +{ + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); + int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports) + + 1; + int max_port = min_port + + bitmap_weight(actv_ports.ports, dev->caps.num_ports); + + if (port < min_port) + port = min_port; + else if (port >= max_port) + port = max_port - 1; + + return port; +} + +static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port, + int max_tx_rate) +{ + int i; + int err; + struct mlx4_qos_manager *port_qos; + struct mlx4_dev *dev = &priv->dev; + struct mlx4_vport_qos_param vpp_qos[MLX4_NUM_UP]; + + port_qos = &priv->mfunc.master.qos_ctl[port]; + memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP); + + if (slave > port_qos->num_of_qos_vfs) { + mlx4_info(dev, "No availible VPP resources for this VF\n"); + return -EINVAL; + } + + /* Query for default QoS values from Vport 0 is needed */ + err = mlx4_SET_VPORT_QOS_get(dev, port, 0, vpp_qos); + if (err) { + mlx4_info(dev, "Failed to query Vport 0 QoS values\n"); + return err; + } + + for (i = 0; i < MLX4_NUM_UP; i++) { + if (test_bit(i, port_qos->priority_bm) && max_tx_rate) { + vpp_qos[i].max_avg_bw = max_tx_rate; + vpp_qos[i].enable = 1; + } else { + /* if user supplied tx_rate == 0, meaning no rate limit + * configuration is required. so we are leaving the + * value of max_avg_bw as queried from Vport 0. 
+ */ + vpp_qos[i].enable = 0; + } + } + + err = mlx4_SET_VPORT_QOS_set(dev, port, slave, vpp_qos); + if (err) { + mlx4_info(dev, "Failed to set Vport %d QoS values\n", slave); + return err; + } + + return 0; +} + +static bool mlx4_is_vf_vst_and_prio_qos(struct mlx4_dev *dev, int port, + struct mlx4_vport_state *vf_admin) +{ + struct mlx4_qos_manager *info; + struct mlx4_priv *priv = mlx4_priv(dev); + + if (!mlx4_is_master(dev) || + !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) + return false; + + info = &priv->mfunc.master.qos_ctl[port]; + + if (vf_admin->default_vlan != MLX4_VGT && + test_bit(vf_admin->default_qos, info->priority_bm)) + return true; + + return false; +} + +static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port, + struct mlx4_vport_state *vf_admin, + int vlan, int qos) +{ + struct mlx4_vport_state dummy_admin = {0}; + + if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) || + !vf_admin->tx_rate) + return true; + + dummy_admin.default_qos = qos; + dummy_admin.default_vlan = vlan; + + /* VF wants to move to other VST state which is valid with current + * rate limit. Either differnt default vlan in VST or other + * supported QoS priority. Otherwise we don't allow this change when + * the TX rate is still configured. + */ + if (mlx4_is_vf_vst_and_prio_qos(dev, port, &dummy_admin)) + return true; + + mlx4_info(dev, "Cannot change VF state to %s while rate is set\n", + (vlan == MLX4_VGT) ? "VGT" : "VST"); + + if (vlan != MLX4_VGT) + mlx4_info(dev, "VST priority %d not supported for QoS\n", qos); + + mlx4_info(dev, "Please set rate to 0 prior to this VF state change\n"); + + return false; +} + +int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_vport_state *s_info; + int slave; + + if (!mlx4_is_master(dev)) + return -EPROTONOSUPPORT; + + slave = mlx4_get_slave_indx(dev, vf); + if (slave < 0) + return -EINVAL; + + port = mlx4_slaves_closest_port(dev, slave, port); + s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; + s_info->mac = mac; + mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n", + vf, port, s_info->mac); + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_set_vf_mac); + + +int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_vport_state *vf_admin; + int slave; + + if ((!mlx4_is_master(dev)) || + !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL)) + return -EPROTONOSUPPORT; + + if ((vlan > 4095) || (qos > 7)) + return -EINVAL; + + slave = mlx4_get_slave_indx(dev, vf); + if (slave < 0) + return -EINVAL; + + port = mlx4_slaves_closest_port(dev, slave, port); + vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; + + if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos)) + return -EPERM; + + if ((0 == vlan) && (0 == qos)) + vf_admin->default_vlan = MLX4_VGT; + else + vf_admin->default_vlan = vlan; + vf_admin->default_qos = qos; + + /* If rate was configured prior to VST, we saved the configured rate + * in vf_admin->rate and now, if priority supported we enforce the QoS + */ + if (mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) && + vf_admin->tx_rate) + vf_admin->qos_vport = slave; + + if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port)) + mlx4_info(dev, + "updating vf %d port %d config will take effect on next VF restart\n", + vf, port); + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan); + +int mlx4_set_vf_rate(struct 
mlx4_dev *dev, int port, int vf, int min_tx_rate, + int max_tx_rate) +{ + int err; + int slave; + struct mlx4_vport_state *vf_admin; + struct mlx4_priv *priv = mlx4_priv(dev); + + if (!mlx4_is_master(dev) || + !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) + return -EPROTONOSUPPORT; + + if (min_tx_rate) { + mlx4_info(dev, "Minimum BW share not supported\n"); + return -EPROTONOSUPPORT; + } + + slave = mlx4_get_slave_indx(dev, vf); + if (slave < 0) + return -EINVAL; + + port = mlx4_slaves_closest_port(dev, slave, port); + vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; + + err = mlx4_set_vport_qos(priv, slave, port, max_tx_rate); + if (err) { + mlx4_info(dev, "vf %d failed to set rate %d\n", vf, + max_tx_rate); + return err; + } + + vf_admin->tx_rate = max_tx_rate; + /* if VF is not in supported mode (VST with supported prio), + * we do not change vport configuration for its QPs, but save + * the rate, so it will be enforced when it moves to supported + * mode next time. + */ + if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin)) { + mlx4_info(dev, + "rate set for VF %d when not in valid state\n", vf); + + if (vf_admin->default_vlan != MLX4_VGT) + mlx4_info(dev, "VST priority not supported by QoS\n"); + else + mlx4_info(dev, "VF in VGT mode (needed VST)\n"); + + mlx4_info(dev, + "rate %d take affect when VF moves to valid state\n", + max_tx_rate); + return 0; + } + + /* If user sets rate 0 assigning default vport for its QPs */ + vf_admin->qos_vport = max_tx_rate ? slave : MLX4_VPP_DEFAULT_VPORT; + + if (priv->mfunc.master.slave_state[slave].active && + dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) + mlx4_master_immediate_activate_vlan_qos(priv, slave, port); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_set_vf_rate); + + /* mlx4_get_slave_default_vlan - + * return true if VST ( default vlan) + * if VST, will return vlan & qos (if not NULL) + */ +bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave, + u16 *vlan, u8 *qos) +{ + struct mlx4_vport_oper_state *vp_oper; + struct mlx4_priv *priv; + + priv = mlx4_priv(dev); + port = mlx4_slaves_closest_port(dev, slave, port); + vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + + if (MLX4_VGT != vp_oper->state.default_vlan) { + if (vlan) + *vlan = vp_oper->state.default_vlan; + if (qos) + *qos = vp_oper->state.default_qos; + return true; + } + return false; +} +EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan); + +int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_vport_state *s_info; + int slave; + + if ((!mlx4_is_master(dev)) || + !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM)) + return -EPROTONOSUPPORT; + + slave = mlx4_get_slave_indx(dev, vf); + if (slave < 0) + return -EINVAL; + + port = mlx4_slaves_closest_port(dev, slave, port); + s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; + s_info->spoofchk = setting; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk); + +int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_vport_state *s_info; + int slave; + + if (!mlx4_is_master(dev)) + return -EPROTONOSUPPORT; + + slave = mlx4_get_slave_indx(dev, vf); + if (slave < 0) + return -EINVAL; + + s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; + ivf->vf = vf; + + /* need to convert it to a func */ + ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff); + ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff); + ivf->mac[2] = 
((s_info->mac >> (3*8)) & 0xff); + ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff); + ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff); + ivf->mac[5] = ((s_info->mac) & 0xff); + + ivf->vlan = s_info->default_vlan; + ivf->qos = s_info->default_qos; + + if (mlx4_is_vf_vst_and_prio_qos(dev, port, s_info)) + ivf->max_tx_rate = s_info->tx_rate; + else + ivf->max_tx_rate = 0; + + ivf->min_tx_rate = 0; + ivf->spoofchk = s_info->spoofchk; + ivf->linkstate = s_info->link_state; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_get_vf_config); + +int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_vport_state *s_info; + int slave; + u8 link_stat_event; + + slave = mlx4_get_slave_indx(dev, vf); + if (slave < 0) + return -EINVAL; + + port = mlx4_slaves_closest_port(dev, slave, port); + switch (link_state) { + case IFLA_VF_LINK_STATE_AUTO: + /* get current link state */ + if (!priv->sense.do_sense_port[port]) + link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE; + else + link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN; + break; + + case IFLA_VF_LINK_STATE_ENABLE: + link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE; + break; + + case IFLA_VF_LINK_STATE_DISABLE: + link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN; + break; + + default: + mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n", + link_state, slave, port); + return -EINVAL; + }; + s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; + s_info->link_state = link_state; + + /* send event */ + mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event); + + if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port)) + mlx4_dbg(dev, + "updating vf %d port %d no link state HW enforcment\n", + vf, port); + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state); + +int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + if (slave < 1 || slave >= dev->num_slaves || + port < 1 || port > MLX4_MAX_PORTS) + return 0; + + return priv->mfunc.master.vf_oper[slave].smi_enabled[port] == + MLX4_VF_SMI_ENABLED; +} +EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled); + +int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + if (slave == mlx4_master_func_num(dev)) + return 1; + + if (slave < 1 || slave >= dev->num_slaves || + port < 1 || port > MLX4_MAX_PORTS) + return 0; + + return priv->mfunc.master.vf_admin[slave].enable_smi[port] == + MLX4_VF_SMI_ENABLED; +} +EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin); + +int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port, + int enabled) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + if (slave == mlx4_master_func_num(dev)) + return 0; + + if (slave < 1 || slave >= dev->num_slaves || + port < 1 || port > MLX4_MAX_PORTS || + enabled < 0 || enabled > 1) + return -EINVAL; + + priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled; + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin); diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c new file mode 100644 index 000000000..e71f31387 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -0,0 +1,409 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. 
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/hardirq.h> +#include <linux/export.h> + +#include <linux/mlx4/cmd.h> +#include <linux/mlx4/cq.h> + +#include "mlx4.h" +#include "icm.h" + +#define MLX4_CQ_STATUS_OK ( 0 << 28) +#define MLX4_CQ_STATUS_OVERFLOW ( 9 << 28) +#define MLX4_CQ_STATUS_WRITE_FAIL (10 << 28) +#define MLX4_CQ_FLAG_CC ( 1 << 18) +#define MLX4_CQ_FLAG_OI ( 1 << 17) +#define MLX4_CQ_STATE_ARMED ( 9 << 8) +#define MLX4_CQ_STATE_ARMED_SOL ( 6 << 8) +#define MLX4_EQ_STATE_FIRED (10 << 8) + +#define TASKLET_MAX_TIME 2 +#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME) + +void mlx4_cq_tasklet_cb(unsigned long data) +{ + unsigned long flags; + unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES; + struct mlx4_eq_tasklet *ctx = (struct mlx4_eq_tasklet *)data; + struct mlx4_cq *mcq, *temp; + + spin_lock_irqsave(&ctx->lock, flags); + list_splice_tail_init(&ctx->list, &ctx->process_list); + spin_unlock_irqrestore(&ctx->lock, flags); + + list_for_each_entry_safe(mcq, temp, &ctx->process_list, tasklet_ctx.list) { + list_del_init(&mcq->tasklet_ctx.list); + mcq->tasklet_ctx.comp(mcq); + if (atomic_dec_and_test(&mcq->refcount)) + complete(&mcq->free); + if (time_after(jiffies, end)) + break; + } + + if (!list_empty(&ctx->process_list)) + tasklet_schedule(&ctx->task); +} + +static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq) +{ + unsigned long flags; + struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv; + + spin_lock_irqsave(&tasklet_ctx->lock, flags); + /* When migrating CQs between EQs will be implemented, please note + * that you need to sync this point. It is possible that + * while migrating a CQ, completions on the old EQs could + * still arrive. 
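+ * Until such migration exists, a CQ stays bound to the EQ chosen at creation time.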
+ */ + if (list_empty_careful(&cq->tasklet_ctx.list)) { + atomic_inc(&cq->refcount); + list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list); + } + spin_unlock_irqrestore(&tasklet_ctx->lock, flags); +} + +void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn) +{ + struct mlx4_cq *cq; + + cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree, + cqn & (dev->caps.num_cqs - 1)); + if (!cq) { + mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn); + return; + } + + ++cq->arm_sn; + + cq->comp(cq); +} + +void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type) +{ + struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table; + struct mlx4_cq *cq; + + spin_lock(&cq_table->lock); + + cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1)); + if (cq) + atomic_inc(&cq->refcount); + + spin_unlock(&cq_table->lock); + + if (!cq) { + mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn); + return; + } + + cq->event(cq, event_type); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); +} + +static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int cq_num) +{ + return mlx4_cmd(dev, mailbox->dma, cq_num, 0, + MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); +} + +static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int cq_num, u32 opmod) +{ + return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); +} + +static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int cq_num) +{ + return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, + cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); +} + +int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq, + u16 count, u16 period) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_cq_context *cq_context; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + cq_context = mailbox->buf; + cq_context->cq_max_count = cpu_to_be16(count); + cq_context->cq_period = cpu_to_be16(period); + + err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_cq_modify); + +int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq, + int entries, struct mlx4_mtt *mtt) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_cq_context *cq_context; + u64 mtt_addr; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + cq_context = mailbox->buf; + cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24); + cq_context->log_page_size = mtt->page_shift - 12; + mtt_addr = mlx4_mtt_addr(dev, mtt); + cq_context->mtt_base_addr_h = mtt_addr >> 32; + cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); + + err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_cq_resize); + +int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cq_table *cq_table = &priv->cq_table; + int err; + + *cqn = mlx4_bitmap_alloc(&cq_table->bitmap); + if (*cqn == -1) + return -ENOMEM; + + err = mlx4_table_get(dev, &cq_table->table, *cqn, GFP_KERNEL); + if (err) + goto err_out; + + err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn, GFP_KERNEL); + if (err) + goto err_put; + return 0; + +err_put: + mlx4_table_put(dev, &cq_table->table, *cqn); + +err_out: 
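+ /* ICM table setup failed; return the reserved CQN to the bitmap */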
+ mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR); + return err; +} + +static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn) +{ + u64 out_param; + int err; + + if (mlx4_is_mfunc(dev)) { + err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ, + RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) + return err; + else { + *cqn = get_param_l(&out_param); + return 0; + } + } + return __mlx4_cq_alloc_icm(dev, cqn); +} + +void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cq_table *cq_table = &priv->cq_table; + + mlx4_table_put(dev, &cq_table->cmpt_table, cqn); + mlx4_table_put(dev, &cq_table->table, cqn); + mlx4_bitmap_free(&cq_table->bitmap, cqn, MLX4_NO_RR); +} + +static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn) +{ + u64 in_param = 0; + int err; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, cqn); + err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP, + MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) + mlx4_warn(dev, "Failed freeing cq:%d\n", cqn); + } else + __mlx4_cq_free_icm(dev, cqn); +} + +int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, + struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec, + struct mlx4_cq *cq, unsigned vector, int collapsed, + int timestamp_en) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cq_table *cq_table = &priv->cq_table; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_cq_context *cq_context; + u64 mtt_addr; + int err; + + if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool) + return -EINVAL; + + cq->vector = vector; + + err = mlx4_cq_alloc_icm(dev, &cq->cqn); + if (err) + return err; + + spin_lock_irq(&cq_table->lock); + err = radix_tree_insert(&cq_table->tree, cq->cqn, cq); + spin_unlock_irq(&cq_table->lock); + if (err) + goto err_icm; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); + goto err_radix; + } + + cq_context = mailbox->buf; + cq_context->flags = cpu_to_be32(!!collapsed << 18); + if (timestamp_en) + cq_context->flags |= cpu_to_be32(1 << 19); + + cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index); + cq_context->comp_eqn = priv->eq_table.eq[vector].eqn; + cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; + + mtt_addr = mlx4_mtt_addr(dev, mtt); + cq_context->mtt_base_addr_h = mtt_addr >> 32; + cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); + cq_context->db_rec_addr = cpu_to_be64(db_rec); + + err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn); + mlx4_free_cmd_mailbox(dev, mailbox); + if (err) + goto err_radix; + + cq->cons_index = 0; + cq->arm_sn = 1; + cq->uar = uar; + atomic_set(&cq->refcount, 1); + init_completion(&cq->free); + cq->comp = mlx4_add_cq_to_tasklet; + cq->tasklet_ctx.priv = + &priv->eq_table.eq[cq->vector].tasklet_ctx; + INIT_LIST_HEAD(&cq->tasklet_ctx.list); + + + cq->irq = priv->eq_table.eq[cq->vector].irq; + return 0; + +err_radix: + spin_lock_irq(&cq_table->lock); + radix_tree_delete(&cq_table->tree, cq->cqn); + spin_unlock_irq(&cq_table->lock); + +err_icm: + mlx4_cq_free_icm(dev, cq->cqn); + + return err; +} +EXPORT_SYMBOL_GPL(mlx4_cq_alloc); + +void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cq_table *cq_table = &priv->cq_table; + int err; + + err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn); + if (err) + mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn); + + 
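/* wait until no interrupt handler is still running on the EQ that serves this CQ */ +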
synchronize_irq(priv->eq_table.eq[cq->vector].irq); + + spin_lock_irq(&cq_table->lock); + radix_tree_delete(&cq_table->tree, cq->cqn); + spin_unlock_irq(&cq_table->lock); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); + wait_for_completion(&cq->free); + + mlx4_cq_free_icm(dev, cq->cqn); +} +EXPORT_SYMBOL_GPL(mlx4_cq_free); + +int mlx4_init_cq_table(struct mlx4_dev *dev) +{ + struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table; + int err; + + spin_lock_init(&cq_table->lock); + INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC); + if (mlx4_is_slave(dev)) + return 0; + + err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs, + dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0); + if (err) + return err; + + return 0; +} + +void mlx4_cleanup_cq_table(struct mlx4_dev *dev) +{ + if (mlx4_is_slave(dev)) + return; + /* Nothing to do to clean up radix_tree */ + mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c new file mode 100644 index 000000000..8a083d73e --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c @@ -0,0 +1,285 @@ +/* + * Copyright (c) 2012 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#include <linux/mlx4/device.h> +#include <linux/clocksource.h> + +#include "mlx4_en.h" + +/* mlx4_en_read_clock - read raw cycle counter (to be used by time counter) + */ +static cycle_t mlx4_en_read_clock(const struct cyclecounter *tc) +{ + struct mlx4_en_dev *mdev = + container_of(tc, struct mlx4_en_dev, cycles); + struct mlx4_dev *dev = mdev->dev; + + return mlx4_read_clock(dev) & tc->mask; +} + +u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe) +{ + u64 hi, lo; + struct mlx4_ts_cqe *ts_cqe = (struct mlx4_ts_cqe *)cqe; + + lo = (u64)be16_to_cpu(ts_cqe->timestamp_lo); + hi = ((u64)be32_to_cpu(ts_cqe->timestamp_hi) + !lo) << 16; + + return hi | lo; +} + +void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev, + struct skb_shared_hwtstamps *hwts, + u64 timestamp) +{ + unsigned long flags; + u64 nsec; + + read_lock_irqsave(&mdev->clock_lock, flags); + nsec = timecounter_cyc2time(&mdev->clock, timestamp); + read_unlock_irqrestore(&mdev->clock_lock, flags); + + memset(hwts, 0, sizeof(struct skb_shared_hwtstamps)); + hwts->hwtstamp = ns_to_ktime(nsec); +} + +/** + * mlx4_en_remove_timestamp - disable PTP device + * @mdev: board private structure + * + * Stop the PTP support. + **/ +void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev) +{ + if (mdev->ptp_clock) { + ptp_clock_unregister(mdev->ptp_clock); + mdev->ptp_clock = NULL; + mlx4_info(mdev, "removed PHC\n"); + } +} + +void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev) +{ + bool timeout = time_is_before_jiffies(mdev->last_overflow_check + + mdev->overflow_period); + unsigned long flags; + + if (timeout) { + write_lock_irqsave(&mdev->clock_lock, flags); + timecounter_read(&mdev->clock); + write_unlock_irqrestore(&mdev->clock_lock, flags); + mdev->last_overflow_check = jiffies; + } +} + +/** + * mlx4_en_phc_adjfreq - adjust the frequency of the hardware clock + * @ptp: ptp clock structure + * @delta: Desired frequency change in parts per billion + * + * Adjust the frequency of the PHC cycle counter by the indicated delta from + * the base frequency. + **/ +static int mlx4_en_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) +{ + u64 adj; + u32 diff, mult; + int neg_adj = 0; + unsigned long flags; + struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev, + ptp_clock_info); + + if (delta < 0) { + neg_adj = 1; + delta = -delta; + } + mult = mdev->nominal_c_mult; + adj = mult; + adj *= delta; + diff = div_u64(adj, 1000000000ULL); + + write_lock_irqsave(&mdev->clock_lock, flags); + timecounter_read(&mdev->clock); + mdev->cycles.mult = neg_adj ? mult - diff : mult + diff; + write_unlock_irqrestore(&mdev->clock_lock, flags); + + return 0; +} + +/** + * mlx4_en_phc_adjtime - Shift the time of the hardware clock + * @ptp: ptp clock structure + * @delta: Desired change in nanoseconds + * + * Adjust the timer by resetting the timecounter structure. + **/ +static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev, + ptp_clock_info); + unsigned long flags; + + write_lock_irqsave(&mdev->clock_lock, flags); + timecounter_adjtime(&mdev->clock, delta); + write_unlock_irqrestore(&mdev->clock_lock, flags); + + return 0; +} + +/** + * mlx4_en_phc_gettime - Reads the current time from the hardware clock + * @ptp: ptp clock structure + * @ts: timespec structure to hold the current time value + * + * Read the timecounter and return the correct value in ns after converting + * it into a struct timespec. 
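+ * (timecounter_read() also updates the time counter state, hence the write lock.)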
+ **/ +static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev, + ptp_clock_info); + unsigned long flags; + u64 ns; + + write_lock_irqsave(&mdev->clock_lock, flags); + ns = timecounter_read(&mdev->clock); + write_unlock_irqrestore(&mdev->clock_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +/** + * mlx4_en_phc_settime - Set the current time on the hardware clock + * @ptp: ptp clock structure + * @ts: timespec containing the new time for the cycle counter + * + * Reset the timecounter to use a new base value instead of the kernel + * wall timer value. + **/ +static int mlx4_en_phc_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev, + ptp_clock_info); + u64 ns = timespec64_to_ns(ts); + unsigned long flags; + + /* reset the timecounter */ + write_lock_irqsave(&mdev->clock_lock, flags); + timecounter_init(&mdev->clock, &mdev->cycles, ns); + write_unlock_irqrestore(&mdev->clock_lock, flags); + + return 0; +} + +/** + * mlx4_en_phc_enable - enable or disable an ancillary feature + * @ptp: ptp clock structure + * @request: Desired resource to enable or disable + * @on: Caller passes one to enable or zero to disable + * + * Enable (or disable) ancillary features of the PHC subsystem. + * Currently, no ancillary features are supported. + **/ +static int mlx4_en_phc_enable(struct ptp_clock_info __always_unused *ptp, + struct ptp_clock_request __always_unused *request, + int __always_unused on) +{ + return -EOPNOTSUPP; +} + +static const struct ptp_clock_info mlx4_en_ptp_clock_info = { + .owner = THIS_MODULE, + .max_adj = 100000000, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + .n_pins = 0, + .pps = 0, + .adjfreq = mlx4_en_phc_adjfreq, + .adjtime = mlx4_en_phc_adjtime, + .gettime64 = mlx4_en_phc_gettime, + .settime64 = mlx4_en_phc_settime, + .enable = mlx4_en_phc_enable, +}; + +void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) +{ + struct mlx4_dev *dev = mdev->dev; + unsigned long flags; + u64 ns, zero = 0; + + rwlock_init(&mdev->clock_lock); + + memset(&mdev->cycles, 0, sizeof(mdev->cycles)); + mdev->cycles.read = mlx4_en_read_clock; + mdev->cycles.mask = CLOCKSOURCE_MASK(48); + /* Using shift to make calculation more accurate. Since current HW + * clock frequency is 427 MHz, and cycles are given using a 48 bits + * register, the biggest shift when calculating using u64, is 14 + * (max_cycles * multiplier < 2^64) + */ + mdev->cycles.shift = 14; + mdev->cycles.mult = + clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift); + mdev->nominal_c_mult = mdev->cycles.mult; + + write_lock_irqsave(&mdev->clock_lock, flags); + timecounter_init(&mdev->clock, &mdev->cycles, + ktime_to_ns(ktime_get_real())); + write_unlock_irqrestore(&mdev->clock_lock, flags); + + /* Calculate period in seconds to call the overflow watchdog - to make + * sure counter is checked at least once every wrap around. 
+ */ + ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask, zero, &zero); + do_div(ns, NSEC_PER_SEC / 2 / HZ); + mdev->overflow_period = ns; + + /* Configure the PHC */ + mdev->ptp_clock_info = mlx4_en_ptp_clock_info; + snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp"); + + mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info, + &mdev->pdev->dev); + if (IS_ERR(mdev->ptp_clock)) { + mdev->ptp_clock = NULL; + mlx4_err(mdev, "ptp_clock_register failed\n"); + } else { + mlx4_info(mdev, "registered PHC clock\n"); + } + +} diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c new file mode 100644 index 000000000..22da4d0d0 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -0,0 +1,232 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include <linux/mlx4/cq.h> +#include <linux/mlx4/qp.h> +#include <linux/mlx4/cmd.h> + +#include "mlx4_en.h" + +static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event) +{ + return; +} + + +int mlx4_en_create_cq(struct mlx4_en_priv *priv, + struct mlx4_en_cq **pcq, + int entries, int ring, enum cq_type mode, + int node) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_cq *cq; + int err; + + cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node); + if (!cq) { + cq = kzalloc(sizeof(*cq), GFP_KERNEL); + if (!cq) { + en_err(priv, "Failed to allocate CQ structure\n"); + return -ENOMEM; + } + } + + cq->size = entries; + cq->buf_size = cq->size * mdev->dev->caps.cqe_size; + + cq->ring = ring; + cq->is_tx = mode; + + /* Allocate HW buffers on provided NUMA node. + * dev->numa_node is used in mtt range allocation flow. 
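+ * The device node is restored to dev->numa_node right after the allocation.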
+ */ + set_dev_node(&mdev->dev->persist->pdev->dev, node); + err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres, + cq->buf_size, 2 * PAGE_SIZE); + set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node); + if (err) + goto err_cq; + + err = mlx4_en_map_buffer(&cq->wqres.buf); + if (err) + goto err_res; + + cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf; + *pcq = cq; + + return 0; + +err_res: + mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); +err_cq: + kfree(cq); + *pcq = NULL; + return err; +} + +int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, + int cq_idx) +{ + struct mlx4_en_dev *mdev = priv->mdev; + int err = 0; + char name[25]; + int timestamp_en = 0; + struct cpu_rmap *rmap = +#ifdef CONFIG_RFS_ACCEL + priv->dev->rx_cpu_rmap; +#else + NULL; +#endif + + cq->dev = mdev->pndev[priv->port]; + cq->mcq.set_ci_db = cq->wqres.db.db; + cq->mcq.arm_db = cq->wqres.db.db + 1; + *cq->mcq.set_ci_db = 0; + *cq->mcq.arm_db = 0; + memset(cq->buf, 0, cq->buf_size); + + if (cq->is_tx == RX) { + if (mdev->dev->caps.comp_pool) { + if (!cq->vector) { + sprintf(name, "%s-%d", priv->dev->name, + cq->ring); + /* Set IRQ for specific name (per ring) */ + if (mlx4_assign_eq(mdev->dev, name, rmap, + &cq->vector)) { + cq->vector = (cq->ring + 1 + priv->port) + % mdev->dev->caps.num_comp_vectors; + mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n", + name); + } + + } + } else { + cq->vector = (cq->ring + 1 + priv->port) % + mdev->dev->caps.num_comp_vectors; + } + + cq->irq_desc = + irq_to_desc(mlx4_eq_get_irq(mdev->dev, + cq->vector)); + } else { + /* For TX we use the same irq per + ring we assigned for the RX */ + struct mlx4_en_cq *rx_cq; + + cq_idx = cq_idx % priv->rx_ring_num; + rx_cq = priv->rx_cq[cq_idx]; + cq->vector = rx_cq->vector; + } + + if (!cq->is_tx) + cq->size = priv->rx_ring[cq->ring]->actual_size; + + if ((cq->is_tx && priv->hwtstamp_config.tx_type) || + (!cq->is_tx && priv->hwtstamp_config.rx_filter)) + timestamp_en = 1; + + err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, + &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq, + cq->vector, 0, timestamp_en); + if (err) + return err; + + cq->mcq.comp = cq->is_tx ? 
mlx4_en_tx_irq : mlx4_en_rx_irq; + cq->mcq.event = mlx4_en_cq_event; + + if (cq->is_tx) { + netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq, + NAPI_POLL_WEIGHT); + } else { + struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring]; + + err = irq_set_affinity_hint(cq->mcq.irq, + ring->affinity_mask); + if (err) + mlx4_warn(mdev, "Failed setting affinity hint\n"); + + netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64); + napi_hash_add(&cq->napi); + } + + napi_enable(&cq->napi); + + return 0; +} + +void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_cq *cq = *pcq; + + mlx4_en_unmap_buffer(&cq->wqres.buf); + mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); + if (priv->mdev->dev->caps.comp_pool && cq->vector) { + mlx4_release_eq(priv->mdev->dev, cq->vector); + } + cq->vector = 0; + cq->buf_size = 0; + cq->buf = NULL; + kfree(cq); + *pcq = NULL; +} + +void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) +{ + napi_disable(&cq->napi); + if (!cq->is_tx) { + napi_hash_del(&cq->napi); + synchronize_rcu(); + irq_set_affinity_hint(cq->mcq.irq, NULL); + } + netif_napi_del(&cq->napi); + + mlx4_cq_free(priv->mdev->dev, &cq->mcq); +} + +/* Set rx cq moderation parameters */ +int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) +{ + return mlx4_cq_modify(priv->mdev->dev, &cq->mcq, + cq->moder_cnt, cq->moder_time); +} + +int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) +{ + mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map, + &priv->mdev->uar_lock); + + return 0; +} + + diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c new file mode 100644 index 000000000..f01918c63 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c @@ -0,0 +1,486 @@ +/* + * Copyright (c) 2011 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#include <linux/dcbnl.h> +#include <linux/math64.h> + +#include "mlx4_en.h" +#include "fw_qos.h" + +/* Definitions for QCN + */ + +struct mlx4_congestion_control_mb_prio_802_1_qau_params { + __be32 modify_enable_high; + __be32 modify_enable_low; + __be32 reserved1; + __be32 extended_enable; + __be32 rppp_max_rps; + __be32 rpg_time_reset; + __be32 rpg_byte_reset; + __be32 rpg_threshold; + __be32 rpg_max_rate; + __be32 rpg_ai_rate; + __be32 rpg_hai_rate; + __be32 rpg_gd; + __be32 rpg_min_dec_fac; + __be32 rpg_min_rate; + __be32 max_time_rise; + __be32 max_byte_rise; + __be32 max_qdelta; + __be32 min_qoffset; + __be32 gd_coefficient; + __be32 reserved2[5]; + __be32 cp_sample_base; + __be32 reserved3[39]; +}; + +struct mlx4_congestion_control_mb_prio_802_1_qau_statistics { + __be64 rppp_rp_centiseconds; + __be32 reserved1; + __be32 ignored_cnm; + __be32 rppp_created_rps; + __be32 estimated_total_rate; + __be32 max_active_rate_limiter_index; + __be32 dropped_cnms_busy_fw; + __be32 reserved2; + __be32 cnms_handled_successfully; + __be32 min_total_limiters_rate; + __be32 max_total_limiters_rate; + __be32 reserved3[4]; +}; + +static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct ieee_ets *my_ets = &priv->ets; + + /* No IEEE PFC settings available */ + if (!my_ets) + return -EINVAL; + + ets->ets_cap = IEEE_8021QAZ_MAX_TCS; + ets->cbs = my_ets->cbs; + memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)); + memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)); + + return 0; +} + +static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets) +{ + int i; + int total_ets_bw = 0; + int has_ets_tc = 0; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (ets->prio_tc[i] >= MLX4_EN_NUM_UP) { + en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n", + i, ets->prio_tc[i]); + return -EINVAL; + } + + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + break; + case IEEE_8021QAZ_TSA_ETS: + has_ets_tc = 1; + total_ets_bw += ets->tc_tx_bw[i]; + break; + default: + en_err(priv, "TC[%d]: Not supported TSA: %d\n", + i, ets->tc_tsa[i]); + return -ENOTSUPP; + } + } + + if (has_ets_tc && total_ets_bw != MLX4_EN_BW_MAX) { + en_err(priv, "Bad ETS BW sum: %d. 
Should be exactly 100%%\n", + total_ets_bw); + return -EINVAL; + } + + return 0; +} + +static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv, + struct ieee_ets *ets, u16 *ratelimit) +{ + struct mlx4_en_dev *mdev = priv->mdev; + int num_strict = 0; + int i; + __u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS] = { 0 }; + __u8 pg[IEEE_8021QAZ_MAX_TCS] = { 0 }; + + ets = ets ?: &priv->ets; + ratelimit = ratelimit ?: priv->maxrate; + + /* higher TC means higher priority => lower pg */ + for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + pg[i] = num_strict++; + tc_tx_bw[i] = MLX4_EN_BW_MAX; + break; + case IEEE_8021QAZ_TSA_ETS: + pg[i] = MLX4_EN_TC_ETS; + tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX4_EN_BW_MIN; + break; + } + } + + return mlx4_SET_PORT_SCHEDULER(mdev->dev, priv->port, tc_tx_bw, pg, + ratelimit); +} + +static int +mlx4_en_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int err; + + err = mlx4_en_ets_validate(priv, ets); + if (err) + return err; + + err = mlx4_SET_PORT_PRIO2TC(mdev->dev, priv->port, ets->prio_tc); + if (err) + return err; + + err = mlx4_en_config_port_scheduler(priv, ets, NULL); + if (err) + return err; + + memcpy(&priv->ets, ets, sizeof(priv->ets)); + + return 0; +} + +static int mlx4_en_dcbnl_ieee_getpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS; + pfc->pfc_en = priv->prof->tx_ppp; + + return 0; +} + +static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_port_profile *prof = priv->prof; + struct mlx4_en_dev *mdev = priv->mdev; + int err; + + en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n", + pfc->pfc_cap, + pfc->pfc_en, + pfc->mbc, + pfc->delay); + + prof->rx_pause = !pfc->pfc_en; + prof->tx_pause = !pfc->pfc_en; + prof->rx_ppp = pfc->pfc_en; + prof->tx_ppp = pfc->pfc_en; + + err = mlx4_SET_PORT_general(mdev->dev, priv->port, + priv->rx_skb_size + ETH_FCS_LEN, + prof->tx_pause, + prof->tx_ppp, + prof->rx_pause, + prof->rx_ppp); + if (err) + en_err(priv, "Failed setting pause params\n"); + else + mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap, + prof->rx_ppp, prof->rx_pause, + prof->tx_ppp, prof->tx_pause); + + return err; +} + +static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev) +{ + return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; +} + +static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode) +{ + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || + (mode & DCB_CAP_DCBX_VER_CEE) || + !(mode & DCB_CAP_DCBX_VER_IEEE) || + !(mode & DCB_CAP_DCBX_HOST)) + return 1; + + return 0; +} + +#define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */ +static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev, + struct ieee_maxrate *maxrate) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int i; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + maxrate->tc_maxrate[i] = + priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB; + + return 0; +} + +static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev, + struct ieee_maxrate *maxrate) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + u16 tmp[IEEE_8021QAZ_MAX_TCS]; + int i, err; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + /* Convert from Kbps into HW units, rounding result up. 
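+ * One HW unit is MLX4_RATELIMIT_UNITS_IN_KB Kbps (i.e. 100 Mbps).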
+ * Setting to 0, means unlimited BW. + */ + tmp[i] = div_u64(maxrate->tc_maxrate[i] + + MLX4_RATELIMIT_UNITS_IN_KB - 1, + MLX4_RATELIMIT_UNITS_IN_KB); + } + + err = mlx4_en_config_port_scheduler(priv, NULL, tmp); + if (err) + return err; + + memcpy(priv->maxrate, tmp, sizeof(priv->maxrate)); + + return 0; +} + +#define RPG_ENABLE_BIT 31 +#define CN_TAG_BIT 30 + +static int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev, + struct ieee_qcn *qcn) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn; + struct mlx4_cmd_mailbox *mailbox_out = NULL; + u64 mailbox_in_dma = 0; + u32 inmod = 0; + int i, err; + + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN)) + return -EOPNOTSUPP; + + mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev); + if (IS_ERR(mailbox_out)) + return -ENOMEM; + hw_qcn = + (struct mlx4_congestion_control_mb_prio_802_1_qau_params *) + mailbox_out->buf; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + inmod = priv->port | ((1 << i) << 8) | + (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16); + err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma, + mailbox_out->dma, + inmod, MLX4_CONGESTION_CONTROL_GET_PARAMS, + MLX4_CMD_CONGESTION_CTRL_OPCODE, + MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (err) { + mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out); + return err; + } + + qcn->rpg_enable[i] = + be32_to_cpu(hw_qcn->extended_enable) >> RPG_ENABLE_BIT; + qcn->rppp_max_rps[i] = + be32_to_cpu(hw_qcn->rppp_max_rps); + qcn->rpg_time_reset[i] = + be32_to_cpu(hw_qcn->rpg_time_reset); + qcn->rpg_byte_reset[i] = + be32_to_cpu(hw_qcn->rpg_byte_reset); + qcn->rpg_threshold[i] = + be32_to_cpu(hw_qcn->rpg_threshold); + qcn->rpg_max_rate[i] = + be32_to_cpu(hw_qcn->rpg_max_rate); + qcn->rpg_ai_rate[i] = + be32_to_cpu(hw_qcn->rpg_ai_rate); + qcn->rpg_hai_rate[i] = + be32_to_cpu(hw_qcn->rpg_hai_rate); + qcn->rpg_gd[i] = + be32_to_cpu(hw_qcn->rpg_gd); + qcn->rpg_min_dec_fac[i] = + be32_to_cpu(hw_qcn->rpg_min_dec_fac); + qcn->rpg_min_rate[i] = + be32_to_cpu(hw_qcn->rpg_min_rate); + qcn->cndd_state_machine[i] = + priv->cndd_state[i]; + } + mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out); + return 0; +} + +static int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev, + struct ieee_qcn *qcn) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn; + struct mlx4_cmd_mailbox *mailbox_in = NULL; + u64 mailbox_in_dma = 0; + u32 inmod = 0; + int i, err; +#define MODIFY_ENABLE_HIGH_MASK 0xc0000000 +#define MODIFY_ENABLE_LOW_MASK 0xffc00000 + + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN)) + return -EOPNOTSUPP; + + mailbox_in = mlx4_alloc_cmd_mailbox(priv->mdev->dev); + if (IS_ERR(mailbox_in)) + return -ENOMEM; + + mailbox_in_dma = mailbox_in->dma; + hw_qcn = + (struct mlx4_congestion_control_mb_prio_802_1_qau_params *)mailbox_in->buf; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + inmod = priv->port | ((1 << i) << 8) | + (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16); + + /* Before updating QCN parameter, + * need to set it's modify enable bit to 1 + */ + + hw_qcn->modify_enable_high = cpu_to_be32( + MODIFY_ENABLE_HIGH_MASK); + hw_qcn->modify_enable_low = cpu_to_be32(MODIFY_ENABLE_LOW_MASK); + + hw_qcn->extended_enable = cpu_to_be32(qcn->rpg_enable[i] << RPG_ENABLE_BIT); + hw_qcn->rppp_max_rps = cpu_to_be32(qcn->rppp_max_rps[i]); + hw_qcn->rpg_time_reset = cpu_to_be32(qcn->rpg_time_reset[i]); + hw_qcn->rpg_byte_reset = 
cpu_to_be32(qcn->rpg_byte_reset[i]); + hw_qcn->rpg_threshold = cpu_to_be32(qcn->rpg_threshold[i]); + hw_qcn->rpg_max_rate = cpu_to_be32(qcn->rpg_max_rate[i]); + hw_qcn->rpg_ai_rate = cpu_to_be32(qcn->rpg_ai_rate[i]); + hw_qcn->rpg_hai_rate = cpu_to_be32(qcn->rpg_hai_rate[i]); + hw_qcn->rpg_gd = cpu_to_be32(qcn->rpg_gd[i]); + hw_qcn->rpg_min_dec_fac = cpu_to_be32(qcn->rpg_min_dec_fac[i]); + hw_qcn->rpg_min_rate = cpu_to_be32(qcn->rpg_min_rate[i]); + priv->cndd_state[i] = qcn->cndd_state_machine[i]; + if (qcn->cndd_state_machine[i] == DCB_CNDD_INTERIOR_READY) + hw_qcn->extended_enable |= cpu_to_be32(1 << CN_TAG_BIT); + + err = mlx4_cmd(priv->mdev->dev, mailbox_in_dma, inmod, + MLX4_CONGESTION_CONTROL_SET_PARAMS, + MLX4_CMD_CONGESTION_CTRL_OPCODE, + MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (err) { + mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in); + return err; + } + } + mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in); + return 0; +} + +static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev, + struct ieee_qcn_stats *qcn_stats) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *hw_qcn_stats; + struct mlx4_cmd_mailbox *mailbox_out = NULL; + u64 mailbox_in_dma = 0; + u32 inmod = 0; + int i, err; + + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN)) + return -EOPNOTSUPP; + + mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev); + if (IS_ERR(mailbox_out)) + return -ENOMEM; + + hw_qcn_stats = + (struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *) + mailbox_out->buf; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + inmod = priv->port | ((1 << i) << 8) | + (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16); + err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma, + mailbox_out->dma, inmod, + MLX4_CONGESTION_CONTROL_GET_STATISTICS, + MLX4_CMD_CONGESTION_CTRL_OPCODE, + MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (err) { + mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out); + return err; + } + qcn_stats->rppp_rp_centiseconds[i] = + be64_to_cpu(hw_qcn_stats->rppp_rp_centiseconds); + qcn_stats->rppp_created_rps[i] = + be32_to_cpu(hw_qcn_stats->rppp_created_rps); + } + mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out); + return 0; +} + +const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = { + .ieee_getets = mlx4_en_dcbnl_ieee_getets, + .ieee_setets = mlx4_en_dcbnl_ieee_setets, + .ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate, + .ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate, + .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc, + .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc, + + .getdcbx = mlx4_en_dcbnl_getdcbx, + .setdcbx = mlx4_en_dcbnl_setdcbx, + .ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn, + .ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn, + .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats, +}; + +const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = { + .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc, + .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc, + + .getdcbx = mlx4_en_dcbnl_getdcbx, + .setdcbx = mlx4_en_dcbnl_setdcbx, +}; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c new file mode 100644 index 000000000..a2ddf3d75 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -0,0 +1,2011 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include <linux/kernel.h> +#include <linux/ethtool.h> +#include <linux/netdevice.h> +#include <linux/mlx4/driver.h> +#include <linux/mlx4/device.h> +#include <linux/in.h> +#include <net/ip.h> +#include <linux/bitmap.h> + +#include "mlx4_en.h" +#include "en_port.h" + +#define EN_ETHTOOL_QP_ATTACH (1ull << 63) +#define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff) +#define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff) + +static int mlx4_en_moderation_update(struct mlx4_en_priv *priv) +{ + int i; + int err = 0; + + for (i = 0; i < priv->tx_ring_num; i++) { + priv->tx_cq[i]->moder_cnt = priv->tx_frames; + priv->tx_cq[i]->moder_time = priv->tx_usecs; + if (priv->port_up) { + err = mlx4_en_set_cq_moder(priv, priv->tx_cq[i]); + if (err) + return err; + } + } + + if (priv->adaptive_rx_coal) + return 0; + + for (i = 0; i < priv->rx_ring_num; i++) { + priv->rx_cq[i]->moder_cnt = priv->rx_frames; + priv->rx_cq[i]->moder_time = priv->rx_usecs; + priv->last_moder_time[i] = MLX4_EN_AUTO_CONF; + if (priv->port_up) { + err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]); + if (err) + return err; + } + } + + return err; +} + +static void +mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", + sizeof(drvinfo->version)); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%d", + (u16) (mdev->dev->caps.fw_ver >> 32), + (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff), + (u16) (mdev->dev->caps.fw_ver & 0xffff)); + strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->n_stats = 0; + drvinfo->regdump_len = 0; + drvinfo->eedump_len = 0; +} + +static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = { + "blueflame", +}; + +static const char main_strings[][ETH_GSTRING_LEN] = { + /* main statistics */ + "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", + "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", + "rx_length_errors", "rx_over_errors", "rx_crc_errors", + "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors", + "tx_aborted_errors", 
"tx_carrier_errors", "tx_fifo_errors", + "tx_heartbeat_errors", "tx_window_errors", + + /* port statistics */ + "tso_packets", + "xmit_more", + "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed", + "rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload", + + /* priority flow control statistics rx */ + "rx_pause_prio_0", "rx_pause_duration_prio_0", + "rx_pause_transition_prio_0", + "rx_pause_prio_1", "rx_pause_duration_prio_1", + "rx_pause_transition_prio_1", + "rx_pause_prio_2", "rx_pause_duration_prio_2", + "rx_pause_transition_prio_2", + "rx_pause_prio_3", "rx_pause_duration_prio_3", + "rx_pause_transition_prio_3", + "rx_pause_prio_4", "rx_pause_duration_prio_4", + "rx_pause_transition_prio_4", + "rx_pause_prio_5", "rx_pause_duration_prio_5", + "rx_pause_transition_prio_5", + "rx_pause_prio_6", "rx_pause_duration_prio_6", + "rx_pause_transition_prio_6", + "rx_pause_prio_7", "rx_pause_duration_prio_7", + "rx_pause_transition_prio_7", + + /* flow control statistics rx */ + "rx_pause", "rx_pause_duration", "rx_pause_transition", + + /* priority flow control statistics tx */ + "tx_pause_prio_0", "tx_pause_duration_prio_0", + "tx_pause_transition_prio_0", + "tx_pause_prio_1", "tx_pause_duration_prio_1", + "tx_pause_transition_prio_1", + "tx_pause_prio_2", "tx_pause_duration_prio_2", + "tx_pause_transition_prio_2", + "tx_pause_prio_3", "tx_pause_duration_prio_3", + "tx_pause_transition_prio_3", + "tx_pause_prio_4", "tx_pause_duration_prio_4", + "tx_pause_transition_prio_4", + "tx_pause_prio_5", "tx_pause_duration_prio_5", + "tx_pause_transition_prio_5", + "tx_pause_prio_6", "tx_pause_duration_prio_6", + "tx_pause_transition_prio_6", + "tx_pause_prio_7", "tx_pause_duration_prio_7", + "tx_pause_transition_prio_7", + + /* flow control statistics tx */ + "tx_pause", "tx_pause_duration", "tx_pause_transition", + + /* packet statistics */ + "rx_multicast_packets", + "rx_broadcast_packets", + "rx_jabbers", + "rx_in_range_length_error", + "rx_out_range_length_error", + "tx_multicast_packets", + "tx_broadcast_packets", + "rx_prio_0_packets", "rx_prio_0_bytes", + "rx_prio_1_packets", "rx_prio_1_bytes", + "rx_prio_2_packets", "rx_prio_2_bytes", + "rx_prio_3_packets", "rx_prio_3_bytes", + "rx_prio_4_packets", "rx_prio_4_bytes", + "rx_prio_5_packets", "rx_prio_5_bytes", + "rx_prio_6_packets", "rx_prio_6_bytes", + "rx_prio_7_packets", "rx_prio_7_bytes", + "rx_novlan_packets", "rx_novlan_bytes", + "tx_prio_0_packets", "tx_prio_0_bytes", + "tx_prio_1_packets", "tx_prio_1_bytes", + "tx_prio_2_packets", "tx_prio_2_bytes", + "tx_prio_3_packets", "tx_prio_3_bytes", + "tx_prio_4_packets", "tx_prio_4_bytes", + "tx_prio_5_packets", "tx_prio_5_bytes", + "tx_prio_6_packets", "tx_prio_6_bytes", + "tx_prio_7_packets", "tx_prio_7_bytes", + "tx_novlan_packets", "tx_novlan_bytes", + +}; + +static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= { + "Interrupt Test", + "Link Test", + "Speed Test", + "Register Test", + "Loopback Test", +}; + +static u32 mlx4_en_get_msglevel(struct net_device *dev) +{ + return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable; +} + +static void mlx4_en_set_msglevel(struct net_device *dev, u32 val) +{ + ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val; +} + +static void mlx4_en_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + int err = 0; + u64 config = 0; + u64 mask; + + if ((priv->port < 1) || (priv->port > 2)) { + en_err(priv, "Failed to get WoL information\n"); + return; + 
} + + mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 : + MLX4_DEV_CAP_FLAG_WOL_PORT2; + + if (!(priv->mdev->dev->caps.flags & mask)) { + wol->supported = 0; + wol->wolopts = 0; + return; + } + + err = mlx4_wol_read(priv->mdev->dev, &config, priv->port); + if (err) { + en_err(priv, "Failed to get WoL information\n"); + return; + } + + if (config & MLX4_EN_WOL_MAGIC) + wol->supported = WAKE_MAGIC; + else + wol->supported = 0; + + if (config & MLX4_EN_WOL_ENABLED) + wol->wolopts = WAKE_MAGIC; + else + wol->wolopts = 0; +} + +static int mlx4_en_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + u64 config = 0; + int err = 0; + u64 mask; + + if ((priv->port < 1) || (priv->port > 2)) + return -EOPNOTSUPP; + + mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 : + MLX4_DEV_CAP_FLAG_WOL_PORT2; + + if (!(priv->mdev->dev->caps.flags & mask)) + return -EOPNOTSUPP; + + if (wol->supported & ~WAKE_MAGIC) + return -EINVAL; + + err = mlx4_wol_read(priv->mdev->dev, &config, priv->port); + if (err) { + en_err(priv, "Failed to get WoL info, unable to modify\n"); + return err; + } + + if (wol->wolopts & WAKE_MAGIC) { + config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED | + MLX4_EN_WOL_MAGIC; + } else { + config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC); + config |= MLX4_EN_WOL_DO_MODIFY; + } + + err = mlx4_wol_write(priv->mdev->dev, config, priv->port); + if (err) + en_err(priv, "Failed to set WoL information\n"); + + return err; +} + +struct bitmap_iterator { + unsigned long *stats_bitmap; + unsigned int count; + unsigned int iterator; + bool advance_array; /* if set, force no increments */ +}; + +static inline void bitmap_iterator_init(struct bitmap_iterator *h, + unsigned long *stats_bitmap, + int count) +{ + h->iterator = 0; + h->advance_array = !bitmap_empty(stats_bitmap, count); + h->count = h->advance_array ? bitmap_weight(stats_bitmap, count) + : count; + h->stats_bitmap = stats_bitmap; +} + +static inline int bitmap_iterator_test(struct bitmap_iterator *h) +{ + return !h->advance_array ? 
1 : test_bit(h->iterator, h->stats_bitmap); +} + +static inline int bitmap_iterator_inc(struct bitmap_iterator *h) +{ + return h->iterator++; +} + +static inline unsigned int +bitmap_iterator_count(struct bitmap_iterator *h) +{ + return h->count; +} + +static int mlx4_en_get_sset_count(struct net_device *dev, int sset) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct bitmap_iterator it; + + bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS); + + switch (sset) { + case ETH_SS_STATS: + return bitmap_iterator_count(&it) + + (priv->tx_ring_num * 2) + +#ifdef CONFIG_NET_RX_BUSY_POLL + (priv->rx_ring_num * 5); +#else + (priv->rx_ring_num * 2); +#endif + case ETH_SS_TEST: + return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags + & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2; + case ETH_SS_PRIV_FLAGS: + return ARRAY_SIZE(mlx4_en_priv_flags); + default: + return -EOPNOTSUPP; + } +} + +static void mlx4_en_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, uint64_t *data) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int index = 0; + int i; + struct bitmap_iterator it; + + bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS); + + spin_lock_bh(&priv->stats_lock); + + for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + data[index++] = ((unsigned long *)&priv->stats)[i]; + + for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + data[index++] = ((unsigned long *)&priv->port_stats)[i]; + + for (i = 0; i < NUM_FLOW_PRIORITY_STATS_RX; + i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + data[index++] = + ((u64 *)&priv->rx_priority_flowstats)[i]; + + for (i = 0; i < NUM_FLOW_STATS_RX; i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + data[index++] = ((u64 *)&priv->rx_flowstats)[i]; + + for (i = 0; i < NUM_FLOW_PRIORITY_STATS_TX; + i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + data[index++] = + ((u64 *)&priv->tx_priority_flowstats)[i]; + + for (i = 0; i < NUM_FLOW_STATS_TX; i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + data[index++] = ((u64 *)&priv->tx_flowstats)[i]; + + for (i = 0; i < NUM_PKT_STATS; i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + data[index++] = ((unsigned long *)&priv->pkstats)[i]; + + for (i = 0; i < priv->tx_ring_num; i++) { + data[index++] = priv->tx_ring[i]->packets; + data[index++] = priv->tx_ring[i]->bytes; + } + for (i = 0; i < priv->rx_ring_num; i++) { + data[index++] = priv->rx_ring[i]->packets; + data[index++] = priv->rx_ring[i]->bytes; +#ifdef CONFIG_NET_RX_BUSY_POLL + data[index++] = priv->rx_ring[i]->yields; + data[index++] = priv->rx_ring[i]->misses; + data[index++] = priv->rx_ring[i]->cleaned; +#endif + } + spin_unlock_bh(&priv->stats_lock); + +} + +static void mlx4_en_self_test(struct net_device *dev, + struct ethtool_test *etest, u64 *buf) +{ + mlx4_en_ex_selftest(dev, &etest->flags, buf); +} + +static void mlx4_en_get_strings(struct net_device *dev, + uint32_t stringset, uint8_t *data) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int index = 0; + int i, strings = 0; + struct bitmap_iterator it; + + bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS); + + switch (stringset) { + case ETH_SS_TEST: + for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++) + strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]); + if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) + for (; i < MLX4_EN_NUM_SELF_TEST; 
i++) + strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]); + break; + + case ETH_SS_STATS: + /* Add main counters */ + for (i = 0; i < NUM_MAIN_STATS; i++, strings++, + bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + strcpy(data + (index++) * ETH_GSTRING_LEN, + main_strings[strings]); + + for (i = 0; i < NUM_PORT_STATS; i++, strings++, + bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + strcpy(data + (index++) * ETH_GSTRING_LEN, + main_strings[strings]); + + for (i = 0; i < NUM_FLOW_STATS; i++, strings++, + bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + strcpy(data + (index++) * ETH_GSTRING_LEN, + main_strings[strings]); + + for (i = 0; i < NUM_PKT_STATS; i++, strings++, + bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + strcpy(data + (index++) * ETH_GSTRING_LEN, + main_strings[strings]); + + for (i = 0; i < priv->tx_ring_num; i++) { + sprintf(data + (index++) * ETH_GSTRING_LEN, + "tx%d_packets", i); + sprintf(data + (index++) * ETH_GSTRING_LEN, + "tx%d_bytes", i); + } + for (i = 0; i < priv->rx_ring_num; i++) { + sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_packets", i); + sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_bytes", i); +#ifdef CONFIG_NET_RX_BUSY_POLL + sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_napi_yield", i); + sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_misses", i); + sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_cleaned", i); +#endif + } + break; + case ETH_SS_PRIV_FLAGS: + for (i = 0; i < ARRAY_SIZE(mlx4_en_priv_flags); i++) + strcpy(data + i * ETH_GSTRING_LEN, + mlx4_en_priv_flags[i]); + break; + + } +} + +static u32 mlx4_en_autoneg_get(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + u32 autoneg = AUTONEG_DISABLE; + + if ((mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP) && + (priv->port_state.flags & MLX4_EN_PORT_ANE)) + autoneg = AUTONEG_ENABLE; + + return autoneg; +} + +static u32 ptys_get_supported_port(struct mlx4_ptys_reg *ptys_reg) +{ + u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap); + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T) + | MLX4_PROT_MASK(MLX4_1000BASE_T) + | MLX4_PROT_MASK(MLX4_100BASE_TX))) { + return SUPPORTED_TP; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR) + | MLX4_PROT_MASK(MLX4_10GBASE_SR) + | MLX4_PROT_MASK(MLX4_56GBASE_SR4) + | MLX4_PROT_MASK(MLX4_40GBASE_CR4) + | MLX4_PROT_MASK(MLX4_40GBASE_SR4) + | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) { + return SUPPORTED_FIBRE; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4) + | MLX4_PROT_MASK(MLX4_40GBASE_KR4) + | MLX4_PROT_MASK(MLX4_20GBASE_KR2) + | MLX4_PROT_MASK(MLX4_10GBASE_KR) + | MLX4_PROT_MASK(MLX4_10GBASE_KX4) + | MLX4_PROT_MASK(MLX4_1000BASE_KX))) { + return SUPPORTED_Backplane; + } + return 0; +} + +static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg) +{ + u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_oper); + + if (!eth_proto) /* link down */ + eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap); + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T) + | MLX4_PROT_MASK(MLX4_1000BASE_T) + | MLX4_PROT_MASK(MLX4_100BASE_TX))) { + return PORT_TP; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_SR) + | MLX4_PROT_MASK(MLX4_56GBASE_SR4) + | MLX4_PROT_MASK(MLX4_40GBASE_SR4) + | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) { + return PORT_FIBRE; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR) + | MLX4_PROT_MASK(MLX4_56GBASE_CR4) + | MLX4_PROT_MASK(MLX4_40GBASE_CR4))) { + return 
PORT_DA; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4) + | MLX4_PROT_MASK(MLX4_40GBASE_KR4) + | MLX4_PROT_MASK(MLX4_20GBASE_KR2) + | MLX4_PROT_MASK(MLX4_10GBASE_KR) + | MLX4_PROT_MASK(MLX4_10GBASE_KX4) + | MLX4_PROT_MASK(MLX4_1000BASE_KX))) { + return PORT_NONE; + } + return PORT_OTHER; +} + +#define MLX4_LINK_MODES_SZ \ + (FIELD_SIZEOF(struct mlx4_ptys_reg, eth_proto_cap) * 8) + +enum ethtool_report { + SUPPORTED = 0, + ADVERTISED = 1, + SPEED = 2 +}; + +/* Translates mlx4 link mode to equivalent ethtool Link modes/speed */ +static u32 ptys2ethtool_map[MLX4_LINK_MODES_SZ][3] = { + [MLX4_100BASE_TX] = { + SUPPORTED_100baseT_Full, + ADVERTISED_100baseT_Full, + SPEED_100 + }, + + [MLX4_1000BASE_T] = { + SUPPORTED_1000baseT_Full, + ADVERTISED_1000baseT_Full, + SPEED_1000 + }, + [MLX4_1000BASE_CX_SGMII] = { + SUPPORTED_1000baseKX_Full, + ADVERTISED_1000baseKX_Full, + SPEED_1000 + }, + [MLX4_1000BASE_KX] = { + SUPPORTED_1000baseKX_Full, + ADVERTISED_1000baseKX_Full, + SPEED_1000 + }, + + [MLX4_10GBASE_T] = { + SUPPORTED_10000baseT_Full, + ADVERTISED_10000baseT_Full, + SPEED_10000 + }, + [MLX4_10GBASE_CX4] = { + SUPPORTED_10000baseKX4_Full, + ADVERTISED_10000baseKX4_Full, + SPEED_10000 + }, + [MLX4_10GBASE_KX4] = { + SUPPORTED_10000baseKX4_Full, + ADVERTISED_10000baseKX4_Full, + SPEED_10000 + }, + [MLX4_10GBASE_KR] = { + SUPPORTED_10000baseKR_Full, + ADVERTISED_10000baseKR_Full, + SPEED_10000 + }, + [MLX4_10GBASE_CR] = { + SUPPORTED_10000baseKR_Full, + ADVERTISED_10000baseKR_Full, + SPEED_10000 + }, + [MLX4_10GBASE_SR] = { + SUPPORTED_10000baseKR_Full, + ADVERTISED_10000baseKR_Full, + SPEED_10000 + }, + + [MLX4_20GBASE_KR2] = { + SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full, + ADVERTISED_20000baseMLD2_Full | ADVERTISED_20000baseKR2_Full, + SPEED_20000 + }, + + [MLX4_40GBASE_CR4] = { + SUPPORTED_40000baseCR4_Full, + ADVERTISED_40000baseCR4_Full, + SPEED_40000 + }, + [MLX4_40GBASE_KR4] = { + SUPPORTED_40000baseKR4_Full, + ADVERTISED_40000baseKR4_Full, + SPEED_40000 + }, + [MLX4_40GBASE_SR4] = { + SUPPORTED_40000baseSR4_Full, + ADVERTISED_40000baseSR4_Full, + SPEED_40000 + }, + + [MLX4_56GBASE_KR4] = { + SUPPORTED_56000baseKR4_Full, + ADVERTISED_56000baseKR4_Full, + SPEED_56000 + }, + [MLX4_56GBASE_CR4] = { + SUPPORTED_56000baseCR4_Full, + ADVERTISED_56000baseCR4_Full, + SPEED_56000 + }, + [MLX4_56GBASE_SR4] = { + SUPPORTED_56000baseSR4_Full, + ADVERTISED_56000baseSR4_Full, + SPEED_56000 + }, +}; + +static u32 ptys2ethtool_link_modes(u32 eth_proto, enum ethtool_report report) +{ + int i; + u32 link_modes = 0; + + for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { + if (eth_proto & MLX4_PROT_MASK(i)) + link_modes |= ptys2ethtool_map[i][report]; + } + return link_modes; +} + +static u32 ethtool2ptys_link_modes(u32 link_modes, enum ethtool_report report) +{ + int i; + u32 ptys_modes = 0; + + for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { + if (ptys2ethtool_map[i][report] & link_modes) + ptys_modes |= 1 << i; + } + return ptys_modes; +} + +/* Convert actual speed (SPEED_XXX) to ptys link modes */ +static u32 speed2ptys_link_modes(u32 speed) +{ + int i; + u32 ptys_modes = 0; + + for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { + if (ptys2ethtool_map[i][SPEED] == speed) + ptys_modes |= 1 << i; + } + return ptys_modes; +} + +static int ethtool_get_ptys_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_ptys_reg ptys_reg; + u32 eth_proto; + int ret; + + memset(&ptys_reg, 0, sizeof(ptys_reg)); + ptys_reg.local_port = 
priv->port; + ptys_reg.proto_mask = MLX4_PTYS_EN; + ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, + MLX4_ACCESS_REG_QUERY, &ptys_reg); + if (ret) { + en_warn(priv, "Failed to run mlx4_ACCESS_PTYS_REG status(%x)", + ret); + return ret; + } + en_dbg(DRV, priv, "ptys_reg.proto_mask %x\n", + ptys_reg.proto_mask); + en_dbg(DRV, priv, "ptys_reg.eth_proto_cap %x\n", + be32_to_cpu(ptys_reg.eth_proto_cap)); + en_dbg(DRV, priv, "ptys_reg.eth_proto_admin %x\n", + be32_to_cpu(ptys_reg.eth_proto_admin)); + en_dbg(DRV, priv, "ptys_reg.eth_proto_oper %x\n", + be32_to_cpu(ptys_reg.eth_proto_oper)); + en_dbg(DRV, priv, "ptys_reg.eth_proto_lp_adv %x\n", + be32_to_cpu(ptys_reg.eth_proto_lp_adv)); + + cmd->supported = 0; + cmd->advertising = 0; + + cmd->supported |= ptys_get_supported_port(&ptys_reg); + + eth_proto = be32_to_cpu(ptys_reg.eth_proto_cap); + cmd->supported |= ptys2ethtool_link_modes(eth_proto, SUPPORTED); + + eth_proto = be32_to_cpu(ptys_reg.eth_proto_admin); + cmd->advertising |= ptys2ethtool_link_modes(eth_proto, ADVERTISED); + + cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + cmd->advertising |= (priv->prof->tx_pause) ? ADVERTISED_Pause : 0; + + cmd->advertising |= (priv->prof->tx_pause ^ priv->prof->rx_pause) ? + ADVERTISED_Asym_Pause : 0; + + cmd->port = ptys_get_active_port(&ptys_reg); + cmd->transceiver = (SUPPORTED_TP & cmd->supported) ? + XCVR_EXTERNAL : XCVR_INTERNAL; + + if (mlx4_en_autoneg_get(dev)) { + cmd->supported |= SUPPORTED_Autoneg; + cmd->advertising |= ADVERTISED_Autoneg; + } + + cmd->autoneg = (priv->port_state.flags & MLX4_EN_PORT_ANC) ? + AUTONEG_ENABLE : AUTONEG_DISABLE; + + eth_proto = be32_to_cpu(ptys_reg.eth_proto_lp_adv); + cmd->lp_advertising = ptys2ethtool_link_modes(eth_proto, ADVERTISED); + + cmd->lp_advertising |= (priv->port_state.flags & MLX4_EN_PORT_ANC) ? 
+ ADVERTISED_Autoneg : 0; + + cmd->phy_address = 0; + cmd->mdio_support = 0; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + cmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + + return ret; +} + +static void ethtool_get_default_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int trans_type; + + cmd->autoneg = AUTONEG_DISABLE; + cmd->supported = SUPPORTED_10000baseT_Full; + cmd->advertising = ADVERTISED_10000baseT_Full; + trans_type = priv->port_state.transceiver; + + if (trans_type > 0 && trans_type <= 0xC) { + cmd->port = PORT_FIBRE; + cmd->transceiver = XCVR_EXTERNAL; + cmd->supported |= SUPPORTED_FIBRE; + cmd->advertising |= ADVERTISED_FIBRE; + } else if (trans_type == 0x80 || trans_type == 0) { + cmd->port = PORT_TP; + cmd->transceiver = XCVR_INTERNAL; + cmd->supported |= SUPPORTED_TP; + cmd->advertising |= ADVERTISED_TP; + } else { + cmd->port = -1; + cmd->transceiver = -1; + } +} + +static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int ret = -EINVAL; + + if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) + return -ENOMEM; + + en_dbg(DRV, priv, "query port state.flags ANC(%x) ANE(%x)\n", + priv->port_state.flags & MLX4_EN_PORT_ANC, + priv->port_state.flags & MLX4_EN_PORT_ANE); + + if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) + ret = ethtool_get_ptys_settings(dev, cmd); + if (ret) /* ETH PROT CRTL is not supported or PTYS CMD failed */ + ethtool_get_default_settings(dev, cmd); + + if (netif_carrier_ok(dev)) { + ethtool_cmd_speed_set(cmd, priv->port_state.link_speed); + cmd->duplex = DUPLEX_FULL; + } else { + ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + cmd->duplex = DUPLEX_UNKNOWN; + } + return 0; +} + +/* Calculate PTYS admin according ethtool speed (SPEED_XXX) */ +static __be32 speed_set_ptys_admin(struct mlx4_en_priv *priv, u32 speed, + __be32 proto_cap) +{ + __be32 proto_admin = 0; + + if (!speed) { /* Speed = 0 ==> Reset Link modes */ + proto_admin = proto_cap; + en_info(priv, "Speed was set to 0, Reset advertised Link Modes to default (%x)\n", + be32_to_cpu(proto_cap)); + } else { + u32 ptys_link_modes = speed2ptys_link_modes(speed); + + proto_admin = cpu_to_be32(ptys_link_modes) & proto_cap; + en_info(priv, "Setting Speed to %d\n", speed); + } + return proto_admin; +} + +static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_ptys_reg ptys_reg; + __be32 proto_admin; + int ret; + + u32 ptys_adv = ethtool2ptys_link_modes(cmd->advertising, ADVERTISED); + int speed = ethtool_cmd_speed(cmd); + + en_dbg(DRV, priv, "Set Speed=%d adv=0x%x autoneg=%d duplex=%d\n", + speed, cmd->advertising, cmd->autoneg, cmd->duplex); + + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) || + (cmd->duplex == DUPLEX_HALF)) + return -EINVAL; + + memset(&ptys_reg, 0, sizeof(ptys_reg)); + ptys_reg.local_port = priv->port; + ptys_reg.proto_mask = MLX4_PTYS_EN; + ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, + MLX4_ACCESS_REG_QUERY, &ptys_reg); + if (ret) { + en_warn(priv, "Failed to QUERY mlx4_ACCESS_PTYS_REG status(%x)\n", + ret); + return 0; + } + + proto_admin = cmd->autoneg == AUTONEG_ENABLE ? 
+ cpu_to_be32(ptys_adv) : + speed_set_ptys_admin(priv, speed, + ptys_reg.eth_proto_cap); + + proto_admin &= ptys_reg.eth_proto_cap; + if (!proto_admin) { + en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n"); + return -EINVAL; /* nothing to change due to bad input */ + } + + if (proto_admin == ptys_reg.eth_proto_admin) + return 0; /* Nothing to change */ + + en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n", + be32_to_cpu(proto_admin)); + + ptys_reg.eth_proto_admin = proto_admin; + ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, MLX4_ACCESS_REG_WRITE, + &ptys_reg); + if (ret) { + en_warn(priv, "Failed to write mlx4_ACCESS_PTYS_REG eth_proto_admin(0x%x) status(0x%x)", + be32_to_cpu(ptys_reg.eth_proto_admin), ret); + return ret; + } + + mutex_lock(&priv->mdev->state_lock); + if (priv->port_up) { + en_warn(priv, "Port link mode changed, restarting port...\n"); + mlx4_en_stop_port(dev, 1); + if (mlx4_en_start_port(dev)) + en_err(priv, "Failed restarting port %d\n", priv->port); + } + mutex_unlock(&priv->mdev->state_lock); + return 0; +} + +static int mlx4_en_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + coal->tx_coalesce_usecs = priv->tx_usecs; + coal->tx_max_coalesced_frames = priv->tx_frames; + coal->tx_max_coalesced_frames_irq = priv->tx_work_limit; + + coal->rx_coalesce_usecs = priv->rx_usecs; + coal->rx_max_coalesced_frames = priv->rx_frames; + + coal->pkt_rate_low = priv->pkt_rate_low; + coal->rx_coalesce_usecs_low = priv->rx_usecs_low; + coal->pkt_rate_high = priv->pkt_rate_high; + coal->rx_coalesce_usecs_high = priv->rx_usecs_high; + coal->rate_sample_interval = priv->sample_interval; + coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal; + + return 0; +} + +static int mlx4_en_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + if (!coal->tx_max_coalesced_frames_irq) + return -EINVAL; + + priv->rx_frames = (coal->rx_max_coalesced_frames == + MLX4_EN_AUTO_CONF) ? + MLX4_EN_RX_COAL_TARGET : + coal->rx_max_coalesced_frames; + priv->rx_usecs = (coal->rx_coalesce_usecs == + MLX4_EN_AUTO_CONF) ? 
+ MLX4_EN_RX_COAL_TIME : + coal->rx_coalesce_usecs; + + /* Setting TX coalescing parameters */ + if (coal->tx_coalesce_usecs != priv->tx_usecs || + coal->tx_max_coalesced_frames != priv->tx_frames) { + priv->tx_usecs = coal->tx_coalesce_usecs; + priv->tx_frames = coal->tx_max_coalesced_frames; + } + + /* Set adaptive coalescing params */ + priv->pkt_rate_low = coal->pkt_rate_low; + priv->rx_usecs_low = coal->rx_coalesce_usecs_low; + priv->pkt_rate_high = coal->pkt_rate_high; + priv->rx_usecs_high = coal->rx_coalesce_usecs_high; + priv->sample_interval = coal->rate_sample_interval; + priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce; + priv->tx_work_limit = coal->tx_max_coalesced_frames_irq; + + return mlx4_en_moderation_update(priv); +} + +static int mlx4_en_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int err; + + if (pause->autoneg) + return -EINVAL; + + priv->prof->tx_pause = pause->tx_pause != 0; + priv->prof->rx_pause = pause->rx_pause != 0; + err = mlx4_SET_PORT_general(mdev->dev, priv->port, + priv->rx_skb_size + ETH_FCS_LEN, + priv->prof->tx_pause, + priv->prof->tx_ppp, + priv->prof->rx_pause, + priv->prof->rx_ppp); + if (err) + en_err(priv, "Failed setting pause params\n"); + else + mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap, + priv->prof->rx_ppp, + priv->prof->rx_pause, + priv->prof->tx_ppp, + priv->prof->tx_pause); + + return err; +} + +static void mlx4_en_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + pause->tx_pause = priv->prof->tx_pause; + pause->rx_pause = priv->prof->rx_pause; +} + +static int mlx4_en_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *param) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + u32 rx_size, tx_size; + int port_up = 0; + int err = 0; + + if (param->rx_jumbo_pending || param->rx_mini_pending) + return -EINVAL; + + rx_size = roundup_pow_of_two(param->rx_pending); + rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE); + rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE); + tx_size = roundup_pow_of_two(param->tx_pending); + tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE); + tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE); + + if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size : + priv->rx_ring[0]->size) && + tx_size == priv->tx_ring[0]->size) + return 0; + + mutex_lock(&mdev->state_lock); + if (priv->port_up) { + port_up = 1; + mlx4_en_stop_port(dev, 1); + } + + mlx4_en_free_resources(priv); + + priv->prof->tx_ring_size = tx_size; + priv->prof->rx_ring_size = rx_size; + + err = mlx4_en_alloc_resources(priv); + if (err) { + en_err(priv, "Failed reallocating port resources\n"); + goto out; + } + if (port_up) { + err = mlx4_en_start_port(dev); + if (err) + en_err(priv, "Failed starting port\n"); + } + + err = mlx4_en_moderation_update(priv); + +out: + mutex_unlock(&mdev->state_lock); + return err; +} + +static void mlx4_en_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *param) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + memset(param, 0, sizeof(*param)); + param->rx_max_pending = MLX4_EN_MAX_RX_SIZE; + param->tx_max_pending = MLX4_EN_MAX_TX_SIZE; + param->rx_pending = priv->port_up ? 
+ priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size; + param->tx_pending = priv->tx_ring[0]->size; +} + +static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + return priv->rx_ring_num; +} + +static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev) +{ + return MLX4_EN_RSS_KEY_SIZE; +} + +static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + /* check if requested function is supported by the device */ + if (hfunc == ETH_RSS_HASH_TOP) { + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) + return -EINVAL; + if (!(dev->features & NETIF_F_RXHASH)) + en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n"); + return 0; + } else if (hfunc == ETH_RSS_HASH_XOR) { + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR)) + return -EINVAL; + if (dev->features & NETIF_F_RXHASH) + en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n"); + return 0; + } + + return -EINVAL; +} + +static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key, + u8 *hfunc) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_rss_map *rss_map = &priv->rss_map; + int rss_rings; + size_t n = priv->rx_ring_num; + int err = 0; + + rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num; + rss_rings = 1 << ilog2(rss_rings); + + while (n--) { + if (!ring_index) + break; + ring_index[n] = rss_map->qps[n % rss_rings].qpn - + rss_map->base_qpn; + } + if (key) + memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE); + if (hfunc) + *hfunc = priv->rss_hash_fn; + return err; +} + +static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index, + const u8 *key, const u8 hfunc) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int port_up = 0; + int err = 0; + int i; + int rss_rings = 0; + + /* Calculate RSS table size and make sure flows are spread evenly + * between rings + */ + for (i = 0; i < priv->rx_ring_num; i++) { + if (!ring_index) + continue; + if (i > 0 && !ring_index[i] && !rss_rings) + rss_rings = i; + + if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num))) + return -EINVAL; + } + + if (!rss_rings) + rss_rings = priv->rx_ring_num; + + /* RSS table size must be an order of 2 */ + if (!is_power_of_2(rss_rings)) + return -EINVAL; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE) { + err = mlx4_en_check_rxfh_func(dev, hfunc); + if (err) + return err; + } + + mutex_lock(&mdev->state_lock); + if (priv->port_up) { + port_up = 1; + mlx4_en_stop_port(dev, 1); + } + + if (ring_index) + priv->prof->rss_rings = rss_rings; + if (key) + memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE); + if (hfunc != ETH_RSS_HASH_NO_CHANGE) + priv->rss_hash_fn = hfunc; + + if (port_up) { + err = mlx4_en_start_port(dev); + if (err) + en_err(priv, "Failed starting port\n"); + } + + mutex_unlock(&mdev->state_lock); + return err; +} + +#define all_zeros_or_all_ones(field) \ + ((field) == 0 || (field) == (__force typeof(field))-1) + +static int mlx4_en_validate_flow(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_usrip4_spec *l3_mask; + struct ethtool_tcpip4_spec *l4_mask; + struct ethhdr *eth_mask; + + if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) + return -EINVAL; + + if (cmd->fs.flow_type & FLOW_MAC_EXT) { + /* dest mac mask must be ff:ff:ff:ff:ff:ff */ + if 
(!is_broadcast_ether_addr(cmd->fs.m_ext.h_dest)) + return -EINVAL; + } + + switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + if (cmd->fs.m_u.tcp_ip4_spec.tos) + return -EINVAL; + l4_mask = &cmd->fs.m_u.tcp_ip4_spec; + /* don't allow mask which isn't all 0 or 1 */ + if (!all_zeros_or_all_ones(l4_mask->ip4src) || + !all_zeros_or_all_ones(l4_mask->ip4dst) || + !all_zeros_or_all_ones(l4_mask->psrc) || + !all_zeros_or_all_ones(l4_mask->pdst)) + return -EINVAL; + break; + case IP_USER_FLOW: + l3_mask = &cmd->fs.m_u.usr_ip4_spec; + if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto || + cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 || + (!l3_mask->ip4src && !l3_mask->ip4dst) || + !all_zeros_or_all_ones(l3_mask->ip4src) || + !all_zeros_or_all_ones(l3_mask->ip4dst)) + return -EINVAL; + break; + case ETHER_FLOW: + eth_mask = &cmd->fs.m_u.ether_spec; + /* source mac mask must not be set */ + if (!is_zero_ether_addr(eth_mask->h_source)) + return -EINVAL; + + /* dest mac mask must be ff:ff:ff:ff:ff:ff */ + if (!is_broadcast_ether_addr(eth_mask->h_dest)) + return -EINVAL; + + if (!all_zeros_or_all_ones(eth_mask->h_proto)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + if ((cmd->fs.flow_type & FLOW_EXT)) { + if (cmd->fs.m_ext.vlan_etype || + !((cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) == + 0 || + (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) == + cpu_to_be16(VLAN_VID_MASK))) + return -EINVAL; + + if (cmd->fs.m_ext.vlan_tci) { + if (be16_to_cpu(cmd->fs.h_ext.vlan_tci) >= VLAN_N_VID) + return -EINVAL; + + } + } + + return 0; +} + +static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd, + struct list_head *rule_list_h, + struct mlx4_spec_list *spec_l2, + unsigned char *mac) +{ + int err = 0; + __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16); + + spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH; + memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN); + memcpy(spec_l2->eth.dst_mac, mac, ETH_ALEN); + + if ((cmd->fs.flow_type & FLOW_EXT) && + (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) { + spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci; + spec_l2->eth.vlan_id_msk = cpu_to_be16(VLAN_VID_MASK); + } + + list_add_tail(&spec_l2->list, rule_list_h); + + return err; +} + +static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv, + struct ethtool_rxnfc *cmd, + struct list_head *rule_list_h, + struct mlx4_spec_list *spec_l2, + __be32 ipv4_dst) +{ +#ifdef CONFIG_INET + unsigned char mac[ETH_ALEN]; + + if (!ipv4_is_multicast(ipv4_dst)) { + if (cmd->fs.flow_type & FLOW_MAC_EXT) + memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN); + else + memcpy(&mac, priv->dev->dev_addr, ETH_ALEN); + } else { + ip_eth_mc_map(ipv4_dst, mac); + } + + return mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &mac[0]); +#else + return -EINVAL; +#endif +} + +static int add_ip_rule(struct mlx4_en_priv *priv, + struct ethtool_rxnfc *cmd, + struct list_head *list_h) +{ + int err; + struct mlx4_spec_list *spec_l2 = NULL; + struct mlx4_spec_list *spec_l3 = NULL; + struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec; + + spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL); + spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL); + if (!spec_l2 || !spec_l3) { + err = -ENOMEM; + goto free_spec; + } + + err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, spec_l2, + cmd->fs.h_u. 
+ usr_ip4_spec.ip4dst); + if (err) + goto free_spec; + spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4; + spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src; + if (l3_mask->ip4src) + spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK; + spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst; + if (l3_mask->ip4dst) + spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK; + list_add_tail(&spec_l3->list, list_h); + + return 0; + +free_spec: + kfree(spec_l2); + kfree(spec_l3); + return err; +} + +static int add_tcp_udp_rule(struct mlx4_en_priv *priv, + struct ethtool_rxnfc *cmd, + struct list_head *list_h, int proto) +{ + int err; + struct mlx4_spec_list *spec_l2 = NULL; + struct mlx4_spec_list *spec_l3 = NULL; + struct mlx4_spec_list *spec_l4 = NULL; + struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec; + + spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL); + spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL); + spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL); + if (!spec_l2 || !spec_l3 || !spec_l4) { + err = -ENOMEM; + goto free_spec; + } + + spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4; + + if (proto == TCP_V4_FLOW) { + err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, + spec_l2, + cmd->fs.h_u. + tcp_ip4_spec.ip4dst); + if (err) + goto free_spec; + spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP; + spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src; + spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst; + spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc; + spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst; + } else { + err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, + spec_l2, + cmd->fs.h_u. + udp_ip4_spec.ip4dst); + if (err) + goto free_spec; + spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP; + spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src; + spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst; + spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc; + spec_l4->tcp_udp.dst_port = cmd->fs.h_u.udp_ip4_spec.pdst; + } + + if (l4_mask->ip4src) + spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK; + if (l4_mask->ip4dst) + spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK; + + if (l4_mask->psrc) + spec_l4->tcp_udp.src_port_msk = EN_ETHTOOL_SHORT_MASK; + if (l4_mask->pdst) + spec_l4->tcp_udp.dst_port_msk = EN_ETHTOOL_SHORT_MASK; + + list_add_tail(&spec_l3->list, list_h); + list_add_tail(&spec_l4->list, list_h); + + return 0; + +free_spec: + kfree(spec_l2); + kfree(spec_l3); + kfree(spec_l4); + return err; +} + +static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev, + struct ethtool_rxnfc *cmd, + struct list_head *rule_list_h) +{ + int err; + struct ethhdr *eth_spec; + struct mlx4_spec_list *spec_l2; + struct mlx4_en_priv *priv = netdev_priv(dev); + + err = mlx4_en_validate_flow(dev, cmd); + if (err) + return err; + + switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case ETHER_FLOW: + spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL); + if (!spec_l2) + return -ENOMEM; + + eth_spec = &cmd->fs.h_u.ether_spec; + mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, + &eth_spec->h_dest[0]); + spec_l2->eth.ether_type = eth_spec->h_proto; + if (eth_spec->h_proto) + spec_l2->eth.ether_type_enable = 1; + break; + case IP_USER_FLOW: + err = add_ip_rule(priv, cmd, rule_list_h); + break; + case TCP_V4_FLOW: + err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW); + break; + case UDP_V4_FLOW: + err = add_tcp_udp_rule(priv, cmd, rule_list_h, UDP_V4_FLOW); + break; + } + + return err; +} + +static int mlx4_en_flow_replace(struct net_device 
*dev, + struct ethtool_rxnfc *cmd) +{ + int err; + struct mlx4_en_priv *priv = netdev_priv(dev); + struct ethtool_flow_id *loc_rule; + struct mlx4_spec_list *spec, *tmp_spec; + u32 qpn; + u64 reg_id; + + struct mlx4_net_trans_rule rule = { + .queue_mode = MLX4_NET_TRANS_Q_FIFO, + .exclusive = 0, + .allow_loopback = 1, + .promisc_mode = MLX4_FS_REGULAR, + }; + + rule.port = priv->port; + rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location; + INIT_LIST_HEAD(&rule.list); + + /* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */ + if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC) + qpn = priv->drop_qp.qpn; + else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) { + qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1); + } else { + if (cmd->fs.ring_cookie >= priv->rx_ring_num) { + en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n", + cmd->fs.ring_cookie); + return -EINVAL; + } + qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn; + if (!qpn) { + en_warn(priv, "rxnfc: RX ring (%llu) is inactive\n", + cmd->fs.ring_cookie); + return -EINVAL; + } + } + rule.qpn = qpn; + err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list); + if (err) + goto out_free_list; + + loc_rule = &priv->ethtool_rules[cmd->fs.location]; + if (loc_rule->id) { + err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id); + if (err) { + en_err(priv, "Fail to detach network rule at location %d. registration id = %llx\n", + cmd->fs.location, loc_rule->id); + goto out_free_list; + } + loc_rule->id = 0; + memset(&loc_rule->flow_spec, 0, + sizeof(struct ethtool_rx_flow_spec)); + list_del(&loc_rule->list); + } + err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id); + if (err) { + en_err(priv, "Fail to attach network rule at location %d\n", + cmd->fs.location); + goto out_free_list; + } + loc_rule->id = reg_id; + memcpy(&loc_rule->flow_spec, &cmd->fs, + sizeof(struct ethtool_rx_flow_spec)); + list_add_tail(&loc_rule->list, &priv->ethtool_list); + +out_free_list: + list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) { + list_del(&spec->list); + kfree(spec); + } + return err; +} + +static int mlx4_en_flow_detach(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + int err = 0; + struct ethtool_flow_id *rule; + struct mlx4_en_priv *priv = netdev_priv(dev); + + if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) + return -EINVAL; + + rule = &priv->ethtool_rules[cmd->fs.location]; + if (!rule->id) { + err = -ENOENT; + goto out; + } + + err = mlx4_flow_detach(priv->mdev->dev, rule->id); + if (err) { + en_err(priv, "Fail to detach network rule at location %d. 
registration id = 0x%llx\n", + cmd->fs.location, rule->id); + goto out; + } + rule->id = 0; + memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec)); + list_del(&rule->list); +out: + return err; + +} + +static int mlx4_en_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd, + int loc) +{ + int err = 0; + struct ethtool_flow_id *rule; + struct mlx4_en_priv *priv = netdev_priv(dev); + + if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES) + return -EINVAL; + + rule = &priv->ethtool_rules[loc]; + if (rule->id) + memcpy(&cmd->fs, &rule->flow_spec, + sizeof(struct ethtool_rx_flow_spec)); + else + err = -ENOENT; + + return err; +} + +static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv) +{ + + int i, res = 0; + for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) { + if (priv->ethtool_rules[i].id) + res++; + } + return res; + +} + +static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int err = 0; + int i = 0, priority = 0; + + if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT || + cmd->cmd == ETHTOOL_GRXCLSRULE || + cmd->cmd == ETHTOOL_GRXCLSRLALL) && + (mdev->dev->caps.steering_mode != + MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up)) + return -EINVAL; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = priv->rx_ring_num; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = mlx4_en_get_num_flows(priv); + break; + case ETHTOOL_GRXCLSRULE: + err = mlx4_en_get_flow(dev, cmd, cmd->fs.location); + break; + case ETHTOOL_GRXCLSRLALL: + while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) { + err = mlx4_en_get_flow(dev, cmd, i); + if (!err) + rule_locs[priority++] = i; + i++; + } + err = 0; + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + int err = 0; + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + + if (mdev->dev->caps.steering_mode != + MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up) + return -EINVAL; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + err = mlx4_en_flow_replace(dev, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + err = mlx4_en_flow_detach(dev, cmd); + break; + default: + en_warn(priv, "Unsupported ethtool command. 
(%d)\n", cmd->cmd); + return -EINVAL; + } + + return err; +} + +static void mlx4_en_get_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + memset(channel, 0, sizeof(*channel)); + + channel->max_rx = MAX_RX_RINGS; + channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP; + + channel->rx_count = priv->rx_ring_num; + channel->tx_count = priv->tx_ring_num / MLX4_EN_NUM_UP; +} + +static int mlx4_en_set_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int port_up = 0; + int err = 0; + + if (channel->other_count || channel->combined_count || + channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP || + channel->rx_count > MAX_RX_RINGS || + !channel->tx_count || !channel->rx_count) + return -EINVAL; + + mutex_lock(&mdev->state_lock); + if (priv->port_up) { + port_up = 1; + mlx4_en_stop_port(dev, 1); + } + + mlx4_en_free_resources(priv); + + priv->num_tx_rings_p_up = channel->tx_count; + priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP; + priv->rx_ring_num = channel->rx_count; + + err = mlx4_en_alloc_resources(priv); + if (err) { + en_err(priv, "Failed reallocating port resources\n"); + goto out; + } + + netif_set_real_num_tx_queues(dev, priv->tx_ring_num); + netif_set_real_num_rx_queues(dev, priv->rx_ring_num); + + if (dev->num_tc) + mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP); + + en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num); + en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num); + + if (port_up) { + err = mlx4_en_start_port(dev); + if (err) + en_err(priv, "Failed starting port\n"); + } + + err = mlx4_en_moderation_update(priv); + +out: + mutex_unlock(&mdev->state_lock); + return err; +} + +static int mlx4_en_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int ret; + + ret = ethtool_op_get_ts_info(dev, info); + if (ret) + return ret; + + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { + info->so_timestamping |= + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + info->rx_filters = + (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + + if (mdev->ptp_clock) + info->phc_index = ptp_clock_index(mdev->ptp_clock); + } + + return ret; +} + +static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME); + bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME); + int i; + + if (bf_enabled_new == bf_enabled_old) + return 0; /* Nothing to do */ + + if (bf_enabled_new) { + bool bf_supported = true; + + for (i = 0; i < priv->tx_ring_num; i++) + bf_supported &= priv->tx_ring[i]->bf_alloced; + + if (!bf_supported) { + en_err(priv, "BlueFlame is not supported\n"); + return -EINVAL; + } + + priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME; + } else { + priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME; + } + + for (i = 0; i < priv->tx_ring_num; i++) + priv->tx_ring[i]->bf_enabled = bf_enabled_new; + + en_info(priv, "BlueFlame %s\n", + bf_enabled_new ? 
"Enabled" : "Disabled"); + + return 0; +} + +static u32 mlx4_en_get_priv_flags(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + return priv->pflags; +} + +static int mlx4_en_get_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + void *data) +{ + const struct mlx4_en_priv *priv = netdev_priv(dev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_TX_COPYBREAK: + *(u32 *)data = priv->prof->inline_thold; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int mlx4_en_set_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int val, ret = 0; + + switch (tuna->id) { + case ETHTOOL_TX_COPYBREAK: + val = *(u32 *)data; + if (val < MIN_PKT_LEN || val > MAX_INLINE) + ret = -EINVAL; + else + priv->prof->inline_thold = val; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int mlx4_en_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int ret; + u8 data[4]; + + /* Read first 2 bytes to get Module & REV ID */ + ret = mlx4_get_module_info(mdev->dev, priv->port, + 0/*offset*/, 2/*size*/, data); + if (ret < 2) + return -EIO; + + switch (data[0] /* identifier */) { + case MLX4_MODULE_ID_QSFP: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + case MLX4_MODULE_ID_QSFP_PLUS: + if (data[1] >= 0x3) { /* revision id */ + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + } + break; + case MLX4_MODULE_ID_QSFP28: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + break; + case MLX4_MODULE_ID_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + default: + return -ENOSYS; + } + + return 0; +} + +static int mlx4_en_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int offset = ee->offset; + int i = 0, ret; + + if (ee->len == 0) + return -EINVAL; + + memset(data, 0, ee->len); + + while (i < ee->len) { + en_dbg(DRV, priv, + "mlx4_get_module_info i(%d) offset(%d) len(%d)\n", + i, offset, ee->len - i); + + ret = mlx4_get_module_info(mdev->dev, priv->port, + offset, ee->len - i, data + i); + + if (!ret) /* Done reading */ + return 0; + + if (ret < 0) { + en_err(priv, + "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n", + i, offset, ee->len - i, ret); + return 0; + } + + i += ret; + offset += ret; + } + return 0; +} + +static int mlx4_en_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + int err; + u16 beacon_duration; + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + + if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_BEACON)) + return -EOPNOTSUPP; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + beacon_duration = PORT_BEACON_MAX_LIMIT; + break; + case ETHTOOL_ID_INACTIVE: + beacon_duration = 0; + break; + default: + return -EOPNOTSUPP; + } + + err = mlx4_SET_PORT_BEACON(mdev->dev, priv->port, beacon_duration); + return err; +} + +const struct ethtool_ops mlx4_en_ethtool_ops = { + .get_drvinfo = mlx4_en_get_drvinfo, + 
.get_settings = mlx4_en_get_settings, + .set_settings = mlx4_en_set_settings, + .get_link = ethtool_op_get_link, + .get_strings = mlx4_en_get_strings, + .get_sset_count = mlx4_en_get_sset_count, + .get_ethtool_stats = mlx4_en_get_ethtool_stats, + .self_test = mlx4_en_self_test, + .set_phys_id = mlx4_en_set_phys_id, + .get_wol = mlx4_en_get_wol, + .set_wol = mlx4_en_set_wol, + .get_msglevel = mlx4_en_get_msglevel, + .set_msglevel = mlx4_en_set_msglevel, + .get_coalesce = mlx4_en_get_coalesce, + .set_coalesce = mlx4_en_set_coalesce, + .get_pauseparam = mlx4_en_get_pauseparam, + .set_pauseparam = mlx4_en_set_pauseparam, + .get_ringparam = mlx4_en_get_ringparam, + .set_ringparam = mlx4_en_set_ringparam, + .get_rxnfc = mlx4_en_get_rxnfc, + .set_rxnfc = mlx4_en_set_rxnfc, + .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size, + .get_rxfh_key_size = mlx4_en_get_rxfh_key_size, + .get_rxfh = mlx4_en_get_rxfh, + .set_rxfh = mlx4_en_set_rxfh, + .get_channels = mlx4_en_get_channels, + .set_channels = mlx4_en_set_channels, + .get_ts_info = mlx4_en_get_ts_info, + .set_priv_flags = mlx4_en_set_priv_flags, + .get_priv_flags = mlx4_en_get_priv_flags, + .get_tunable = mlx4_en_get_tunable, + .set_tunable = mlx4_en_set_tunable, + .get_module_info = mlx4_en_get_module_info, + .get_module_eeprom = mlx4_en_get_module_eeprom +}; + + + + + diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c new file mode 100644 index 000000000..913b716ed --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -0,0 +1,375 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#include <linux/cpumask.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/slab.h> + +#include <linux/mlx4/driver.h> +#include <linux/mlx4/device.h> +#include <linux/mlx4/cmd.h> + +#include "mlx4_en.h" + +MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin"); +MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRV_VERSION " ("DRV_RELDATE")"); + +static const char mlx4_en_version[] = + DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v" + DRV_VERSION " (" DRV_RELDATE ")\n"; + +#define MLX4_EN_PARM_INT(X, def_val, desc) \ + static unsigned int X = def_val;\ + module_param(X , uint, 0444); \ + MODULE_PARM_DESC(X, desc); + + +/* + * Device scope module parameters + */ + +/* Enable RSS UDP traffic */ +MLX4_EN_PARM_INT(udp_rss, 1, + "Enable RSS for incoming UDP traffic or disabled (0)"); + +/* Priority pausing */ +MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]." + " Per priority bit mask"); +MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]." + " Per priority bit mask"); + +MLX4_EN_PARM_INT(inline_thold, MAX_INLINE, + "Threshold for using inline data (range: 17-104, default: 104)"); + +#define MAX_PFC_TX 0xff +#define MAX_PFC_RX 0xff + +void en_print(const char *level, const struct mlx4_en_priv *priv, + const char *format, ...) +{ + va_list args; + struct va_format vaf; + + va_start(args, format); + + vaf.fmt = format; + vaf.va = &args; + if (priv->registered) + printk("%s%s: %s: %pV", + level, DRV_NAME, priv->dev->name, &vaf); + else + printk("%s%s: %s: Port %d: %pV", + level, DRV_NAME, dev_name(&priv->mdev->pdev->dev), + priv->port, &vaf); + va_end(args); +} + +void mlx4_en_update_loopback_state(struct net_device *dev, + netdev_features_t features) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + if (features & NETIF_F_LOOPBACK) + priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); + else + priv->ctrl_flags &= cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK); + + priv->flags &= ~(MLX4_EN_FLAG_RX_FILTER_NEEDED| + MLX4_EN_FLAG_ENABLE_HW_LOOPBACK); + + /* Drop the packet if SRIOV is not enabled + * and not performing the selftest or flb disabled + */ + if (mlx4_is_mfunc(priv->mdev->dev) && + !(features & NETIF_F_LOOPBACK) && !priv->validate_loopback) + priv->flags |= MLX4_EN_FLAG_RX_FILTER_NEEDED; + + /* Set dmac in Tx WQE if we are in SRIOV mode or if loopback selftest + * is requested + */ + if (mlx4_is_mfunc(priv->mdev->dev) || priv->validate_loopback) + priv->flags |= MLX4_EN_FLAG_ENABLE_HW_LOOPBACK; +} + +static int mlx4_en_get_profile(struct mlx4_en_dev *mdev) +{ + struct mlx4_en_profile *params = &mdev->profile; + int i; + + params->udp_rss = udp_rss; + params->num_tx_rings_p_up = mlx4_low_memory_profile() ? 
+ MLX4_EN_MIN_TX_RING_P_UP : + min_t(int, num_online_cpus(), MLX4_EN_MAX_TX_RING_P_UP); + + if (params->udp_rss && !(mdev->dev->caps.flags + & MLX4_DEV_CAP_FLAG_UDP_RSS)) { + mlx4_warn(mdev, "UDP RSS is not supported on this device\n"); + params->udp_rss = 0; + } + for (i = 1; i <= MLX4_MAX_PORTS; i++) { + params->prof[i].rx_pause = 1; + params->prof[i].rx_ppp = pfcrx; + params->prof[i].tx_pause = 1; + params->prof[i].tx_ppp = pfctx; + params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE; + params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; + params->prof[i].tx_ring_num = params->num_tx_rings_p_up * + MLX4_EN_NUM_UP; + params->prof[i].rss_rings = 0; + params->prof[i].inline_thold = inline_thold; + } + + return 0; +} + +static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port) +{ + struct mlx4_en_dev *endev = ctx; + + return endev->pndev[port]; +} + +static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, + enum mlx4_dev_event event, unsigned long port) +{ + struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr; + struct mlx4_en_priv *priv; + + switch (event) { + case MLX4_DEV_EVENT_PORT_UP: + case MLX4_DEV_EVENT_PORT_DOWN: + if (!mdev->pndev[port]) + return; + priv = netdev_priv(mdev->pndev[port]); + /* To prevent races, we poll the link state in a separate + task rather than changing it here */ + priv->link_state = event; + queue_work(mdev->workqueue, &priv->linkstate_task); + break; + + case MLX4_DEV_EVENT_CATASTROPHIC_ERROR: + mlx4_err(mdev, "Internal error detected, restarting device\n"); + break; + + case MLX4_DEV_EVENT_SLAVE_INIT: + case MLX4_DEV_EVENT_SLAVE_SHUTDOWN: + break; + default: + if (port < 1 || port > dev->caps.num_ports || + !mdev->pndev[port]) + return; + mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, + (int) port); + } +} + +static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr) +{ + struct mlx4_en_dev *mdev = endev_ptr; + int i; + + mutex_lock(&mdev->state_lock); + mdev->device_up = false; + mutex_unlock(&mdev->state_lock); + + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) + if (mdev->pndev[i]) + mlx4_en_destroy_netdev(mdev->pndev[i]); + + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) + mlx4_en_remove_timestamp(mdev); + + flush_workqueue(mdev->workqueue); + destroy_workqueue(mdev->workqueue); + (void) mlx4_mr_free(dev, &mdev->mr); + iounmap(mdev->uar_map); + mlx4_uar_free(dev, &mdev->priv_uar); + mlx4_pd_free(dev, mdev->priv_pdn); + if (mdev->nb.notifier_call) + unregister_netdevice_notifier(&mdev->nb); + kfree(mdev); +} + +static void *mlx4_en_add(struct mlx4_dev *dev) +{ + struct mlx4_en_dev *mdev; + int i; + + printk_once(KERN_INFO "%s", mlx4_en_version); + + mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); + if (!mdev) + goto err_free_res; + + if (mlx4_pd_alloc(dev, &mdev->priv_pdn)) + goto err_free_dev; + + if (mlx4_uar_alloc(dev, &mdev->priv_uar)) + goto err_pd; + + mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT, + PAGE_SIZE); + if (!mdev->uar_map) + goto err_uar; + spin_lock_init(&mdev->uar_lock); + + mdev->dev = dev; + mdev->dma_device = &dev->persist->pdev->dev; + mdev->pdev = dev->persist->pdev; + mdev->device_up = false; + + mdev->LSO_support = !!(dev->caps.flags & (1 << 15)); + if (!mdev->LSO_support) + mlx4_warn(mdev, "LSO not supported, please upgrade to later FW version to enable LSO\n"); + + if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull, + MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ, + 0, 0, &mdev->mr)) { + mlx4_err(mdev, "Failed allocating memory region\n"); 
+ goto err_map; + } + if (mlx4_mr_enable(mdev->dev, &mdev->mr)) { + mlx4_err(mdev, "Failed enabling memory region\n"); + goto err_mr; + } + + /* Build device profile according to supplied module parameters */ + if (mlx4_en_get_profile(mdev)) { + mlx4_err(mdev, "Bad module parameters, aborting\n"); + goto err_mr; + } + + /* Configure which ports to start according to module parameters */ + mdev->port_cnt = 0; + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) + mdev->port_cnt++; + + /* Initialize time stamp mechanism */ + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) + mlx4_en_init_timestamp(mdev); + + /* Set default number of RX rings*/ + mlx4_en_set_num_rx_rings(mdev); + + /* Create our own workqueue for reset/multicast tasks + * Note: we cannot use the shared workqueue because of deadlocks caused + * by the rtnl lock */ + mdev->workqueue = create_singlethread_workqueue("mlx4_en"); + if (!mdev->workqueue) + goto err_mr; + + /* At this stage all non-port specific tasks are complete: + * mark the card state as up */ + mutex_init(&mdev->state_lock); + mdev->device_up = true; + + /* Setup ports */ + + /* Create a netdev for each port */ + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { + mlx4_info(mdev, "Activating port:%d\n", i); + if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) + mdev->pndev[i] = NULL; + } + /* register notifier */ + mdev->nb.notifier_call = mlx4_en_netdev_event; + if (register_netdevice_notifier(&mdev->nb)) { + mdev->nb.notifier_call = NULL; + mlx4_err(mdev, "Failed to create notifier\n"); + } + + return mdev; + +err_mr: + (void) mlx4_mr_free(dev, &mdev->mr); +err_map: + if (mdev->uar_map) + iounmap(mdev->uar_map); +err_uar: + mlx4_uar_free(dev, &mdev->priv_uar); +err_pd: + mlx4_pd_free(dev, mdev->priv_pdn); +err_free_dev: + kfree(mdev); +err_free_res: + return NULL; +} + +static struct mlx4_interface mlx4_en_interface = { + .add = mlx4_en_add, + .remove = mlx4_en_remove, + .event = mlx4_en_event, + .get_dev = mlx4_en_get_netdev, + .protocol = MLX4_PROT_ETH, +}; + +static void mlx4_en_verify_params(void) +{ + if (pfctx > MAX_PFC_TX) { + pr_warn("mlx4_en: WARNING: illegal module parameter pfctx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n", + pfctx, MAX_PFC_TX); + pfctx = 0; + } + + if (pfcrx > MAX_PFC_RX) { + pr_warn("mlx4_en: WARNING: illegal module parameter pfcrx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n", + pfcrx, MAX_PFC_RX); + pfcrx = 0; + } + + if (inline_thold < MIN_PKT_LEN || inline_thold > MAX_INLINE) { + pr_warn("mlx4_en: WARNING: illegal module parameter inline_thold %d - should be in range %d-%d, will be changed to default (%d)\n", + inline_thold, MIN_PKT_LEN, MAX_INLINE, MAX_INLINE); + inline_thold = MAX_INLINE; + } +} + +static int __init mlx4_en_init(void) +{ + mlx4_en_verify_params(); + + return mlx4_register_interface(&mlx4_en_interface); +} + +static void __exit mlx4_en_cleanup(void) +{ + mlx4_unregister_interface(&mlx4_en_interface); +} + +module_init(mlx4_en_init); +module_exit(mlx4_en_cleanup); + diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c new file mode 100644 index 000000000..a5a0b8420 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -0,0 +1,3092 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include <linux/etherdevice.h> +#include <linux/tcp.h> +#include <linux/if_vlan.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/hash.h> +#include <net/ip.h> +#include <net/busy_poll.h> +#include <net/vxlan.h> + +#include <linux/mlx4/driver.h> +#include <linux/mlx4/device.h> +#include <linux/mlx4/cmd.h> +#include <linux/mlx4/cq.h> + +#include "mlx4_en.h" +#include "en_port.h" + +int mlx4_en_setup_tc(struct net_device *dev, u8 up) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int i; + unsigned int offset = 0; + + if (up && up != MLX4_EN_NUM_UP) + return -EINVAL; + + netdev_set_num_tc(dev, up); + + /* Partition Tx queues evenly amongst UP's */ + for (i = 0; i < up; i++) { + netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset); + offset += priv->num_tx_rings_p_up; + } + + return 0; +} + +#ifdef CONFIG_NET_RX_BUSY_POLL +/* must be called with local_bh_disable()d */ +static int mlx4_en_low_latency_recv(struct napi_struct *napi) +{ + struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); + struct net_device *dev = cq->dev; + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring]; + int done; + + if (!priv->port_up) + return LL_FLUSH_FAILED; + + if (!mlx4_en_cq_lock_poll(cq)) + return LL_FLUSH_BUSY; + + done = mlx4_en_process_rx_cq(dev, cq, 4); + if (likely(done)) + rx_ring->cleaned += done; + else + rx_ring->misses++; + + mlx4_en_cq_unlock_poll(cq); + + return done; +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +#ifdef CONFIG_RFS_ACCEL + +struct mlx4_en_filter { + struct list_head next; + struct work_struct work; + + u8 ip_proto; + __be32 src_ip; + __be32 dst_ip; + __be16 src_port; + __be16 dst_port; + + int rxq_index; + struct mlx4_en_priv *priv; + u32 flow_id; /* RFS infrastructure id */ + int id; /* mlx4_en driver id */ + u64 reg_id; /* Flow steering API id */ + u8 activated; /* Used to prevent expiry before filter + * is attached + */ + struct hlist_node filter_chain; +}; + +static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv); + +static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto) +{ + switch (ip_proto) { + case IPPROTO_UDP: + return MLX4_NET_TRANS_RULE_ID_UDP; + case IPPROTO_TCP: + return MLX4_NET_TRANS_RULE_ID_TCP; + 
default: + return MLX4_NET_TRANS_RULE_NUM; + } +}; + +static void mlx4_en_filter_work(struct work_struct *work) +{ + struct mlx4_en_filter *filter = container_of(work, + struct mlx4_en_filter, + work); + struct mlx4_en_priv *priv = filter->priv; + struct mlx4_spec_list spec_tcp_udp = { + .id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto), + { + .tcp_udp = { + .dst_port = filter->dst_port, + .dst_port_msk = (__force __be16)-1, + .src_port = filter->src_port, + .src_port_msk = (__force __be16)-1, + }, + }, + }; + struct mlx4_spec_list spec_ip = { + .id = MLX4_NET_TRANS_RULE_ID_IPV4, + { + .ipv4 = { + .dst_ip = filter->dst_ip, + .dst_ip_msk = (__force __be32)-1, + .src_ip = filter->src_ip, + .src_ip_msk = (__force __be32)-1, + }, + }, + }; + struct mlx4_spec_list spec_eth = { + .id = MLX4_NET_TRANS_RULE_ID_ETH, + }; + struct mlx4_net_trans_rule rule = { + .list = LIST_HEAD_INIT(rule.list), + .queue_mode = MLX4_NET_TRANS_Q_LIFO, + .exclusive = 1, + .allow_loopback = 1, + .promisc_mode = MLX4_FS_REGULAR, + .port = priv->port, + .priority = MLX4_DOMAIN_RFS, + }; + int rc; + __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); + + if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) { + en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n", + filter->ip_proto); + goto ignore; + } + list_add_tail(&spec_eth.list, &rule.list); + list_add_tail(&spec_ip.list, &rule.list); + list_add_tail(&spec_tcp_udp.list, &rule.list); + + rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn; + memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN); + memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); + + filter->activated = 0; + + if (filter->reg_id) { + rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); + if (rc && rc != -ENOENT) + en_err(priv, "Error detaching flow. rc = %d\n", rc); + } + + rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id); + if (rc) + en_err(priv, "Error attaching flow. 
err = %d\n", rc); + +ignore: + mlx4_en_filter_rfs_expire(priv); + + filter->activated = 1; +} + +static inline struct hlist_head * +filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, + __be16 src_port, __be16 dst_port) +{ + unsigned long l; + int bucket_idx; + + l = (__force unsigned long)src_port | + ((__force unsigned long)dst_port << 2); + l ^= (__force unsigned long)(src_ip ^ dst_ip); + + bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT); + + return &priv->filter_hash[bucket_idx]; +} + +static struct mlx4_en_filter * +mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip, + __be32 dst_ip, u8 ip_proto, __be16 src_port, + __be16 dst_port, u32 flow_id) +{ + struct mlx4_en_filter *filter = NULL; + + filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC); + if (!filter) + return NULL; + + filter->priv = priv; + filter->rxq_index = rxq_index; + INIT_WORK(&filter->work, mlx4_en_filter_work); + + filter->src_ip = src_ip; + filter->dst_ip = dst_ip; + filter->ip_proto = ip_proto; + filter->src_port = src_port; + filter->dst_port = dst_port; + + filter->flow_id = flow_id; + + filter->id = priv->last_filter_id++ % RPS_NO_FILTER; + + list_add_tail(&filter->next, &priv->filters); + hlist_add_head(&filter->filter_chain, + filter_hash_bucket(priv, src_ip, dst_ip, src_port, + dst_port)); + + return filter; +} + +static void mlx4_en_filter_free(struct mlx4_en_filter *filter) +{ + struct mlx4_en_priv *priv = filter->priv; + int rc; + + list_del(&filter->next); + + rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); + if (rc && rc != -ENOENT) + en_err(priv, "Error detaching flow. rc = %d\n", rc); + + kfree(filter); +} + +static inline struct mlx4_en_filter * +mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, + u8 ip_proto, __be16 src_port, __be16 dst_port) +{ + struct mlx4_en_filter *filter; + struct mlx4_en_filter *ret = NULL; + + hlist_for_each_entry(filter, + filter_hash_bucket(priv, src_ip, dst_ip, + src_port, dst_port), + filter_chain) { + if (filter->src_ip == src_ip && + filter->dst_ip == dst_ip && + filter->ip_proto == ip_proto && + filter->src_port == src_port && + filter->dst_port == dst_port) { + ret = filter; + break; + } + } + + return ret; +} + +static int +mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id) +{ + struct mlx4_en_priv *priv = netdev_priv(net_dev); + struct mlx4_en_filter *filter; + const struct iphdr *ip; + const __be16 *ports; + u8 ip_proto; + __be32 src_ip; + __be32 dst_ip; + __be16 src_port; + __be16 dst_port; + int nhoff = skb_network_offset(skb); + int ret = 0; + + if (skb->protocol != htons(ETH_P_IP)) + return -EPROTONOSUPPORT; + + ip = (const struct iphdr *)(skb->data + nhoff); + if (ip_is_fragment(ip)) + return -EPROTONOSUPPORT; + + if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP)) + return -EPROTONOSUPPORT; + ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); + + ip_proto = ip->protocol; + src_ip = ip->saddr; + dst_ip = ip->daddr; + src_port = ports[0]; + dst_port = ports[1]; + + spin_lock_bh(&priv->filters_lock); + filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto, + src_port, dst_port); + if (filter) { + if (filter->rxq_index == rxq_index) + goto out; + + filter->rxq_index = rxq_index; + } else { + filter = mlx4_en_filter_alloc(priv, rxq_index, + src_ip, dst_ip, ip_proto, + src_port, dst_port, flow_id); + if (!filter) { + ret = -ENOMEM; + goto err; + } + } + + queue_work(priv->mdev->workqueue, 
&filter->work); + +out: + ret = filter->id; +err: + spin_unlock_bh(&priv->filters_lock); + + return ret; +} + +void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv) +{ + struct mlx4_en_filter *filter, *tmp; + LIST_HEAD(del_list); + + spin_lock_bh(&priv->filters_lock); + list_for_each_entry_safe(filter, tmp, &priv->filters, next) { + list_move(&filter->next, &del_list); + hlist_del(&filter->filter_chain); + } + spin_unlock_bh(&priv->filters_lock); + + list_for_each_entry_safe(filter, tmp, &del_list, next) { + cancel_work_sync(&filter->work); + mlx4_en_filter_free(filter); + } +} + +static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv) +{ + struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL; + LIST_HEAD(del_list); + int i = 0; + + spin_lock_bh(&priv->filters_lock); + list_for_each_entry_safe(filter, tmp, &priv->filters, next) { + if (i > MLX4_EN_FILTER_EXPIRY_QUOTA) + break; + + if (filter->activated && + !work_pending(&filter->work) && + rps_may_expire_flow(priv->dev, + filter->rxq_index, filter->flow_id, + filter->id)) { + list_move(&filter->next, &del_list); + hlist_del(&filter->filter_chain); + } else + last_filter = filter; + + i++; + } + + if (last_filter && (&last_filter->next != priv->filters.next)) + list_move(&priv->filters, &last_filter->next); + + spin_unlock_bh(&priv->filters_lock); + + list_for_each_entry_safe(filter, tmp, &del_list, next) + mlx4_en_filter_free(filter); +} +#endif + +static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int err; + int idx; + + en_dbg(HW, priv, "adding VLAN:%d\n", vid); + + set_bit(vid, priv->active_vlans); + + /* Add VID to port VLAN filter */ + mutex_lock(&mdev->state_lock); + if (mdev->device_up && priv->port_up) { + err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); + if (err) + en_err(priv, "Failed configuring VLAN filter\n"); + } + if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx)) + en_dbg(HW, priv, "failed adding vlan %d\n", vid); + mutex_unlock(&mdev->state_lock); + + return 0; +} + +static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int err; + + en_dbg(HW, priv, "Killing VID:%d\n", vid); + + clear_bit(vid, priv->active_vlans); + + /* Remove VID from port VLAN filter */ + mutex_lock(&mdev->state_lock); + mlx4_unregister_vlan(mdev->dev, priv->port, vid); + + if (mdev->device_up && priv->port_up) { + err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); + if (err) + en_err(priv, "Failed configuring VLAN filter\n"); + } + mutex_unlock(&mdev->state_lock); + + return 0; +} + +static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) +{ + int i; + for (i = ETH_ALEN - 1; i >= 0; --i) { + dst_mac[i] = src_mac & 0xff; + src_mac >>= 8; + } + memset(&dst_mac[ETH_ALEN], 0, 2); +} + + +static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr, + int qpn, u64 *reg_id) +{ + int err; + + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN || + priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) + return 0; /* do nothing */ + + err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn, + MLX4_DOMAIN_NIC, reg_id); + if (err) { + en_err(priv, "failed to add vxlan steering rule, err %d\n", err); + return err; + } + en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", 
addr, *reg_id); + return 0; +} + + +static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv, + unsigned char *mac, int *qpn, u64 *reg_id) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_dev *dev = mdev->dev; + int err; + + switch (dev->caps.steering_mode) { + case MLX4_STEERING_MODE_B0: { + struct mlx4_qp qp; + u8 gid[16] = {0}; + + qp.qpn = *qpn; + memcpy(&gid[10], mac, ETH_ALEN); + gid[5] = priv->port; + + err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH); + break; + } + case MLX4_STEERING_MODE_DEVICE_MANAGED: { + struct mlx4_spec_list spec_eth = { {NULL} }; + __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); + + struct mlx4_net_trans_rule rule = { + .queue_mode = MLX4_NET_TRANS_Q_FIFO, + .exclusive = 0, + .allow_loopback = 1, + .promisc_mode = MLX4_FS_REGULAR, + .priority = MLX4_DOMAIN_NIC, + }; + + rule.port = priv->port; + rule.qpn = *qpn; + INIT_LIST_HEAD(&rule.list); + + spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH; + memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN); + memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); + list_add_tail(&spec_eth.list, &rule.list); + + err = mlx4_flow_attach(dev, &rule, reg_id); + break; + } + default: + return -EINVAL; + } + if (err) + en_warn(priv, "Failed Attaching Unicast\n"); + + return err; +} + +static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv, + unsigned char *mac, int qpn, u64 reg_id) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_dev *dev = mdev->dev; + + switch (dev->caps.steering_mode) { + case MLX4_STEERING_MODE_B0: { + struct mlx4_qp qp; + u8 gid[16] = {0}; + + qp.qpn = qpn; + memcpy(&gid[10], mac, ETH_ALEN); + gid[5] = priv->port; + + mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH); + break; + } + case MLX4_STEERING_MODE_DEVICE_MANAGED: { + mlx4_flow_detach(dev, reg_id); + break; + } + default: + en_err(priv, "Invalid steering mode.\n"); + } +} + +static int mlx4_en_get_qp(struct mlx4_en_priv *priv) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_dev *dev = mdev->dev; + struct mlx4_mac_entry *entry; + int index = 0; + int err = 0; + u64 reg_id = 0; + int *qpn = &priv->base_qpn; + u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr); + + en_dbg(DRV, priv, "Registering MAC: %pM for adding\n", + priv->dev->dev_addr); + index = mlx4_register_mac(dev, priv->port, mac); + if (index < 0) { + err = index; + en_err(priv, "Failed adding MAC: %pM\n", + priv->dev->dev_addr); + return err; + } + + if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { + int base_qpn = mlx4_get_base_qpn(dev, priv->port); + *qpn = base_qpn + index; + return 0; + } + + err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP); + en_dbg(DRV, priv, "Reserved qp %d\n", *qpn); + if (err) { + en_err(priv, "Failed to reserve qp for mac registration\n"); + goto qp_err; + } + + err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, ®_id); + if (err) + goto steer_err; + + err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn, + &priv->tunnel_reg_id); + if (err) + goto tunnel_err; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + err = -ENOMEM; + goto alloc_err; + } + memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac)); + memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac)); + entry->reg_id = reg_id; + + hlist_add_head_rcu(&entry->hlist, + &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]); + + return 0; + +alloc_err: + if (priv->tunnel_reg_id) + mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); +tunnel_err: + mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, 
reg_id); + +steer_err: + mlx4_qp_release_range(dev, *qpn, 1); + +qp_err: + mlx4_unregister_mac(dev, priv->port, mac); + return err; +} + +static void mlx4_en_put_qp(struct mlx4_en_priv *priv) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_dev *dev = mdev->dev; + int qpn = priv->base_qpn; + u64 mac; + + if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { + mac = mlx4_mac_to_u64(priv->dev->dev_addr); + en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", + priv->dev->dev_addr); + mlx4_unregister_mac(dev, priv->port, mac); + } else { + struct mlx4_mac_entry *entry; + struct hlist_node *tmp; + struct hlist_head *bucket; + unsigned int i; + + for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { + bucket = &priv->mac_hash[i]; + hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { + mac = mlx4_mac_to_u64(entry->mac); + en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", + entry->mac); + mlx4_en_uc_steer_release(priv, entry->mac, + qpn, entry->reg_id); + + mlx4_unregister_mac(dev, priv->port, mac); + hlist_del_rcu(&entry->hlist); + kfree_rcu(entry, rcu); + } + } + + if (priv->tunnel_reg_id) { + mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); + priv->tunnel_reg_id = 0; + } + + en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n", + priv->port, qpn); + mlx4_qp_release_range(dev, qpn, 1); + priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; + } +} + +static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn, + unsigned char *new_mac, unsigned char *prev_mac) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_dev *dev = mdev->dev; + int err = 0; + u64 new_mac_u64 = mlx4_mac_to_u64(new_mac); + + if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { + struct hlist_head *bucket; + unsigned int mac_hash; + struct mlx4_mac_entry *entry; + struct hlist_node *tmp; + u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac); + + bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]]; + hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { + if (ether_addr_equal_64bits(entry->mac, prev_mac)) { + mlx4_en_uc_steer_release(priv, entry->mac, + qpn, entry->reg_id); + mlx4_unregister_mac(dev, priv->port, + prev_mac_u64); + hlist_del_rcu(&entry->hlist); + synchronize_rcu(); + memcpy(entry->mac, new_mac, ETH_ALEN); + entry->reg_id = 0; + mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX]; + hlist_add_head_rcu(&entry->hlist, + &priv->mac_hash[mac_hash]); + mlx4_register_mac(dev, priv->port, new_mac_u64); + err = mlx4_en_uc_steer_add(priv, new_mac, + &qpn, + &entry->reg_id); + if (err) + return err; + if (priv->tunnel_reg_id) { + mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); + priv->tunnel_reg_id = 0; + } + err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn, + &priv->tunnel_reg_id); + return err; + } + } + return -EINVAL; + } + + return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64); +} + +static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv, + unsigned char new_mac[ETH_ALEN + 2]) +{ + int err = 0; + + if (priv->port_up) { + /* Remove old MAC and insert the new one */ + err = mlx4_en_replace_mac(priv, priv->base_qpn, + new_mac, priv->current_mac); + if (err) + en_err(priv, "Failed changing HW MAC address\n"); + } else + en_dbg(HW, priv, "Port is down while registering mac, exiting...\n"); + + if (!err) + memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac)); + + return err; +} + +static int mlx4_en_set_mac(struct net_device *dev, void *addr) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + struct sockaddr *saddr = addr; + unsigned 
char new_mac[ETH_ALEN + 2]; + int err; + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + mutex_lock(&mdev->state_lock); + memcpy(new_mac, saddr->sa_data, ETH_ALEN); + err = mlx4_en_do_set_mac(priv, new_mac); + if (!err) + memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); + mutex_unlock(&mdev->state_lock); + + return err; +} + +static void mlx4_en_clear_list(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_mc_list *tmp, *mc_to_del; + + list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) { + list_del(&mc_to_del->list); + kfree(mc_to_del); + } +} + +static void mlx4_en_cache_mclist(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct netdev_hw_addr *ha; + struct mlx4_en_mc_list *tmp; + + mlx4_en_clear_list(dev); + netdev_for_each_mc_addr(ha, dev) { + tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC); + if (!tmp) { + mlx4_en_clear_list(dev); + return; + } + memcpy(tmp->addr, ha->addr, ETH_ALEN); + list_add_tail(&tmp->list, &priv->mc_list); + } +} + +static void update_mclist_flags(struct mlx4_en_priv *priv, + struct list_head *dst, + struct list_head *src) +{ + struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc; + bool found; + + /* Find all the entries that should be removed from dst, + * These are the entries that are not found in src + */ + list_for_each_entry(dst_tmp, dst, list) { + found = false; + list_for_each_entry(src_tmp, src, list) { + if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) { + found = true; + break; + } + } + if (!found) + dst_tmp->action = MCLIST_REM; + } + + /* Add entries that exist in src but not in dst + * mark them as need to add + */ + list_for_each_entry(src_tmp, src, list) { + found = false; + list_for_each_entry(dst_tmp, dst, list) { + if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) { + dst_tmp->action = MCLIST_NONE; + found = true; + break; + } + } + if (!found) { + new_mc = kmemdup(src_tmp, + sizeof(struct mlx4_en_mc_list), + GFP_KERNEL); + if (!new_mc) + return; + + new_mc->action = MCLIST_ADD; + list_add_tail(&new_mc->list, dst); + } + } +} + +static void mlx4_en_set_rx_mode(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + if (!priv->port_up) + return; + + queue_work(priv->mdev->workqueue, &priv->rx_mode_task); +} + +static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv, + struct mlx4_en_dev *mdev) +{ + int err = 0; + + if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) { + if (netif_msg_rx_status(priv)) + en_warn(priv, "Entering promiscuous mode\n"); + priv->flags |= MLX4_EN_FLAG_PROMISC; + + /* Enable promiscouos mode */ + switch (mdev->dev->caps.steering_mode) { + case MLX4_STEERING_MODE_DEVICE_MANAGED: + err = mlx4_flow_steer_promisc_add(mdev->dev, + priv->port, + priv->base_qpn, + MLX4_FS_ALL_DEFAULT); + if (err) + en_err(priv, "Failed enabling promiscuous mode\n"); + priv->flags |= MLX4_EN_FLAG_MC_PROMISC; + break; + + case MLX4_STEERING_MODE_B0: + err = mlx4_unicast_promisc_add(mdev->dev, + priv->base_qpn, + priv->port); + if (err) + en_err(priv, "Failed enabling unicast promiscuous mode\n"); + + /* Add the default qp number as multicast + * promisc + */ + if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { + err = mlx4_multicast_promisc_add(mdev->dev, + priv->base_qpn, + priv->port); + if (err) + en_err(priv, "Failed enabling multicast promiscuous mode\n"); + priv->flags |= MLX4_EN_FLAG_MC_PROMISC; + } + break; + + case MLX4_STEERING_MODE_A0: + err = mlx4_SET_PORT_qpn_calc(mdev->dev, + priv->port, + 
priv->base_qpn, + 1); + if (err) + en_err(priv, "Failed enabling promiscuous mode\n"); + break; + } + + /* Disable port multicast filter (unconditionally) */ + err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, + 0, MLX4_MCAST_DISABLE); + if (err) + en_err(priv, "Failed disabling multicast filter\n"); + } +} + +static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv, + struct mlx4_en_dev *mdev) +{ + int err = 0; + + if (netif_msg_rx_status(priv)) + en_warn(priv, "Leaving promiscuous mode\n"); + priv->flags &= ~MLX4_EN_FLAG_PROMISC; + + /* Disable promiscouos mode */ + switch (mdev->dev->caps.steering_mode) { + case MLX4_STEERING_MODE_DEVICE_MANAGED: + err = mlx4_flow_steer_promisc_remove(mdev->dev, + priv->port, + MLX4_FS_ALL_DEFAULT); + if (err) + en_err(priv, "Failed disabling promiscuous mode\n"); + priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; + break; + + case MLX4_STEERING_MODE_B0: + err = mlx4_unicast_promisc_remove(mdev->dev, + priv->base_qpn, + priv->port); + if (err) + en_err(priv, "Failed disabling unicast promiscuous mode\n"); + /* Disable Multicast promisc */ + if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { + err = mlx4_multicast_promisc_remove(mdev->dev, + priv->base_qpn, + priv->port); + if (err) + en_err(priv, "Failed disabling multicast promiscuous mode\n"); + priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; + } + break; + + case MLX4_STEERING_MODE_A0: + err = mlx4_SET_PORT_qpn_calc(mdev->dev, + priv->port, + priv->base_qpn, 0); + if (err) + en_err(priv, "Failed disabling promiscuous mode\n"); + break; + } +} + +static void mlx4_en_do_multicast(struct mlx4_en_priv *priv, + struct net_device *dev, + struct mlx4_en_dev *mdev) +{ + struct mlx4_en_mc_list *mclist, *tmp; + u64 mcast_addr = 0; + u8 mc_list[16] = {0}; + int err = 0; + + /* Enable/disable the multicast filter according to IFF_ALLMULTI */ + if (dev->flags & IFF_ALLMULTI) { + err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, + 0, MLX4_MCAST_DISABLE); + if (err) + en_err(priv, "Failed disabling multicast filter\n"); + + /* Add the default qp number as multicast promisc */ + if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { + switch (mdev->dev->caps.steering_mode) { + case MLX4_STEERING_MODE_DEVICE_MANAGED: + err = mlx4_flow_steer_promisc_add(mdev->dev, + priv->port, + priv->base_qpn, + MLX4_FS_MC_DEFAULT); + break; + + case MLX4_STEERING_MODE_B0: + err = mlx4_multicast_promisc_add(mdev->dev, + priv->base_qpn, + priv->port); + break; + + case MLX4_STEERING_MODE_A0: + break; + } + if (err) + en_err(priv, "Failed entering multicast promisc mode\n"); + priv->flags |= MLX4_EN_FLAG_MC_PROMISC; + } + } else { + /* Disable Multicast promisc */ + if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { + switch (mdev->dev->caps.steering_mode) { + case MLX4_STEERING_MODE_DEVICE_MANAGED: + err = mlx4_flow_steer_promisc_remove(mdev->dev, + priv->port, + MLX4_FS_MC_DEFAULT); + break; + + case MLX4_STEERING_MODE_B0: + err = mlx4_multicast_promisc_remove(mdev->dev, + priv->base_qpn, + priv->port); + break; + + case MLX4_STEERING_MODE_A0: + break; + } + if (err) + en_err(priv, "Failed disabling multicast promiscuous mode\n"); + priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; + } + + err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, + 0, MLX4_MCAST_DISABLE); + if (err) + en_err(priv, "Failed disabling multicast filter\n"); + + /* Flush mcast filter and init it with broadcast address */ + mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, + 1, MLX4_MCAST_CONFIG); + + /* Update multicast list - we cache all addresses so they won't + * change while HW is 
updated holding the command semaphor */ + netif_addr_lock_bh(dev); + mlx4_en_cache_mclist(dev); + netif_addr_unlock_bh(dev); + list_for_each_entry(mclist, &priv->mc_list, list) { + mcast_addr = mlx4_mac_to_u64(mclist->addr); + mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, + mcast_addr, 0, MLX4_MCAST_CONFIG); + } + err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, + 0, MLX4_MCAST_ENABLE); + if (err) + en_err(priv, "Failed enabling multicast filter\n"); + + update_mclist_flags(priv, &priv->curr_list, &priv->mc_list); + list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { + if (mclist->action == MCLIST_REM) { + /* detach this address and delete from list */ + memcpy(&mc_list[10], mclist->addr, ETH_ALEN); + mc_list[5] = priv->port; + err = mlx4_multicast_detach(mdev->dev, + &priv->rss_map.indir_qp, + mc_list, + MLX4_PROT_ETH, + mclist->reg_id); + if (err) + en_err(priv, "Fail to detach multicast address\n"); + + if (mclist->tunnel_reg_id) { + err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id); + if (err) + en_err(priv, "Failed to detach multicast address\n"); + } + + /* remove from list */ + list_del(&mclist->list); + kfree(mclist); + } else if (mclist->action == MCLIST_ADD) { + /* attach the address */ + memcpy(&mc_list[10], mclist->addr, ETH_ALEN); + /* needed for B0 steering support */ + mc_list[5] = priv->port; + err = mlx4_multicast_attach(mdev->dev, + &priv->rss_map.indir_qp, + mc_list, + priv->port, 0, + MLX4_PROT_ETH, + &mclist->reg_id); + if (err) + en_err(priv, "Fail to attach multicast address\n"); + + err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn, + &mclist->tunnel_reg_id); + if (err) + en_err(priv, "Failed to attach multicast address\n"); + } + } + } +} + +static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, + struct net_device *dev, + struct mlx4_en_dev *mdev) +{ + struct netdev_hw_addr *ha; + struct mlx4_mac_entry *entry; + struct hlist_node *tmp; + bool found; + u64 mac; + int err = 0; + struct hlist_head *bucket; + unsigned int i; + int removed = 0; + u32 prev_flags; + + /* Note that we do not need to protect our mac_hash traversal with rcu, + * since all modification code is protected by mdev->state_lock + */ + + /* find what to remove */ + for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { + bucket = &priv->mac_hash[i]; + hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { + found = false; + netdev_for_each_uc_addr(ha, dev) { + if (ether_addr_equal_64bits(entry->mac, + ha->addr)) { + found = true; + break; + } + } + + /* MAC address of the port is not in uc list */ + if (ether_addr_equal_64bits(entry->mac, + priv->current_mac)) + found = true; + + if (!found) { + mac = mlx4_mac_to_u64(entry->mac); + mlx4_en_uc_steer_release(priv, entry->mac, + priv->base_qpn, + entry->reg_id); + mlx4_unregister_mac(mdev->dev, priv->port, mac); + + hlist_del_rcu(&entry->hlist); + kfree_rcu(entry, rcu); + en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n", + entry->mac, priv->port); + ++removed; + } + } + } + + /* if we didn't remove anything, there is no use in trying to add + * again once we are in a forced promisc mode state + */ + if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed) + return; + + prev_flags = priv->flags; + priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; + + /* find what to add */ + netdev_for_each_uc_addr(ha, dev) { + found = false; + bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]]; + hlist_for_each_entry(entry, bucket, hlist) { + if (ether_addr_equal_64bits(entry->mac, ha->addr)) { + found = true; + break; + } 
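mlx4_en_do_uc_filter() here keeps one mlx4_mac_entry per address in priv->mac_hash, indexed by a single byte of the MAC (MLX4_EN_MAC_HASH_IDX), and registers each address with the device as a packed u64. A minimal sketch of that bucket choice plus the MSB-first packing done by mlx4_mac_to_u64() (the inverse of mlx4_en_u64_to_mac() earlier in this file); the two #define values below are stand-ins for the constants in mlx4_en.h.

#include <stdint.h>
#include <stdio.h>

#define MAC_HASH_SIZE 256   /* stand-in for MLX4_EN_MAC_HASH_SIZE */
#define MAC_HASH_IDX  5     /* stand-in for MLX4_EN_MAC_HASH_IDX  */

/* Pack a 6-byte MAC into a u64, most significant byte first. */
static uint64_t mac_to_u64(const uint8_t mac[6])
{
    uint64_t v = 0;
    int i;

    for (i = 0; i < 6; i++)
        v = (v << 8) | mac[i];
    return v;
}

int main(void)
{
    const uint8_t mac[6] = { 0x00, 0x02, 0xc9, 0x11, 0x22, 0x33 };
    /* One byte of the address picks the bucket; the modulo is only a
     * guard in case the stand-in table size is smaller than 256. */
    unsigned int bucket = mac[MAC_HASH_IDX] % MAC_HASH_SIZE;

    printf("mac=%012llx bucket=%u\n",
           (unsigned long long)mac_to_u64(mac), bucket);
    return 0;
}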
+ } + + if (!found) { + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n", + ha->addr, priv->port); + priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; + break; + } + mac = mlx4_mac_to_u64(ha->addr); + memcpy(entry->mac, ha->addr, ETH_ALEN); + err = mlx4_register_mac(mdev->dev, priv->port, mac); + if (err < 0) { + en_err(priv, "Failed registering MAC %pM on port %d: %d\n", + ha->addr, priv->port, err); + kfree(entry); + priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; + break; + } + err = mlx4_en_uc_steer_add(priv, ha->addr, + &priv->base_qpn, + &entry->reg_id); + if (err) { + en_err(priv, "Failed adding MAC %pM on port %d: %d\n", + ha->addr, priv->port, err); + mlx4_unregister_mac(mdev->dev, priv->port, mac); + kfree(entry); + priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; + break; + } else { + unsigned int mac_hash; + en_dbg(DRV, priv, "Added MAC %pM on port:%d\n", + ha->addr, priv->port); + mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX]; + bucket = &priv->mac_hash[mac_hash]; + hlist_add_head_rcu(&entry->hlist, bucket); + } + } + } + + if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) { + en_warn(priv, "Forcing promiscuous mode on port:%d\n", + priv->port); + } else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) { + en_warn(priv, "Stop forcing promiscuous mode on port:%d\n", + priv->port); + } +} + +static void mlx4_en_do_set_rx_mode(struct work_struct *work) +{ + struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, + rx_mode_task); + struct mlx4_en_dev *mdev = priv->mdev; + struct net_device *dev = priv->dev; + + mutex_lock(&mdev->state_lock); + if (!mdev->device_up) { + en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n"); + goto out; + } + if (!priv->port_up) { + en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n"); + goto out; + } + + if (!netif_carrier_ok(dev)) { + if (!mlx4_en_QUERY_PORT(mdev, priv->port)) { + if (priv->port_state.link_state) { + priv->last_link_state = MLX4_DEV_EVENT_PORT_UP; + netif_carrier_on(dev); + en_dbg(LINK, priv, "Link Up\n"); + } + } + } + + if (dev->priv_flags & IFF_UNICAST_FLT) + mlx4_en_do_uc_filter(priv, dev, mdev); + + /* Promsicuous mode: disable all filters */ + if ((dev->flags & IFF_PROMISC) || + (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) { + mlx4_en_set_promisc_mode(priv, mdev); + goto out; + } + + /* Not in promiscuous mode */ + if (priv->flags & MLX4_EN_FLAG_PROMISC) + mlx4_en_clear_promisc_mode(priv, mdev); + + mlx4_en_do_multicast(priv, dev, mdev); +out: + mutex_unlock(&mdev->state_lock); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void mlx4_en_netpoll(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_cq *cq; + int i; + + for (i = 0; i < priv->rx_ring_num; i++) { + cq = priv->rx_cq[i]; + napi_schedule(&cq->napi); + } +} +#endif + +static void mlx4_en_tx_timeout(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int i; + + if (netif_msg_timer(priv)) + en_warn(priv, "Tx timeout called on port:%d\n", priv->port); + + for (i = 0; i < priv->tx_ring_num; i++) { + if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i))) + continue; + en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n", + i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn, + priv->tx_ring[i]->cons, priv->tx_ring[i]->prod); + } + + priv->port_stats.tx_timeout++; + en_dbg(DRV, priv, "Scheduling watchdog\n"); + queue_work(mdev->workqueue, &priv->watchdog_task); 
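mlx4_en_auto_moderation(), a little further down, adapts the RX coalescing time per ring: once a ring is busy enough it uses rx_usecs_low below the low packet-rate threshold, rx_usecs_high above the high one, and interpolates linearly in between. A compilable sketch of that interpolation follows; the four constants are placeholders for the MLX4_EN_RX_RATE_* and coalescing-time defaults, not the driver's actual values.

#include <stdio.h>

#define RATE_LOW   400000   /* packets/sec, placeholder */
#define RATE_HIGH  450000   /* packets/sec, placeholder */
#define USECS_LOW  0        /* placeholder */
#define USECS_HIGH 128      /* placeholder */

/* Clamp, then interpolate, as mlx4_en_auto_moderation() does. */
static int moder_time(unsigned long rate)
{
    if (rate < RATE_LOW)
        return USECS_LOW;
    if (rate > RATE_HIGH)
        return USECS_HIGH;
    return (int)((rate - RATE_LOW) * (USECS_HIGH - USECS_LOW) /
                 (RATE_HIGH - RATE_LOW) + USECS_LOW);
}

int main(void)
{
    printf("425000 pkt/s -> %d usec\n", moder_time(425000)); /* midpoint: 64 */
    return 0;
}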
+} + + +static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + spin_lock_bh(&priv->stats_lock); + memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats)); + spin_unlock_bh(&priv->stats_lock); + + return &priv->ret_stats; +} + +static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) +{ + struct mlx4_en_cq *cq; + int i; + + /* If we haven't received a specific coalescing setting + * (module param), we set the moderation parameters as follows: + * - moder_cnt is set to the number of mtu sized packets to + * satisfy our coalescing target. + * - moder_time is set to a fixed value. + */ + priv->rx_frames = MLX4_EN_RX_COAL_TARGET; + priv->rx_usecs = MLX4_EN_RX_COAL_TIME; + priv->tx_frames = MLX4_EN_TX_COAL_PKTS; + priv->tx_usecs = MLX4_EN_TX_COAL_TIME; + en_dbg(INTR, priv, "Default coalesing params for mtu:%d - rx_frames:%d rx_usecs:%d\n", + priv->dev->mtu, priv->rx_frames, priv->rx_usecs); + + /* Setup cq moderation params */ + for (i = 0; i < priv->rx_ring_num; i++) { + cq = priv->rx_cq[i]; + cq->moder_cnt = priv->rx_frames; + cq->moder_time = priv->rx_usecs; + priv->last_moder_time[i] = MLX4_EN_AUTO_CONF; + priv->last_moder_packets[i] = 0; + priv->last_moder_bytes[i] = 0; + } + + for (i = 0; i < priv->tx_ring_num; i++) { + cq = priv->tx_cq[i]; + cq->moder_cnt = priv->tx_frames; + cq->moder_time = priv->tx_usecs; + } + + /* Reset auto-moderation params */ + priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW; + priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW; + priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH; + priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH; + priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL; + priv->adaptive_rx_coal = 1; + priv->last_moder_jiffies = 0; + priv->last_moder_tx_packets = 0; +} + +static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) +{ + unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies); + struct mlx4_en_cq *cq; + unsigned long packets; + unsigned long rate; + unsigned long avg_pkt_size; + unsigned long rx_packets; + unsigned long rx_bytes; + unsigned long rx_pkt_diff; + int moder_time; + int ring, err; + + if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ) + return; + + for (ring = 0; ring < priv->rx_ring_num; ring++) { + spin_lock_bh(&priv->stats_lock); + rx_packets = priv->rx_ring[ring]->packets; + rx_bytes = priv->rx_ring[ring]->bytes; + spin_unlock_bh(&priv->stats_lock); + + rx_pkt_diff = ((unsigned long) (rx_packets - + priv->last_moder_packets[ring])); + packets = rx_pkt_diff; + rate = packets * HZ / period; + avg_pkt_size = packets ? 
((unsigned long) (rx_bytes - + priv->last_moder_bytes[ring])) / packets : 0; + + /* Apply auto-moderation only when packet rate + * exceeds a rate that it matters */ + if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) && + avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) { + if (rate < priv->pkt_rate_low) + moder_time = priv->rx_usecs_low; + else if (rate > priv->pkt_rate_high) + moder_time = priv->rx_usecs_high; + else + moder_time = (rate - priv->pkt_rate_low) * + (priv->rx_usecs_high - priv->rx_usecs_low) / + (priv->pkt_rate_high - priv->pkt_rate_low) + + priv->rx_usecs_low; + } else { + moder_time = priv->rx_usecs_low; + } + + if (moder_time != priv->last_moder_time[ring]) { + priv->last_moder_time[ring] = moder_time; + cq = priv->rx_cq[ring]; + cq->moder_time = moder_time; + cq->moder_cnt = priv->rx_frames; + err = mlx4_en_set_cq_moder(priv, cq); + if (err) + en_err(priv, "Failed modifying moderation for cq:%d\n", + ring); + } + priv->last_moder_packets[ring] = rx_packets; + priv->last_moder_bytes[ring] = rx_bytes; + } + + priv->last_moder_jiffies = jiffies; +} + +static void mlx4_en_do_get_stats(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, + stats_task); + struct mlx4_en_dev *mdev = priv->mdev; + int err; + + mutex_lock(&mdev->state_lock); + if (mdev->device_up) { + if (priv->port_up) { + err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); + if (err) + en_dbg(HW, priv, "Could not update stats\n"); + + mlx4_en_auto_moderation(priv); + } + + queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); + } + if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) { + mlx4_en_do_set_mac(priv, priv->current_mac); + mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0; + } + mutex_unlock(&mdev->state_lock); +} + +/* mlx4_en_service_task - Run service task for tasks that needed to be done + * periodically + */ +static void mlx4_en_service_task(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, + service_task); + struct mlx4_en_dev *mdev = priv->mdev; + + mutex_lock(&mdev->state_lock); + if (mdev->device_up) { + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) + mlx4_en_ptp_overflow_check(mdev); + + mlx4_en_recover_from_oom(priv); + queue_delayed_work(mdev->workqueue, &priv->service_task, + SERVICE_TASK_DELAY); + } + mutex_unlock(&mdev->state_lock); +} + +static void mlx4_en_linkstate(struct work_struct *work) +{ + struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, + linkstate_task); + struct mlx4_en_dev *mdev = priv->mdev; + int linkstate = priv->link_state; + + mutex_lock(&mdev->state_lock); + /* If observable port state changed set carrier state and + * report to system log */ + if (priv->last_link_state != linkstate) { + if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) { + en_info(priv, "Link Down\n"); + netif_carrier_off(priv->dev); + } else { + en_info(priv, "Link Up\n"); + netif_carrier_on(priv->dev); + } + } + priv->last_link_state = linkstate; + mutex_unlock(&mdev->state_lock); +} + +static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) +{ + struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx]; + int numa_node = priv->mdev->dev->numa_node; + + if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL)) + return -ENOMEM; + + cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node), + ring->affinity_mask); + return 0; +} + +static 
void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) +{ + free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask); +} + +int mlx4_en_start_port(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_cq *cq; + struct mlx4_en_tx_ring *tx_ring; + int rx_index = 0; + int tx_index = 0; + int err = 0; + int i; + int j; + u8 mc_list[16] = {0}; + + if (priv->port_up) { + en_dbg(DRV, priv, "start port called while port already up\n"); + return 0; + } + + INIT_LIST_HEAD(&priv->mc_list); + INIT_LIST_HEAD(&priv->curr_list); + INIT_LIST_HEAD(&priv->ethtool_list); + memset(&priv->ethtool_rules[0], 0, + sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES); + + /* Calculate Rx buf size */ + dev->mtu = min(dev->mtu, priv->max_mtu); + mlx4_en_calc_rx_buf(dev); + en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size); + + /* Configure rx cq's and rings */ + err = mlx4_en_activate_rx_rings(priv); + if (err) { + en_err(priv, "Failed to activate RX rings\n"); + return err; + } + for (i = 0; i < priv->rx_ring_num; i++) { + cq = priv->rx_cq[i]; + + mlx4_en_cq_init_lock(cq); + + err = mlx4_en_init_affinity_hint(priv, i); + if (err) { + en_err(priv, "Failed preparing IRQ affinity hint\n"); + goto cq_err; + } + + err = mlx4_en_activate_cq(priv, cq, i); + if (err) { + en_err(priv, "Failed activating Rx CQ\n"); + mlx4_en_free_affinity_hint(priv, i); + goto cq_err; + } + + for (j = 0; j < cq->size; j++) { + struct mlx4_cqe *cqe = NULL; + + cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) + + priv->cqe_factor; + cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK; + } + + err = mlx4_en_set_cq_moder(priv, cq); + if (err) { + en_err(priv, "Failed setting cq moderation parameters\n"); + mlx4_en_deactivate_cq(priv, cq); + mlx4_en_free_affinity_hint(priv, i); + goto cq_err; + } + mlx4_en_arm_cq(priv, cq); + priv->rx_ring[i]->cqn = cq->mcq.cqn; + ++rx_index; + } + + /* Set qp number */ + en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port); + err = mlx4_en_get_qp(priv); + if (err) { + en_err(priv, "Failed getting eth qp\n"); + goto cq_err; + } + mdev->mac_removed[priv->port] = 0; + + err = mlx4_en_config_rss_steer(priv); + if (err) { + en_err(priv, "Failed configuring rss steering\n"); + goto mac_err; + } + + err = mlx4_en_create_drop_qp(priv); + if (err) + goto rss_err; + + /* Configure tx cq's and rings */ + for (i = 0; i < priv->tx_ring_num; i++) { + /* Configure cq */ + cq = priv->tx_cq[i]; + err = mlx4_en_activate_cq(priv, cq, i); + if (err) { + en_err(priv, "Failed allocating Tx CQ\n"); + goto tx_err; + } + err = mlx4_en_set_cq_moder(priv, cq); + if (err) { + en_err(priv, "Failed setting cq moderation parameters\n"); + mlx4_en_deactivate_cq(priv, cq); + goto tx_err; + } + en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i); + cq->buf->wqe_index = cpu_to_be16(0xffff); + + /* Configure ring */ + tx_ring = priv->tx_ring[i]; + err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn, + i / priv->num_tx_rings_p_up); + if (err) { + en_err(priv, "Failed allocating Tx ring\n"); + mlx4_en_deactivate_cq(priv, cq); + goto tx_err; + } + tx_ring->tx_queue = netdev_get_tx_queue(dev, i); + + /* Arm CQ for TX completions */ + mlx4_en_arm_cq(priv, cq); + + /* Set initial ownership of all Tx TXBBs to SW (1) */ + for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) + *((u32 *) (tx_ring->buf + j)) = 0xffffffff; + ++tx_index; + } + + /* Configure port */ + err = mlx4_SET_PORT_general(mdev->dev, priv->port, + 
priv->rx_skb_size + ETH_FCS_LEN, + priv->prof->tx_pause, + priv->prof->tx_ppp, + priv->prof->rx_pause, + priv->prof->rx_ppp); + if (err) { + en_err(priv, "Failed setting port general configurations for port %d, with error %d\n", + priv->port, err); + goto tx_err; + } + /* Set default qp number */ + err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); + if (err) { + en_err(priv, "Failed setting default qp numbers\n"); + goto tx_err; + } + + if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { + err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); + if (err) { + en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n", + err); + goto tx_err; + } + } + + /* Init port */ + en_dbg(HW, priv, "Initializing port\n"); + err = mlx4_INIT_PORT(mdev->dev, priv->port); + if (err) { + en_err(priv, "Failed Initializing port\n"); + goto tx_err; + } + + /* Attach rx QP to bradcast address */ + eth_broadcast_addr(&mc_list[10]); + mc_list[5] = priv->port; /* needed for B0 steering support */ + if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, + priv->port, 0, MLX4_PROT_ETH, + &priv->broadcast_id)) + mlx4_warn(mdev, "Failed Attaching Broadcast\n"); + + /* Must redo promiscuous mode setup. */ + priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); + + /* Schedule multicast task to populate multicast list */ + queue_work(mdev->workqueue, &priv->rx_mode_task); + +#ifdef CONFIG_MLX4_EN_VXLAN + if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + vxlan_get_rx_port(dev); +#endif + priv->port_up = true; + netif_tx_start_all_queues(dev); + netif_device_attach(dev); + + return 0; + +tx_err: + while (tx_index--) { + mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]); + mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]); + } + mlx4_en_destroy_drop_qp(priv); +rss_err: + mlx4_en_release_rss_steer(priv); +mac_err: + mlx4_en_put_qp(priv); +cq_err: + while (rx_index--) { + mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); + mlx4_en_free_affinity_hint(priv, rx_index); + } + for (i = 0; i < priv->rx_ring_num; i++) + mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); + + return err; /* need to close devices */ +} + + +void mlx4_en_stop_port(struct net_device *dev, int detach) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_mc_list *mclist, *tmp; + struct ethtool_flow_id *flow, *tmp_flow; + int i; + u8 mc_list[16] = {0}; + + if (!priv->port_up) { + en_dbg(DRV, priv, "stop port called while port already down\n"); + return; + } + + /* close port*/ + mlx4_CLOSE_PORT(mdev->dev, priv->port); + + /* Synchronize with tx routine */ + netif_tx_lock_bh(dev); + if (detach) + netif_device_detach(dev); + netif_tx_stop_all_queues(dev); + netif_tx_unlock_bh(dev); + + netif_tx_disable(dev); + + /* Set port as not active */ + priv->port_up = false; + + /* Promsicuous mode */ + if (mdev->dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) { + priv->flags &= ~(MLX4_EN_FLAG_PROMISC | + MLX4_EN_FLAG_MC_PROMISC); + mlx4_flow_steer_promisc_remove(mdev->dev, + priv->port, + MLX4_FS_ALL_DEFAULT); + mlx4_flow_steer_promisc_remove(mdev->dev, + priv->port, + MLX4_FS_MC_DEFAULT); + } else if (priv->flags & MLX4_EN_FLAG_PROMISC) { + priv->flags &= ~MLX4_EN_FLAG_PROMISC; + + /* Disable promiscouos mode */ + mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn, + priv->port); + + /* Disable Multicast promisc */ + if (priv->flags & 
MLX4_EN_FLAG_MC_PROMISC) { + mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, + priv->port); + priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; + } + } + + /* Detach All multicasts */ + eth_broadcast_addr(&mc_list[10]); + mc_list[5] = priv->port; /* needed for B0 steering support */ + mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, + MLX4_PROT_ETH, priv->broadcast_id); + list_for_each_entry(mclist, &priv->curr_list, list) { + memcpy(&mc_list[10], mclist->addr, ETH_ALEN); + mc_list[5] = priv->port; + mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, + mc_list, MLX4_PROT_ETH, mclist->reg_id); + if (mclist->tunnel_reg_id) + mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id); + } + mlx4_en_clear_list(dev); + list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { + list_del(&mclist->list); + kfree(mclist); + } + + /* Flush multicast filter */ + mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); + + /* Remove flow steering rules for the port*/ + if (mdev->dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) { + ASSERT_RTNL(); + list_for_each_entry_safe(flow, tmp_flow, + &priv->ethtool_list, list) { + mlx4_flow_detach(mdev->dev, flow->id); + list_del(&flow->list); + } + } + + mlx4_en_destroy_drop_qp(priv); + + /* Free TX Rings */ + for (i = 0; i < priv->tx_ring_num; i++) { + mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]); + mlx4_en_deactivate_cq(priv, priv->tx_cq[i]); + } + msleep(10); + + for (i = 0; i < priv->tx_ring_num; i++) + mlx4_en_free_tx_buf(dev, priv->tx_ring[i]); + + /* Free RSS qps */ + mlx4_en_release_rss_steer(priv); + + /* Unregister Mac address for the port */ + mlx4_en_put_qp(priv); + if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN)) + mdev->mac_removed[priv->port] = 1; + + /* Free RX Rings */ + for (i = 0; i < priv->rx_ring_num; i++) { + struct mlx4_en_cq *cq = priv->rx_cq[i]; + + local_bh_disable(); + while (!mlx4_en_cq_lock_napi(cq)) { + pr_info("CQ %d locked\n", i); + mdelay(1); + } + local_bh_enable(); + + napi_synchronize(&cq->napi); + mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); + mlx4_en_deactivate_cq(priv, cq); + + mlx4_en_free_affinity_hint(priv, i); + } +} + +static void mlx4_en_restart(struct work_struct *work) +{ + struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, + watchdog_task); + struct mlx4_en_dev *mdev = priv->mdev; + struct net_device *dev = priv->dev; + + en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); + + mutex_lock(&mdev->state_lock); + if (priv->port_up) { + mlx4_en_stop_port(dev, 1); + if (mlx4_en_start_port(dev)) + en_err(priv, "Failed restarting port %d\n", priv->port); + } + mutex_unlock(&mdev->state_lock); +} + +static void mlx4_en_clear_stats(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int i; + + if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) + en_dbg(HW, priv, "Failed dumping statistics\n"); + + memset(&priv->stats, 0, sizeof(priv->stats)); + memset(&priv->pstats, 0, sizeof(priv->pstats)); + memset(&priv->pkstats, 0, sizeof(priv->pkstats)); + memset(&priv->port_stats, 0, sizeof(priv->port_stats)); + memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats)); + memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats)); + memset(&priv->rx_priority_flowstats, 0, + sizeof(priv->rx_priority_flowstats)); + memset(&priv->tx_priority_flowstats, 0, + sizeof(priv->tx_priority_flowstats)); + + for (i = 0; i < priv->tx_ring_num; i++) { + 
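For the B0 steering mode used by mlx4_en_start_port()/mlx4_en_stop_port() above, multicast attach and detach take a 16-byte GID-style buffer rather than a bare MAC: the address occupies bytes 10..15 and the port number goes into byte 5. A small sketch of how mc_list[] is laid out; the broadcast address and port 1 are just example inputs.

#include <stdio.h>
#include <string.h>

int main(void)
{
    unsigned char mc_list[16] = { 0 };
    const unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
    int port = 1;   /* example port */
    int i;

    memcpy(&mc_list[10], bcast, 6);     /* MAC in the last 6 bytes   */
    mc_list[5] = (unsigned char)port;   /* needed for B0 steering    */

    for (i = 0; i < 16; i++)
        printf("%02x%s", mc_list[i], i == 15 ? "\n" : ":");
    return 0;
}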
priv->tx_ring[i]->bytes = 0; + priv->tx_ring[i]->packets = 0; + priv->tx_ring[i]->tx_csum = 0; + } + for (i = 0; i < priv->rx_ring_num; i++) { + priv->rx_ring[i]->bytes = 0; + priv->rx_ring[i]->packets = 0; + priv->rx_ring[i]->csum_ok = 0; + priv->rx_ring[i]->csum_none = 0; + priv->rx_ring[i]->csum_complete = 0; + } +} + +static int mlx4_en_open(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int err = 0; + + mutex_lock(&mdev->state_lock); + + if (!mdev->device_up) { + en_err(priv, "Cannot open - device down/disabled\n"); + err = -EBUSY; + goto out; + } + + /* Reset HW statistics and SW counters */ + mlx4_en_clear_stats(dev); + + err = mlx4_en_start_port(dev); + if (err) + en_err(priv, "Failed starting port:%d\n", priv->port); + +out: + mutex_unlock(&mdev->state_lock); + return err; +} + + +static int mlx4_en_close(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + + en_dbg(IFDOWN, priv, "Close port called\n"); + + mutex_lock(&mdev->state_lock); + + mlx4_en_stop_port(dev, 0); + netif_carrier_off(dev); + + mutex_unlock(&mdev->state_lock); + return 0; +} + +void mlx4_en_free_resources(struct mlx4_en_priv *priv) +{ + int i; + +#ifdef CONFIG_RFS_ACCEL + free_irq_cpu_rmap(priv->dev->rx_cpu_rmap); + priv->dev->rx_cpu_rmap = NULL; +#endif + + for (i = 0; i < priv->tx_ring_num; i++) { + if (priv->tx_ring && priv->tx_ring[i]) + mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); + if (priv->tx_cq && priv->tx_cq[i]) + mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); + } + + for (i = 0; i < priv->rx_ring_num; i++) { + if (priv->rx_ring[i]) + mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], + priv->prof->rx_ring_size, priv->stride); + if (priv->rx_cq[i]) + mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); + } + +} + +int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) +{ + struct mlx4_en_port_profile *prof = priv->prof; + int i; + int node; + + /* Create tx Rings */ + for (i = 0; i < priv->tx_ring_num; i++) { + node = cpu_to_node(i % num_online_cpus()); + if (mlx4_en_create_cq(priv, &priv->tx_cq[i], + prof->tx_ring_size, i, TX, node)) + goto err; + + if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], + prof->tx_ring_size, TXBB_SIZE, + node, i)) + goto err; + } + + /* Create rx Rings */ + for (i = 0; i < priv->rx_ring_num; i++) { + node = cpu_to_node(i % num_online_cpus()); + if (mlx4_en_create_cq(priv, &priv->rx_cq[i], + prof->rx_ring_size, i, RX, node)) + goto err; + + if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i], + prof->rx_ring_size, priv->stride, + node)) + goto err; + } + +#ifdef CONFIG_RFS_ACCEL + if (priv->mdev->dev->caps.comp_pool) { + priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool); + if (!priv->dev->rx_cpu_rmap) + goto err; + } +#endif + + return 0; + +err: + en_err(priv, "Failed to allocate NIC resources\n"); + for (i = 0; i < priv->rx_ring_num; i++) { + if (priv->rx_ring[i]) + mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], + prof->rx_ring_size, + priv->stride); + if (priv->rx_cq[i]) + mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); + } + for (i = 0; i < priv->tx_ring_num; i++) { + if (priv->tx_ring[i]) + mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); + if (priv->tx_cq[i]) + mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); + } + return -ENOMEM; +} + + +void mlx4_en_destroy_netdev(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + + en_dbg(DRV, priv, "Destroying netdev on 
port:%d\n", priv->port); + + /* Unregister device - this will close the port if it was up */ + if (priv->registered) + unregister_netdev(dev); + + if (priv->allocated) + mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); + + cancel_delayed_work(&priv->stats_task); + cancel_delayed_work(&priv->service_task); + /* flush any pending task for this netdev */ + flush_workqueue(mdev->workqueue); + + /* Detach the netdev so tasks would not attempt to access it */ + mutex_lock(&mdev->state_lock); + mdev->pndev[priv->port] = NULL; + mdev->upper[priv->port] = NULL; + mutex_unlock(&mdev->state_lock); + + mlx4_en_free_resources(priv); + + kfree(priv->tx_ring); + kfree(priv->tx_cq); + + free_netdev(dev); +} + +static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int err = 0; + + en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n", + dev->mtu, new_mtu); + + if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) { + en_err(priv, "Bad MTU size:%d.\n", new_mtu); + return -EPERM; + } + dev->mtu = new_mtu; + + if (netif_running(dev)) { + mutex_lock(&mdev->state_lock); + if (!mdev->device_up) { + /* NIC is probably restarting - let watchdog task reset + * the port */ + en_dbg(DRV, priv, "Change MTU called with card down!?\n"); + } else { + mlx4_en_stop_port(dev, 1); + err = mlx4_en_start_port(dev); + if (err) { + en_err(priv, "Failed restarting port:%d\n", + priv->port); + queue_work(mdev->workqueue, &priv->watchdog_task); + } + } + mutex_unlock(&mdev->state_lock); + } + return 0; +} + +static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + struct hwtstamp_config config; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + /* device doesn't support time stamping */ + if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)) + return -EINVAL; + + /* TX HW timestamp */ + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + /* RX HW timestamp */ + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + if (mlx4_en_reset_config(dev, config, dev->features)) { + config.tx_type = HWTSTAMP_TX_OFF; + config.rx_filter = HWTSTAMP_FILTER_NONE; + } + + return copy_to_user(ifr->ifr_data, &config, + sizeof(config)) ? -EFAULT : 0; +} + +static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config, + sizeof(priv->hwtstamp_config)) ? 
-EFAULT : 0; +} + +static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCSHWTSTAMP: + return mlx4_en_hwtstamp_set(dev, ifr); + case SIOCGHWTSTAMP: + return mlx4_en_hwtstamp_get(dev, ifr); + default: + return -EOPNOTSUPP; + } +} + +static int mlx4_en_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + bool reset = false; + int ret = 0; + + if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) { + en_info(priv, "Turn %s RX-FCS\n", + (features & NETIF_F_RXFCS) ? "ON" : "OFF"); + reset = true; + } + + if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) { + u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0; + + en_info(priv, "Turn %s RX-ALL\n", + ignore_fcs_value ? "ON" : "OFF"); + ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev, + priv->port, ignore_fcs_value); + if (ret) + return ret; + } + + if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) { + en_info(priv, "Turn %s RX vlan strip offload\n", + (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF"); + reset = true; + } + + if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX)) + en_info(priv, "Turn %s TX vlan strip offload\n", + (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF"); + + if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) { + en_info(priv, "Turn %s loopback\n", + (features & NETIF_F_LOOPBACK) ? "ON" : "OFF"); + mlx4_en_update_loopback_state(netdev, features); + } + + if (reset) { + ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config, + features); + if (ret) + return ret; + } + + return 0; +} + +static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac) +{ + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; + u64 mac_u64 = mlx4_mac_to_u64(mac); + + if (!is_valid_ether_addr(mac)) + return -EINVAL; + + return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64); +} + +static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos) +{ + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; + + return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos); +} + +static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, + int max_tx_rate) +{ + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; + + return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate, + max_tx_rate); +} + +static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting) +{ + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; + + return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting); +} + +static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf) +{ + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; + + return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf); +} + +static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state) +{ + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; + + return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state); +} + +#define PORT_ID_BYTE_LEN 8 +static int mlx4_en_get_phys_port_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_dev *mdev = priv->mdev->dev; 
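For reference, the SIOCSHWTSTAMP path serviced by mlx4_en_hwtstamp_set() above can be exercised from user space roughly as below. This is a minimal sketch: "eth0" is a placeholder interface name, the call normally needs CAP_NET_ADMIN, and the driver reports back rx_filter as HWTSTAMP_FILTER_ALL for the PTP filters it coalesces.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
    struct hwtstamp_config cfg = {
        .tx_type   = HWTSTAMP_TX_ON,
        .rx_filter = HWTSTAMP_FILTER_ALL,
    };
    struct ifreq ifr;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0)
        return 1;

    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
    ifr.ifr_data = (char *)&cfg;

    if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
        perror("SIOCSHWTSTAMP");
    else
        printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

    close(fd);
    return 0;
}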
+ int i; + u64 phys_port_id = mdev->caps.phys_port_id[priv->port]; + + if (!phys_port_id) + return -EOPNOTSUPP; + + ppid->id_len = sizeof(phys_port_id); + for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) { + ppid->id[i] = phys_port_id & 0xff; + phys_port_id >>= 8; + } + return 0; +} + +#ifdef CONFIG_MLX4_EN_VXLAN +static void mlx4_en_add_vxlan_offloads(struct work_struct *work) +{ + int ret; + struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, + vxlan_add_task); + + ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port); + if (ret) + goto out; + + ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, + VXLAN_STEER_BY_OUTER_MAC, 1); +out: + if (ret) { + en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); + return; + } + + /* set offloads */ + priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; + priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL; +} + +static void mlx4_en_del_vxlan_offloads(struct work_struct *work) +{ + int ret; + struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, + vxlan_del_task); + /* unset offloads */ + priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL); + priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL; + priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL; + + ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, + VXLAN_STEER_BY_OUTER_MAC, 0); + if (ret) + en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); + + priv->vxlan_port = 0; +} + +static void mlx4_en_add_vxlan_port(struct net_device *dev, + sa_family_t sa_family, __be16 port) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + __be16 current_port; + + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + return; + + if (sa_family == AF_INET6) + return; + + current_port = priv->vxlan_port; + if (current_port && current_port != port) { + en_warn(priv, "vxlan port %d configured, can't add port %d\n", + ntohs(current_port), ntohs(port)); + return; + } + + priv->vxlan_port = port; + queue_work(priv->mdev->workqueue, &priv->vxlan_add_task); +} + +static void mlx4_en_del_vxlan_port(struct net_device *dev, + sa_family_t sa_family, __be16 port) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + __be16 current_port; + + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + return; + + if (sa_family == AF_INET6) + return; + + current_port = priv->vxlan_port; + if (current_port != port) { + en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port)); + return; + } + + queue_work(priv->mdev->workqueue, &priv->vxlan_del_task); +} + +static netdev_features_t mlx4_en_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + features = vlan_features_check(skb, features); + return vxlan_features_check(skb, features); +} +#endif + +static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[queue_index]; + struct mlx4_update_qp_params params; + int err; + + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT)) + return -EOPNOTSUPP; + + /* rate provided to us in Mbs, check if it fits into 12 bits, if not use Gbs */ + if (maxrate >> 12) { + params.rate_unit = MLX4_QP_RATE_LIMIT_GBS; + params.rate_val = maxrate / 1000; + } else if (maxrate) { + 
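mlx4_en_set_tx_maxrate() here receives the rate in Mb/s and, because the hardware rate field is only 12 bits wide, switches to Gb/s units whenever the value does not fit. A standalone sketch of that unit selection; the printed strings are illustrative, while the driver itself uses the MLX4_QP_RATE_LIMIT_* unit codes.

#include <stdio.h>

/* Same 12-bit check as mlx4_en_set_tx_maxrate(): rates that do not fit
 * are expressed in Gb/s, and zero revokes the limit. */
static void pick_rate_unit(unsigned int maxrate_mbps)
{
    if (maxrate_mbps >> 12)
        printf("%u Mb/s -> Gb/s units, val=%u\n",
               maxrate_mbps, maxrate_mbps / 1000);
    else if (maxrate_mbps)
        printf("%u Mb/s -> Mb/s units, val=%u\n",
               maxrate_mbps, maxrate_mbps);
    else
        printf("0 -> rate limit revoked\n");
}

int main(void)
{
    pick_rate_unit(3000);    /* fits in 12 bits, stays in Mb/s       */
    pick_rate_unit(40000);   /* 40 Gb/s, falls back to Gb/s units    */
    pick_rate_unit(0);
    return 0;
}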
params.rate_unit = MLX4_QP_RATE_LIMIT_MBS; + params.rate_val = maxrate; + } else { /* zero serves to revoke the QP rate-limitation */ + params.rate_unit = 0; + params.rate_val = 0; + } + + err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT, + &params); + return err; +} + +static const struct net_device_ops mlx4_netdev_ops = { + .ndo_open = mlx4_en_open, + .ndo_stop = mlx4_en_close, + .ndo_start_xmit = mlx4_en_xmit, + .ndo_select_queue = mlx4_en_select_queue, + .ndo_get_stats = mlx4_en_get_stats, + .ndo_set_rx_mode = mlx4_en_set_rx_mode, + .ndo_set_mac_address = mlx4_en_set_mac, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = mlx4_en_change_mtu, + .ndo_do_ioctl = mlx4_en_ioctl, + .ndo_tx_timeout = mlx4_en_tx_timeout, + .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = mlx4_en_netpoll, +#endif + .ndo_set_features = mlx4_en_set_features, + .ndo_setup_tc = mlx4_en_setup_tc, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = mlx4_en_filter_rfs, +#endif +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = mlx4_en_low_latency_recv, +#endif + .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, +#ifdef CONFIG_MLX4_EN_VXLAN + .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, + .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, + .ndo_features_check = mlx4_en_features_check, +#endif + .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, +}; + +static const struct net_device_ops mlx4_netdev_ops_master = { + .ndo_open = mlx4_en_open, + .ndo_stop = mlx4_en_close, + .ndo_start_xmit = mlx4_en_xmit, + .ndo_select_queue = mlx4_en_select_queue, + .ndo_get_stats = mlx4_en_get_stats, + .ndo_set_rx_mode = mlx4_en_set_rx_mode, + .ndo_set_mac_address = mlx4_en_set_mac, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = mlx4_en_change_mtu, + .ndo_tx_timeout = mlx4_en_tx_timeout, + .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, + .ndo_set_vf_mac = mlx4_en_set_vf_mac, + .ndo_set_vf_vlan = mlx4_en_set_vf_vlan, + .ndo_set_vf_rate = mlx4_en_set_vf_rate, + .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk, + .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, + .ndo_get_vf_config = mlx4_en_get_vf_config, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = mlx4_en_netpoll, +#endif + .ndo_set_features = mlx4_en_set_features, + .ndo_setup_tc = mlx4_en_setup_tc, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = mlx4_en_filter_rfs, +#endif + .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, +#ifdef CONFIG_MLX4_EN_VXLAN + .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, + .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, + .ndo_features_check = mlx4_en_features_check, +#endif + .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, +}; + +struct mlx4_en_bond { + struct work_struct work; + struct mlx4_en_priv *priv; + int is_bonded; + struct mlx4_port_map port_map; +}; + +static void mlx4_en_bond_work(struct work_struct *work) +{ + struct mlx4_en_bond *bond = container_of(work, + struct mlx4_en_bond, + work); + int err = 0; + struct mlx4_dev *dev = bond->priv->mdev->dev; + + if (bond->is_bonded) { + if (!mlx4_is_bonded(dev)) { + err = mlx4_bond(dev); + if (err) + en_err(bond->priv, "Fail to bond device\n"); + } + if (!err) { + err = mlx4_port_map_set(dev, &bond->port_map); + if (err) + en_err(bond->priv, "Fail to set port map [%d][%d]: %d\n", + bond->port_map.port1, + bond->port_map.port2, + err); + } + } else if (mlx4_is_bonded(dev)) { + err = 
mlx4_unbond(dev); + if (err) + en_err(bond->priv, "Fail to unbond device\n"); + } + dev_put(bond->priv->dev); + kfree(bond); +} + +static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded, + u8 v2p_p1, u8 v2p_p2) +{ + struct mlx4_en_bond *bond = NULL; + + bond = kzalloc(sizeof(*bond), GFP_ATOMIC); + if (!bond) + return -ENOMEM; + + INIT_WORK(&bond->work, mlx4_en_bond_work); + bond->priv = priv; + bond->is_bonded = is_bonded; + bond->port_map.port1 = v2p_p1; + bond->port_map.port2 = v2p_p2; + dev_hold(priv->dev); + queue_work(priv->mdev->workqueue, &bond->work); + return 0; +} + +int mlx4_en_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); + u8 port = 0; + struct mlx4_en_dev *mdev; + struct mlx4_dev *dev; + int i, num_eth_ports = 0; + bool do_bond = true; + struct mlx4_en_priv *priv; + u8 v2p_port1 = 0; + u8 v2p_port2 = 0; + + if (!net_eq(dev_net(ndev), &init_net)) + return NOTIFY_DONE; + + mdev = container_of(this, struct mlx4_en_dev, nb); + dev = mdev->dev; + + /* Go into this mode only when two network devices set on two ports + * of the same mlx4 device are slaves of the same bonding master + */ + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { + ++num_eth_ports; + if (!port && (mdev->pndev[i] == ndev)) + port = i; + mdev->upper[i] = mdev->pndev[i] ? + netdev_master_upper_dev_get(mdev->pndev[i]) : NULL; + /* condition not met: network device is a slave */ + if (!mdev->upper[i]) + do_bond = false; + if (num_eth_ports < 2) + continue; + /* condition not met: same master */ + if (mdev->upper[i] != mdev->upper[i-1]) + do_bond = false; + } + /* condition not met: 2 slaves */ + do_bond = (num_eth_ports == 2) ? do_bond : false; + + /* handle only events that come with enough info */ + if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port) + return NOTIFY_DONE; + + priv = netdev_priv(ndev); + if (do_bond) { + struct netdev_notifier_bonding_info *notifier_info = ptr; + struct netdev_bonding_info *bonding_info = + &notifier_info->bonding_info; + + /* required mode 1, 2 or 4 */ + if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) && + (bonding_info->master.bond_mode != BOND_MODE_XOR) && + (bonding_info->master.bond_mode != BOND_MODE_8023AD)) + do_bond = false; + + /* require exactly 2 slaves */ + if (bonding_info->master.num_slaves != 2) + do_bond = false; + + /* calc v2p */ + if (do_bond) { + if (bonding_info->master.bond_mode == + BOND_MODE_ACTIVEBACKUP) { + /* in active-backup mode virtual ports are + * mapped to the physical port of the active + * slave */ + if (bonding_info->slave.state == + BOND_STATE_BACKUP) { + if (port == 1) { + v2p_port1 = 2; + v2p_port2 = 2; + } else { + v2p_port1 = 1; + v2p_port2 = 1; + } + } else { /* BOND_STATE_ACTIVE */ + if (port == 1) { + v2p_port1 = 1; + v2p_port2 = 1; + } else { + v2p_port1 = 2; + v2p_port2 = 2; + } + } + } else { /* Active-Active */ + /* in active-active mode a virtual port is + * mapped to the native physical port if and only + * if the physical port is up */ + __s8 link = bonding_info->slave.link; + + if (port == 1) + v2p_port2 = 2; + else + v2p_port1 = 1; + if ((link == BOND_LINK_UP) || + (link == BOND_LINK_FAIL)) { + if (port == 1) + v2p_port1 = 1; + else + v2p_port2 = 2; + } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */ + if (port == 1) + v2p_port1 = 2; + else + v2p_port2 = 1; + } + } + } + } + + mlx4_en_queue_bond_work(priv, do_bond, + v2p_port1, v2p_port2); + + return NOTIFY_DONE; +} + +void 
mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev, + struct mlx4_en_stats_bitmap *stats_bitmap, + u8 rx_ppp, u8 rx_pause, + u8 tx_ppp, u8 tx_pause) +{ + int last_i = NUM_MAIN_STATS + NUM_PORT_STATS; + + if (!mlx4_is_slave(dev) && + (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) { + mutex_lock(&stats_bitmap->mutex); + bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS); + + if (rx_ppp) + bitmap_set(stats_bitmap->bitmap, last_i, + NUM_FLOW_PRIORITY_STATS_RX); + last_i += NUM_FLOW_PRIORITY_STATS_RX; + + if (rx_pause && !(rx_ppp)) + bitmap_set(stats_bitmap->bitmap, last_i, + NUM_FLOW_STATS_RX); + last_i += NUM_FLOW_STATS_RX; + + if (tx_ppp) + bitmap_set(stats_bitmap->bitmap, last_i, + NUM_FLOW_PRIORITY_STATS_TX); + last_i += NUM_FLOW_PRIORITY_STATS_TX; + + if (tx_pause && !(tx_ppp)) + bitmap_set(stats_bitmap->bitmap, last_i, + NUM_FLOW_STATS_TX); + last_i += NUM_FLOW_STATS_TX; + + mutex_unlock(&stats_bitmap->mutex); + } +} + +void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev, + struct mlx4_en_stats_bitmap *stats_bitmap, + u8 rx_ppp, u8 rx_pause, + u8 tx_ppp, u8 tx_pause) +{ + int last_i = 0; + + mutex_init(&stats_bitmap->mutex); + bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS); + + if (mlx4_is_slave(dev)) { + bitmap_set(stats_bitmap->bitmap, last_i + + MLX4_FIND_NETDEV_STAT(rx_packets), 1); + bitmap_set(stats_bitmap->bitmap, last_i + + MLX4_FIND_NETDEV_STAT(tx_packets), 1); + bitmap_set(stats_bitmap->bitmap, last_i + + MLX4_FIND_NETDEV_STAT(rx_bytes), 1); + bitmap_set(stats_bitmap->bitmap, last_i + + MLX4_FIND_NETDEV_STAT(tx_bytes), 1); + bitmap_set(stats_bitmap->bitmap, last_i + + MLX4_FIND_NETDEV_STAT(rx_dropped), 1); + bitmap_set(stats_bitmap->bitmap, last_i + + MLX4_FIND_NETDEV_STAT(tx_dropped), 1); + } else { + bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS); + } + last_i += NUM_MAIN_STATS; + + bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS); + last_i += NUM_PORT_STATS; + + mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap, + rx_ppp, rx_pause, + tx_ppp, tx_pause); + last_i += NUM_FLOW_STATS; + + if (!mlx4_is_slave(dev)) + bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS); +} + +int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, + struct mlx4_en_port_profile *prof) +{ + struct net_device *dev; + struct mlx4_en_priv *priv; + int i; + int err; + u64 mac_u64; + + dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), + MAX_TX_RINGS, MAX_RX_RINGS); + if (dev == NULL) + return -ENOMEM; + + netif_set_real_num_tx_queues(dev, prof->tx_ring_num); + netif_set_real_num_rx_queues(dev, prof->rx_ring_num); + + SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev); + dev->dev_port = port - 1; + + /* + * Initialize driver private data + */ + + priv = netdev_priv(dev); + memset(priv, 0, sizeof(struct mlx4_en_priv)); + spin_lock_init(&priv->stats_lock); + INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); + INIT_WORK(&priv->watchdog_task, mlx4_en_restart); + INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); + INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); + INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); +#ifdef CONFIG_MLX4_EN_VXLAN + INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads); + INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads); +#endif +#ifdef CONFIG_RFS_ACCEL + INIT_LIST_HEAD(&priv->filters); + spin_lock_init(&priv->filters_lock); +#endif + + priv->dev = dev; + priv->mdev = mdev; + priv->ddev = &mdev->pdev->dev; + priv->prof = prof; + priv->port = port; + priv->port_up = false; + priv->flags 
= prof->flags; + priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME; + priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | + MLX4_WQE_CTRL_SOLICITED); + priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; + priv->tx_ring_num = prof->tx_ring_num; + priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK; + netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key)); + + priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS, + GFP_KERNEL); + if (!priv->tx_ring) { + err = -ENOMEM; + goto out; + } + priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS, + GFP_KERNEL); + if (!priv->tx_cq) { + err = -ENOMEM; + goto out; + } + priv->rx_ring_num = prof->rx_ring_num; + priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0; + priv->cqe_size = mdev->dev->caps.cqe_size; + priv->mac_index = -1; + priv->msg_enable = MLX4_EN_MSG_LEVEL; +#ifdef CONFIG_MLX4_EN_DCB + if (!mlx4_is_slave(priv->mdev->dev)) { + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { + dev->dcbnl_ops = &mlx4_en_dcbnl_ops; + } else { + en_info(priv, "enabling only PFC DCB ops\n"); + dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops; + } + } +#endif + + for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) + INIT_HLIST_HEAD(&priv->mac_hash[i]); + + /* Query for default mac and max mtu */ + priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; + + if (mdev->dev->caps.rx_checksum_flags_port[priv->port] & + MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP) + priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP; + + /* Set default MAC */ + dev->addr_len = ETH_ALEN; + mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); + if (!is_valid_ether_addr(dev->dev_addr)) { + if (mlx4_is_slave(priv->mdev->dev)) { + eth_hw_addr_random(dev); + en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr); + mac_u64 = mlx4_mac_to_u64(dev->dev_addr); + mdev->dev->caps.def_mac[priv->port] = mac_u64; + } else { + en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n", + priv->port, dev->dev_addr); + err = -EINVAL; + goto out; + } + } + + memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac)); + + priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + + DS_SIZE * MLX4_EN_MAX_RX_FRAGS); + err = mlx4_en_alloc_resources(priv); + if (err) + goto out; + + /* Initialize time stamping config */ + priv->hwtstamp_config.flags = 0; + priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; + priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + + /* Allocate page for receive rings */ + err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, + MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE); + if (err) { + en_err(priv, "Failed to allocate page for rx qps\n"); + goto out; + } + priv->allocated = 1; + + /* + * Initialize netdev entry points + */ + if (mlx4_is_master(priv->mdev->dev)) + dev->netdev_ops = &mlx4_netdev_ops_master; + else + dev->netdev_ops = &mlx4_netdev_ops; + dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT; + netif_set_real_num_tx_queues(dev, priv->tx_ring_num); + netif_set_real_num_rx_queues(dev, priv->rx_ring_num); + + dev->ethtool_ops = &mlx4_en_ethtool_ops; + + /* + * Set driver features + */ + dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + if (mdev->LSO_support) + dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; + + dev->vlan_features = dev->hw_features; + + dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH; + dev->features = dev->hw_features | NETIF_F_HIGHDMA | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; + dev->hw_features |= NETIF_F_LOOPBACK | + 
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + + if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) + dev->hw_features |= NETIF_F_RXFCS; + + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS) + dev->hw_features |= NETIF_F_RXALL; + + if (mdev->dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED && + mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC) + dev->hw_features |= NETIF_F_NTUPLE; + + if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) + dev->priv_flags |= IFF_UNICAST_FLT; + + /* Setting a default hash function value */ + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) { + priv->rss_hash_fn = ETH_RSS_HASH_TOP; + } else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) { + priv->rss_hash_fn = ETH_RSS_HASH_XOR; + } else { + en_warn(priv, + "No RSS hash capabilities exposed, using Toeplitz\n"); + priv->rss_hash_fn = ETH_RSS_HASH_TOP; + } + + mdev->pndev[port] = dev; + mdev->upper[port] = NULL; + + netif_carrier_off(dev); + mlx4_en_set_default_moderation(priv); + + en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); + en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); + + mlx4_en_update_loopback_state(priv->dev, priv->dev->features); + + /* Configure port */ + mlx4_en_calc_rx_buf(dev); + err = mlx4_SET_PORT_general(mdev->dev, priv->port, + priv->rx_skb_size + ETH_FCS_LEN, + prof->tx_pause, prof->tx_ppp, + prof->rx_pause, prof->rx_ppp); + if (err) { + en_err(priv, "Failed setting port general configurations for port %d, with error %d\n", + priv->port, err); + goto out; + } + + if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { + err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); + if (err) { + en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n", + err); + goto out; + } + } + + /* Init port */ + en_warn(priv, "Initializing port\n"); + err = mlx4_INIT_PORT(mdev->dev, priv->port); + if (err) { + en_err(priv, "Failed Initializing port\n"); + goto out; + } + queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); + + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) + queue_delayed_work(mdev->workqueue, &priv->service_task, + SERVICE_TASK_DELAY); + + mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap, + mdev->profile.prof[priv->port].rx_ppp, + mdev->profile.prof[priv->port].rx_pause, + mdev->profile.prof[priv->port].tx_ppp, + mdev->profile.prof[priv->port].tx_pause); + + err = register_netdev(dev); + if (err) { + en_err(priv, "Netdev registration failed for port %d\n", port); + goto out; + } + + priv->registered = 1; + + return 0; + +out: + mlx4_en_destroy_netdev(dev); + return err; +} + +int mlx4_en_reset_config(struct net_device *dev, + struct hwtstamp_config ts_config, + netdev_features_t features) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int port_up = 0; + int err = 0; + + if (priv->hwtstamp_config.tx_type == ts_config.tx_type && + priv->hwtstamp_config.rx_filter == ts_config.rx_filter && + !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && + !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) + return 0; /* Nothing to change */ + + if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && + (features & NETIF_F_HW_VLAN_CTAG_RX) && + (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) { + en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n"); + return -EINVAL; + } + + mutex_lock(&mdev->state_lock); + if 
(priv->port_up) { + port_up = 1; + mlx4_en_stop_port(dev, 1); + } + + mlx4_en_free_resources(priv); + + en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n", + ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX)); + + priv->hwtstamp_config.tx_type = ts_config.tx_type; + priv->hwtstamp_config.rx_filter = ts_config.rx_filter; + + if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + dev->features |= NETIF_F_HW_VLAN_CTAG_RX; + else + dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; + } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) { + /* RX time-stamping is OFF, update the RX vlan offload + * to the latest wanted state + */ + if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX) + dev->features |= NETIF_F_HW_VLAN_CTAG_RX; + else + dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; + } + + if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) { + if (features & NETIF_F_RXFCS) + dev->features |= NETIF_F_RXFCS; + else + dev->features &= ~NETIF_F_RXFCS; + } + + /* RX vlan offload and RX time-stamping can't co-exist ! + * Regardless of the caller's choice, + * Turn Off RX vlan offload in case of time-stamping is ON + */ + if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) { + if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) + en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n"); + dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; + } + + err = mlx4_en_alloc_resources(priv); + if (err) { + en_err(priv, "Failed reallocating port resources\n"); + goto out; + } + if (port_up) { + err = mlx4_en_start_port(dev); + if (err) + en_err(priv, "Failed starting port\n"); + } + +out: + mutex_unlock(&mdev->state_lock); + netdev_features_change(dev); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c new file mode 100644 index 000000000..0a56f010c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -0,0 +1,352 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + + +#include <linux/if_vlan.h> + +#include <linux/mlx4/device.h> +#include <linux/mlx4/cmd.h> + +#include "en_port.h" +#include "mlx4_en.h" + + +int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_vlan_fltr_mbox *filter; + int i; + int j; + int index = 0; + u32 entry; + int err = 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + filter = mailbox->buf; + for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) { + entry = 0; + for (j = 0; j < 32; j++) + if (test_bit(index++, priv->active_vlans)) + entry |= 1 << j; + filter->entry[i] = cpu_to_be32(entry); + } + err = mlx4_cmd(dev, mailbox->dma, priv->port, 0, MLX4_CMD_SET_VLAN_FLTR, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port) +{ + struct mlx4_en_query_port_context *qport_context; + struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]); + struct mlx4_en_port_state *state = &priv->port_state; + struct mlx4_cmd_mailbox *mailbox; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0, + MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + if (err) + goto out; + qport_context = mailbox->buf; + + /* This command is always accessed from Ethtool context + * already synchronized, no need for locking */ + state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK); + switch (qport_context->link_speed & MLX4_EN_SPEED_MASK) { + case MLX4_EN_100M_SPEED: + state->link_speed = SPEED_100; + break; + case MLX4_EN_1G_SPEED: + state->link_speed = SPEED_1000; + break; + case MLX4_EN_10G_SPEED_XAUI: + case MLX4_EN_10G_SPEED_XFI: + state->link_speed = SPEED_10000; + break; + case MLX4_EN_20G_SPEED: + state->link_speed = SPEED_20000; + break; + case MLX4_EN_40G_SPEED: + state->link_speed = SPEED_40000; + break; + case MLX4_EN_56G_SPEED: + state->link_speed = SPEED_56000; + break; + default: + state->link_speed = -1; + break; + } + + state->transceiver = qport_context->transceiver; + + state->flags = 0; /* Reset and recalculate the port flags */ + state->flags |= (qport_context->link_up & MLX4_EN_ANC_MASK) ? + MLX4_EN_PORT_ANC : 0; + state->flags |= (qport_context->autoneg & MLX4_EN_AUTONEG_MASK) ? + MLX4_EN_PORT_ANE : 0; + +out: + mlx4_free_cmd_mailbox(mdev->dev, mailbox); + return err; +} + +/* Each counter set is located in struct mlx4_en_stat_out_mbox + * with a const offset between its prio components. + * This function runs over a counter set and sums all of its prio components. 
+ */ +static unsigned long en_stats_adder(__be64 *start, __be64 *next, int num) +{ + __be64 *curr = start; + unsigned long ret = 0; + int i; + int offset = next - start; + + for (i = 0; i < num; i++) { + ret += be64_to_cpu(*curr); + curr += offset; + } + + return ret; +} + +int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) +{ + struct mlx4_en_stat_out_mbox *mlx4_en_stats; + struct mlx4_en_stat_out_flow_control_mbox *flowstats; + struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]); + struct net_device_stats *stats = &priv->stats; + struct mlx4_cmd_mailbox *mailbox; + u64 in_mod = reset << 8 | port; + int err; + int i; + + mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0, + MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + if (err) + goto out; + + mlx4_en_stats = mailbox->buf; + + spin_lock_bh(&priv->stats_lock); + + stats->rx_packets = 0; + stats->rx_bytes = 0; + priv->port_stats.rx_chksum_good = 0; + priv->port_stats.rx_chksum_none = 0; + priv->port_stats.rx_chksum_complete = 0; + for (i = 0; i < priv->rx_ring_num; i++) { + stats->rx_packets += priv->rx_ring[i]->packets; + stats->rx_bytes += priv->rx_ring[i]->bytes; + priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok; + priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none; + priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete; + } + stats->tx_packets = 0; + stats->tx_bytes = 0; + priv->port_stats.tx_chksum_offload = 0; + priv->port_stats.queue_stopped = 0; + priv->port_stats.wake_queue = 0; + priv->port_stats.tso_packets = 0; + priv->port_stats.xmit_more = 0; + + for (i = 0; i < priv->tx_ring_num; i++) { + const struct mlx4_en_tx_ring *ring = priv->tx_ring[i]; + + stats->tx_packets += ring->packets; + stats->tx_bytes += ring->bytes; + priv->port_stats.tx_chksum_offload += ring->tx_csum; + priv->port_stats.queue_stopped += ring->queue_stopped; + priv->port_stats.wake_queue += ring->wake_queue; + priv->port_stats.tso_packets += ring->tso_packets; + priv->port_stats.xmit_more += ring->xmit_more; + } + + /* net device stats */ + stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) + + be32_to_cpu(mlx4_en_stats->RJBBR) + + be32_to_cpu(mlx4_en_stats->RCRC) + + be32_to_cpu(mlx4_en_stats->RRUNT) + + be64_to_cpu(mlx4_en_stats->RInRangeLengthErr) + + be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr) + + be32_to_cpu(mlx4_en_stats->RSHORT) + + en_stats_adder(&mlx4_en_stats->RGIANT_prio_0, + &mlx4_en_stats->RGIANT_prio_1, + NUM_PRIORITIES); + stats->tx_errors = en_stats_adder(&mlx4_en_stats->TGIANT_prio_0, + &mlx4_en_stats->TGIANT_prio_1, + NUM_PRIORITIES); + stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0, + &mlx4_en_stats->MCAST_prio_1, + NUM_PRIORITIES); + stats->collisions = 0; + stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP); + stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); + stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); + stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); + stats->rx_frame_errors = 0; + stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); + stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); + stats->tx_aborted_errors = 0; + stats->tx_carrier_errors = 0; + stats->tx_fifo_errors = 0; + stats->tx_heartbeat_errors = 0; + stats->tx_window_errors = 0; + stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP); + + /* RX stats */ + 
priv->pkstats.rx_multicast_packets = stats->multicast; + priv->pkstats.rx_broadcast_packets = + en_stats_adder(&mlx4_en_stats->RBCAST_prio_0, + &mlx4_en_stats->RBCAST_prio_1, + NUM_PRIORITIES); + priv->pkstats.rx_jabbers = be32_to_cpu(mlx4_en_stats->RJBBR); + priv->pkstats.rx_in_range_length_error = + be64_to_cpu(mlx4_en_stats->RInRangeLengthErr); + priv->pkstats.rx_out_range_length_error = + be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr); + + /* Tx stats */ + priv->pkstats.tx_multicast_packets = + en_stats_adder(&mlx4_en_stats->TMCAST_prio_0, + &mlx4_en_stats->TMCAST_prio_1, + NUM_PRIORITIES); + priv->pkstats.tx_broadcast_packets = + en_stats_adder(&mlx4_en_stats->TBCAST_prio_0, + &mlx4_en_stats->TBCAST_prio_1, + NUM_PRIORITIES); + + priv->pkstats.rx_prio[0][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0); + priv->pkstats.rx_prio[0][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_0); + priv->pkstats.rx_prio[1][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1); + priv->pkstats.rx_prio[1][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_1); + priv->pkstats.rx_prio[2][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2); + priv->pkstats.rx_prio[2][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_2); + priv->pkstats.rx_prio[3][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3); + priv->pkstats.rx_prio[3][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_3); + priv->pkstats.rx_prio[4][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4); + priv->pkstats.rx_prio[4][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_4); + priv->pkstats.rx_prio[5][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5); + priv->pkstats.rx_prio[5][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_5); + priv->pkstats.rx_prio[6][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6); + priv->pkstats.rx_prio[6][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_6); + priv->pkstats.rx_prio[7][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7); + priv->pkstats.rx_prio[7][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_7); + priv->pkstats.rx_prio[8][0] = be64_to_cpu(mlx4_en_stats->RTOT_novlan); + priv->pkstats.rx_prio[8][1] = be64_to_cpu(mlx4_en_stats->ROCT_novlan); + priv->pkstats.tx_prio[0][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0); + priv->pkstats.tx_prio[0][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_0); + priv->pkstats.tx_prio[1][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1); + priv->pkstats.tx_prio[1][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_1); + priv->pkstats.tx_prio[2][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2); + priv->pkstats.tx_prio[2][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_2); + priv->pkstats.tx_prio[3][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3); + priv->pkstats.tx_prio[3][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_3); + priv->pkstats.tx_prio[4][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4); + priv->pkstats.tx_prio[4][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_4); + priv->pkstats.tx_prio[5][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5); + priv->pkstats.tx_prio[5][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_5); + priv->pkstats.tx_prio[6][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6); + priv->pkstats.tx_prio[6][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_6); + priv->pkstats.tx_prio[7][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7); + priv->pkstats.tx_prio[7][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_7); + priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan); + priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan); + + spin_unlock_bh(&priv->stats_lock); + + /* 0xffs indicates invalid value */ + memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES); + + if (mdev->dev->caps.flags2 & 
MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) { + memset(mailbox->buf, 0, + sizeof(*flowstats) * MLX4_NUM_PRIORITIES); + err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, + in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL, + 0, MLX4_CMD_DUMP_ETH_STATS, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); + if (err) + goto out; + } + + flowstats = mailbox->buf; + + spin_lock_bh(&priv->stats_lock); + + for (i = 0; i < MLX4_NUM_PRIORITIES; i++) { + priv->rx_priority_flowstats[i].rx_pause = + be64_to_cpu(flowstats[i].rx_pause); + priv->rx_priority_flowstats[i].rx_pause_duration = + be64_to_cpu(flowstats[i].rx_pause_duration); + priv->rx_priority_flowstats[i].rx_pause_transition = + be64_to_cpu(flowstats[i].rx_pause_transition); + priv->tx_priority_flowstats[i].tx_pause = + be64_to_cpu(flowstats[i].tx_pause); + priv->tx_priority_flowstats[i].tx_pause_duration = + be64_to_cpu(flowstats[i].tx_pause_duration); + priv->tx_priority_flowstats[i].tx_pause_transition = + be64_to_cpu(flowstats[i].tx_pause_transition); + } + + /* if pfc is not in use, all priorities counters have the same value */ + priv->rx_flowstats.rx_pause = + be64_to_cpu(flowstats[0].rx_pause); + priv->rx_flowstats.rx_pause_duration = + be64_to_cpu(flowstats[0].rx_pause_duration); + priv->rx_flowstats.rx_pause_transition = + be64_to_cpu(flowstats[0].rx_pause_transition); + priv->tx_flowstats.tx_pause = + be64_to_cpu(flowstats[0].tx_pause); + priv->tx_flowstats.tx_pause_duration = + be64_to_cpu(flowstats[0].tx_pause_duration); + priv->tx_flowstats.tx_pause_transition = + be64_to_cpu(flowstats[0].tx_pause_transition); + + spin_unlock_bh(&priv->stats_lock); + +out: + mlx4_free_cmd_mailbox(mdev->dev, mailbox); + return err; +} + diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h new file mode 100644 index 000000000..040da4b16 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h @@ -0,0 +1,587 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#ifndef _MLX4_EN_PORT_H_ +#define _MLX4_EN_PORT_H_ + + +#define SET_PORT_GEN_ALL_VALID 0x7 +#define SET_PORT_PROMISC_SHIFT 31 +#define SET_PORT_MC_PROMISC_SHIFT 30 + +#define MLX4_EN_NUM_TC 8 + +#define VLAN_FLTR_SIZE 128 +struct mlx4_set_vlan_fltr_mbox { + __be32 entry[VLAN_FLTR_SIZE]; +}; + + +enum { + MLX4_MCAST_CONFIG = 0, + MLX4_MCAST_DISABLE = 1, + MLX4_MCAST_ENABLE = 2, +}; + +enum mlx4_link_mode { + MLX4_1000BASE_CX_SGMII = 0, + MLX4_1000BASE_KX = 1, + MLX4_10GBASE_CX4 = 2, + MLX4_10GBASE_KX4 = 3, + MLX4_10GBASE_KR = 4, + MLX4_20GBASE_KR2 = 5, + MLX4_40GBASE_CR4 = 6, + MLX4_40GBASE_KR4 = 7, + MLX4_56GBASE_KR4 = 8, + MLX4_10GBASE_CR = 12, + MLX4_10GBASE_SR = 13, + MLX4_40GBASE_SR4 = 15, + MLX4_56GBASE_CR4 = 17, + MLX4_56GBASE_SR4 = 18, + MLX4_100BASE_TX = 24, + MLX4_1000BASE_T = 25, + MLX4_10GBASE_T = 26, +}; + +#define MLX4_PROT_MASK(link_mode) (1<<link_mode) + +enum { + MLX4_EN_100M_SPEED = 0x04, + MLX4_EN_10G_SPEED_XAUI = 0x00, + MLX4_EN_10G_SPEED_XFI = 0x01, + MLX4_EN_1G_SPEED = 0x02, + MLX4_EN_20G_SPEED = 0x08, + MLX4_EN_40G_SPEED = 0x40, + MLX4_EN_56G_SPEED = 0x20, + MLX4_EN_OTHER_SPEED = 0x0f, +}; + +struct mlx4_en_query_port_context { + u8 link_up; +#define MLX4_EN_LINK_UP_MASK 0x80 +#define MLX4_EN_ANC_MASK 0x40 + u8 autoneg; +#define MLX4_EN_AUTONEG_MASK 0x80 + __be16 mtu; + u8 reserved2; + u8 link_speed; +#define MLX4_EN_SPEED_MASK 0x6f + u16 reserved3[5]; + __be64 mac; + u8 transceiver; +}; + + +struct mlx4_en_stat_out_mbox { + /* Received frames with a length of 64 octets */ + __be64 R64_prio_0; + __be64 R64_prio_1; + __be64 R64_prio_2; + __be64 R64_prio_3; + __be64 R64_prio_4; + __be64 R64_prio_5; + __be64 R64_prio_6; + __be64 R64_prio_7; + __be64 R64_novlan; + /* Received frames with a length of 127 octets */ + __be64 R127_prio_0; + __be64 R127_prio_1; + __be64 R127_prio_2; + __be64 R127_prio_3; + __be64 R127_prio_4; + __be64 R127_prio_5; + __be64 R127_prio_6; + __be64 R127_prio_7; + __be64 R127_novlan; + /* Received frames with a length of 255 octets */ + __be64 R255_prio_0; + __be64 R255_prio_1; + __be64 R255_prio_2; + __be64 R255_prio_3; + __be64 R255_prio_4; + __be64 R255_prio_5; + __be64 R255_prio_6; + __be64 R255_prio_7; + __be64 R255_novlan; + /* Received frames with a length of 511 octets */ + __be64 R511_prio_0; + __be64 R511_prio_1; + __be64 R511_prio_2; + __be64 R511_prio_3; + __be64 R511_prio_4; + __be64 R511_prio_5; + __be64 R511_prio_6; + __be64 R511_prio_7; + __be64 R511_novlan; + /* Received frames with a length of 1023 octets */ + __be64 R1023_prio_0; + __be64 R1023_prio_1; + __be64 R1023_prio_2; + __be64 R1023_prio_3; + __be64 R1023_prio_4; + __be64 R1023_prio_5; + __be64 R1023_prio_6; + __be64 R1023_prio_7; + __be64 R1023_novlan; + /* Received frames with a length of 1518 octets */ + __be64 R1518_prio_0; + __be64 R1518_prio_1; + __be64 R1518_prio_2; + __be64 R1518_prio_3; + __be64 R1518_prio_4; + __be64 R1518_prio_5; + __be64 R1518_prio_6; + __be64 R1518_prio_7; + __be64 R1518_novlan; + /* Received frames with a length of 1522 octets */ + __be64 R1522_prio_0; + __be64 R1522_prio_1; + __be64 R1522_prio_2; + __be64 R1522_prio_3; + __be64 R1522_prio_4; + __be64 R1522_prio_5; + __be64 R1522_prio_6; + __be64 R1522_prio_7; + __be64 R1522_novlan; + /* Received frames with a length of 1548 octets */ + __be64 R1548_prio_0; + __be64 R1548_prio_1; + __be64 R1548_prio_2; + __be64 R1548_prio_3; + __be64 R1548_prio_4; + __be64 R1548_prio_5; + __be64 R1548_prio_6; + __be64 R1548_prio_7; + __be64 R1548_novlan; + /* Received frames with a length of 1548 
< octets < MTU */ + __be64 R2MTU_prio_0; + __be64 R2MTU_prio_1; + __be64 R2MTU_prio_2; + __be64 R2MTU_prio_3; + __be64 R2MTU_prio_4; + __be64 R2MTU_prio_5; + __be64 R2MTU_prio_6; + __be64 R2MTU_prio_7; + __be64 R2MTU_novlan; + /* Received frames with a length of MTU< octets and good CRC */ + __be64 RGIANT_prio_0; + __be64 RGIANT_prio_1; + __be64 RGIANT_prio_2; + __be64 RGIANT_prio_3; + __be64 RGIANT_prio_4; + __be64 RGIANT_prio_5; + __be64 RGIANT_prio_6; + __be64 RGIANT_prio_7; + __be64 RGIANT_novlan; + /* Received broadcast frames with good CRC */ + __be64 RBCAST_prio_0; + __be64 RBCAST_prio_1; + __be64 RBCAST_prio_2; + __be64 RBCAST_prio_3; + __be64 RBCAST_prio_4; + __be64 RBCAST_prio_5; + __be64 RBCAST_prio_6; + __be64 RBCAST_prio_7; + __be64 RBCAST_novlan; + /* Received multicast frames with good CRC */ + __be64 MCAST_prio_0; + __be64 MCAST_prio_1; + __be64 MCAST_prio_2; + __be64 MCAST_prio_3; + __be64 MCAST_prio_4; + __be64 MCAST_prio_5; + __be64 MCAST_prio_6; + __be64 MCAST_prio_7; + __be64 MCAST_novlan; + /* Received unicast not short or GIANT frames with good CRC */ + __be64 RTOTG_prio_0; + __be64 RTOTG_prio_1; + __be64 RTOTG_prio_2; + __be64 RTOTG_prio_3; + __be64 RTOTG_prio_4; + __be64 RTOTG_prio_5; + __be64 RTOTG_prio_6; + __be64 RTOTG_prio_7; + __be64 RTOTG_novlan; + + /* Count of total octets of received frames, includes framing characters */ + __be64 RTTLOCT_prio_0; + /* Count of total octets of received frames, not including framing + characters */ + __be64 RTTLOCT_NOFRM_prio_0; + /* Count of Total number of octets received + (only for frames without errors) */ + __be64 ROCT_prio_0; + + __be64 RTTLOCT_prio_1; + __be64 RTTLOCT_NOFRM_prio_1; + __be64 ROCT_prio_1; + + __be64 RTTLOCT_prio_2; + __be64 RTTLOCT_NOFRM_prio_2; + __be64 ROCT_prio_2; + + __be64 RTTLOCT_prio_3; + __be64 RTTLOCT_NOFRM_prio_3; + __be64 ROCT_prio_3; + + __be64 RTTLOCT_prio_4; + __be64 RTTLOCT_NOFRM_prio_4; + __be64 ROCT_prio_4; + + __be64 RTTLOCT_prio_5; + __be64 RTTLOCT_NOFRM_prio_5; + __be64 ROCT_prio_5; + + __be64 RTTLOCT_prio_6; + __be64 RTTLOCT_NOFRM_prio_6; + __be64 ROCT_prio_6; + + __be64 RTTLOCT_prio_7; + __be64 RTTLOCT_NOFRM_prio_7; + __be64 ROCT_prio_7; + + __be64 RTTLOCT_novlan; + __be64 RTTLOCT_NOFRM_novlan; + __be64 ROCT_novlan; + + /* Count of Total received frames including bad frames */ + __be64 RTOT_prio_0; + /* Count of Total number of received frames with 802.1Q encapsulation */ + __be64 R1Q_prio_0; + __be64 reserved1; + + __be64 RTOT_prio_1; + __be64 R1Q_prio_1; + __be64 reserved2; + + __be64 RTOT_prio_2; + __be64 R1Q_prio_2; + __be64 reserved3; + + __be64 RTOT_prio_3; + __be64 R1Q_prio_3; + __be64 reserved4; + + __be64 RTOT_prio_4; + __be64 R1Q_prio_4; + __be64 reserved5; + + __be64 RTOT_prio_5; + __be64 R1Q_prio_5; + __be64 reserved6; + + __be64 RTOT_prio_6; + __be64 R1Q_prio_6; + __be64 reserved7; + + __be64 RTOT_prio_7; + __be64 R1Q_prio_7; + __be64 reserved8; + + __be64 RTOT_novlan; + __be64 R1Q_novlan; + __be64 reserved9; + + /* Total number of Successfully Received Control Frames */ + __be64 RCNTL; + __be64 reserved10; + __be64 reserved11; + __be64 reserved12; + /* Count of received frames with a length/type field value between 46 + (42 for VLANtagged frames) and 1500 (also 1500 for VLAN-tagged frames), + inclusive */ + __be64 RInRangeLengthErr; + /* Count of received frames with length/type field between 1501 and 1535 + decimal, inclusive */ + __be64 ROutRangeLengthErr; + /* Count of received frames that are longer than max allowed size for + 802.3 frames (1518/1522) */ + __be64 
RFrmTooLong; + /* Count frames received with PCS error */ + __be64 PCS; + + /* Transmit frames with a length of 64 octets */ + __be64 T64_prio_0; + __be64 T64_prio_1; + __be64 T64_prio_2; + __be64 T64_prio_3; + __be64 T64_prio_4; + __be64 T64_prio_5; + __be64 T64_prio_6; + __be64 T64_prio_7; + __be64 T64_novlan; + __be64 T64_loopbk; + /* Transmit frames with a length of 65 to 127 octets. */ + __be64 T127_prio_0; + __be64 T127_prio_1; + __be64 T127_prio_2; + __be64 T127_prio_3; + __be64 T127_prio_4; + __be64 T127_prio_5; + __be64 T127_prio_6; + __be64 T127_prio_7; + __be64 T127_novlan; + __be64 T127_loopbk; + /* Transmit frames with a length of 128 to 255 octets */ + __be64 T255_prio_0; + __be64 T255_prio_1; + __be64 T255_prio_2; + __be64 T255_prio_3; + __be64 T255_prio_4; + __be64 T255_prio_5; + __be64 T255_prio_6; + __be64 T255_prio_7; + __be64 T255_novlan; + __be64 T255_loopbk; + /* Transmit frames with a length of 256 to 511 octets */ + __be64 T511_prio_0; + __be64 T511_prio_1; + __be64 T511_prio_2; + __be64 T511_prio_3; + __be64 T511_prio_4; + __be64 T511_prio_5; + __be64 T511_prio_6; + __be64 T511_prio_7; + __be64 T511_novlan; + __be64 T511_loopbk; + /* Transmit frames with a length of 512 to 1023 octets */ + __be64 T1023_prio_0; + __be64 T1023_prio_1; + __be64 T1023_prio_2; + __be64 T1023_prio_3; + __be64 T1023_prio_4; + __be64 T1023_prio_5; + __be64 T1023_prio_6; + __be64 T1023_prio_7; + __be64 T1023_novlan; + __be64 T1023_loopbk; + /* Transmit frames with a length of 1024 to 1518 octets */ + __be64 T1518_prio_0; + __be64 T1518_prio_1; + __be64 T1518_prio_2; + __be64 T1518_prio_3; + __be64 T1518_prio_4; + __be64 T1518_prio_5; + __be64 T1518_prio_6; + __be64 T1518_prio_7; + __be64 T1518_novlan; + __be64 T1518_loopbk; + /* Counts transmit frames with a length of 1519 to 1522 bytes */ + __be64 T1522_prio_0; + __be64 T1522_prio_1; + __be64 T1522_prio_2; + __be64 T1522_prio_3; + __be64 T1522_prio_4; + __be64 T1522_prio_5; + __be64 T1522_prio_6; + __be64 T1522_prio_7; + __be64 T1522_novlan; + __be64 T1522_loopbk; + /* Transmit frames with a length of 1523 to 1548 octets */ + __be64 T1548_prio_0; + __be64 T1548_prio_1; + __be64 T1548_prio_2; + __be64 T1548_prio_3; + __be64 T1548_prio_4; + __be64 T1548_prio_5; + __be64 T1548_prio_6; + __be64 T1548_prio_7; + __be64 T1548_novlan; + __be64 T1548_loopbk; + /* Counts transmit frames with a length of 1549 to MTU bytes */ + __be64 T2MTU_prio_0; + __be64 T2MTU_prio_1; + __be64 T2MTU_prio_2; + __be64 T2MTU_prio_3; + __be64 T2MTU_prio_4; + __be64 T2MTU_prio_5; + __be64 T2MTU_prio_6; + __be64 T2MTU_prio_7; + __be64 T2MTU_novlan; + __be64 T2MTU_loopbk; + /* Transmit frames with a length greater than MTU octets and a good CRC. 
*/ + __be64 TGIANT_prio_0; + __be64 TGIANT_prio_1; + __be64 TGIANT_prio_2; + __be64 TGIANT_prio_3; + __be64 TGIANT_prio_4; + __be64 TGIANT_prio_5; + __be64 TGIANT_prio_6; + __be64 TGIANT_prio_7; + __be64 TGIANT_novlan; + __be64 TGIANT_loopbk; + /* Transmit broadcast frames with a good CRC */ + __be64 TBCAST_prio_0; + __be64 TBCAST_prio_1; + __be64 TBCAST_prio_2; + __be64 TBCAST_prio_3; + __be64 TBCAST_prio_4; + __be64 TBCAST_prio_5; + __be64 TBCAST_prio_6; + __be64 TBCAST_prio_7; + __be64 TBCAST_novlan; + __be64 TBCAST_loopbk; + /* Transmit multicast frames with a good CRC */ + __be64 TMCAST_prio_0; + __be64 TMCAST_prio_1; + __be64 TMCAST_prio_2; + __be64 TMCAST_prio_3; + __be64 TMCAST_prio_4; + __be64 TMCAST_prio_5; + __be64 TMCAST_prio_6; + __be64 TMCAST_prio_7; + __be64 TMCAST_novlan; + __be64 TMCAST_loopbk; + /* Transmit good frames that are neither broadcast nor multicast */ + __be64 TTOTG_prio_0; + __be64 TTOTG_prio_1; + __be64 TTOTG_prio_2; + __be64 TTOTG_prio_3; + __be64 TTOTG_prio_4; + __be64 TTOTG_prio_5; + __be64 TTOTG_prio_6; + __be64 TTOTG_prio_7; + __be64 TTOTG_novlan; + __be64 TTOTG_loopbk; + + /* total octets of transmitted frames, including framing characters */ + __be64 TTTLOCT_prio_0; + /* total octets of transmitted frames, not including framing characters */ + __be64 TTTLOCT_NOFRM_prio_0; + /* ifOutOctets */ + __be64 TOCT_prio_0; + + __be64 TTTLOCT_prio_1; + __be64 TTTLOCT_NOFRM_prio_1; + __be64 TOCT_prio_1; + + __be64 TTTLOCT_prio_2; + __be64 TTTLOCT_NOFRM_prio_2; + __be64 TOCT_prio_2; + + __be64 TTTLOCT_prio_3; + __be64 TTTLOCT_NOFRM_prio_3; + __be64 TOCT_prio_3; + + __be64 TTTLOCT_prio_4; + __be64 TTTLOCT_NOFRM_prio_4; + __be64 TOCT_prio_4; + + __be64 TTTLOCT_prio_5; + __be64 TTTLOCT_NOFRM_prio_5; + __be64 TOCT_prio_5; + + __be64 TTTLOCT_prio_6; + __be64 TTTLOCT_NOFRM_prio_6; + __be64 TOCT_prio_6; + + __be64 TTTLOCT_prio_7; + __be64 TTTLOCT_NOFRM_prio_7; + __be64 TOCT_prio_7; + + __be64 TTTLOCT_novlan; + __be64 TTTLOCT_NOFRM_novlan; + __be64 TOCT_novlan; + + __be64 TTTLOCT_loopbk; + __be64 TTTLOCT_NOFRM_loopbk; + __be64 TOCT_loopbk; + + /* Total frames transmitted with a good CRC that are not aborted */ + __be64 TTOT_prio_0; + /* Total number of frames transmitted with 802.1Q encapsulation */ + __be64 T1Q_prio_0; + __be64 reserved13; + + __be64 TTOT_prio_1; + __be64 T1Q_prio_1; + __be64 reserved14; + + __be64 TTOT_prio_2; + __be64 T1Q_prio_2; + __be64 reserved15; + + __be64 TTOT_prio_3; + __be64 T1Q_prio_3; + __be64 reserved16; + + __be64 TTOT_prio_4; + __be64 T1Q_prio_4; + __be64 reserved17; + + __be64 TTOT_prio_5; + __be64 T1Q_prio_5; + __be64 reserved18; + + __be64 TTOT_prio_6; + __be64 T1Q_prio_6; + __be64 reserved19; + + __be64 TTOT_prio_7; + __be64 T1Q_prio_7; + __be64 reserved20; + + __be64 TTOT_novlan; + __be64 T1Q_novlan; + __be64 reserved21; + + __be64 TTOT_loopbk; + __be64 T1Q_loopbk; + __be64 reserved22; + + /* Received frames with a length greater than MTU octets and a bad CRC */ + __be32 RJBBR; + /* Received frames with a bad CRC that are not runts, jabbers, + or alignment errors */ + __be32 RCRC; + /* Received frames with SFD with a length of less than 64 octets and a + bad CRC */ + __be32 RRUNT; + /* Received frames with a length less than 64 octets and a good CRC */ + __be32 RSHORT; + /* Total Number of Received Packets Dropped */ + __be32 RDROP; + /* Drop due to overflow */ + __be32 RdropOvflw; + /* Drop due to overflow */ + __be32 RdropLength; + /* Total of good frames. 
Does not include frames received with + frame-too-long, FCS, or length errors */ + __be32 RTOTFRMS; + /* Total dropped Xmited packets */ + __be32 TDROP; +}; + + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c new file mode 100644 index 000000000..34f2fdf4f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/mlx4/qp.h> + +#include "mlx4_en.h" + +void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, + int is_tx, int rss, int qpn, int cqn, + int user_prio, struct mlx4_qp_context *context) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct net_device *dev = priv->dev; + + memset(context, 0, sizeof *context); + context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET); + context->pd = cpu_to_be32(mdev->priv_pdn); + context->mtu_msgmax = 0xff; + if (!is_tx && !rss) + context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); + if (is_tx) { + context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP) + context->params2 |= MLX4_QP_BIT_FPP; + + } else { + context->sq_size_stride = ilog2(TXBB_SIZE) - 4; + } + context->usr_page = cpu_to_be32(mdev->priv_uar.index); + context->local_qpn = cpu_to_be32(qpn); + context->pri_path.ackto = 1 & 0x07; + context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; + if (user_prio >= 0) { + context->pri_path.sched_queue |= user_prio << 3; + context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP; + } + context->pri_path.counter_index = 0xff; + context->cqn_send = cpu_to_be32(cqn); + context->cqn_recv = cpu_to_be32(cqn); + context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2); + if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX)) + context->param3 |= cpu_to_be32(1 << 30); + + if (!is_tx && !rss && + (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)) { + en_dbg(HW, priv, "Setting RX qp %x tunnel mode to RX tunneled & non-tunneled\n", qpn); + context->srqn = cpu_to_be32(7 << 28); /* this fills bits 30:28 */ + } +} + + +int mlx4_en_map_buffer(struct mlx4_buf *buf) +{ + struct page **pages; + int i; + + if (BITS_PER_LONG == 64 || buf->nbufs == 1) + return 0; + + pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL); + if (!pages) + return -ENOMEM; + + for (i = 0; i < buf->nbufs; ++i) + pages[i] = virt_to_page(buf->page_list[i].buf); + + buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); + kfree(pages); + if (!buf->direct.buf) + return -ENOMEM; + + return 0; +} + +void mlx4_en_unmap_buffer(struct mlx4_buf *buf) +{ + if (BITS_PER_LONG == 64 || buf->nbufs == 1) + return; + + vunmap(buf->direct.buf); +} + +void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event) +{ + return; +} + diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c new file mode 100644 index 000000000..eab4e080e --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -0,0 +1,1307 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include <net/busy_poll.h> +#include <linux/mlx4/cq.h> +#include <linux/slab.h> +#include <linux/mlx4/qp.h> +#include <linux/skbuff.h> +#include <linux/rculist.h> +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <linux/vmalloc.h> +#include <linux/irq.h> + +#if IS_ENABLED(CONFIG_IPV6) +#include <net/ip6_checksum.h> +#endif + +#include "mlx4_en.h" + +static int mlx4_alloc_pages(struct mlx4_en_priv *priv, + struct mlx4_en_rx_alloc *page_alloc, + const struct mlx4_en_frag_info *frag_info, + gfp_t _gfp) +{ + int order; + struct page *page; + dma_addr_t dma; + + for (order = MLX4_EN_ALLOC_PREFER_ORDER; ;) { + gfp_t gfp = _gfp; + + if (order) + gfp |= __GFP_COMP | __GFP_NOWARN; + page = alloc_pages(gfp, order); + if (likely(page)) + break; + if (--order < 0 || + ((PAGE_SIZE << order) < frag_info->frag_size)) + return -ENOMEM; + } + dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order, + PCI_DMA_FROMDEVICE); + if (dma_mapping_error(priv->ddev, dma)) { + put_page(page); + return -ENOMEM; + } + page_alloc->page_size = PAGE_SIZE << order; + page_alloc->page = page; + page_alloc->dma = dma; + page_alloc->page_offset = 0; + /* Not doing get_page() for each frag is a big win + * on asymmetric workloads. Note we can not use atomic_set(). 
+ */ + atomic_add(page_alloc->page_size / frag_info->frag_stride - 1, + &page->_count); + return 0; +} + +static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv, + struct mlx4_en_rx_desc *rx_desc, + struct mlx4_en_rx_alloc *frags, + struct mlx4_en_rx_alloc *ring_alloc, + gfp_t gfp) +{ + struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS]; + const struct mlx4_en_frag_info *frag_info; + struct page *page; + dma_addr_t dma; + int i; + + for (i = 0; i < priv->num_frags; i++) { + frag_info = &priv->frag_info[i]; + page_alloc[i] = ring_alloc[i]; + page_alloc[i].page_offset += frag_info->frag_stride; + + if (page_alloc[i].page_offset + frag_info->frag_stride <= + ring_alloc[i].page_size) + continue; + + if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp)) + goto out; + } + + for (i = 0; i < priv->num_frags; i++) { + frags[i] = ring_alloc[i]; + dma = ring_alloc[i].dma + ring_alloc[i].page_offset; + ring_alloc[i] = page_alloc[i]; + rx_desc->data[i].addr = cpu_to_be64(dma); + } + + return 0; + +out: + while (i--) { + if (page_alloc[i].page != ring_alloc[i].page) { + dma_unmap_page(priv->ddev, page_alloc[i].dma, + page_alloc[i].page_size, PCI_DMA_FROMDEVICE); + page = page_alloc[i].page; + atomic_set(&page->_count, 1); + put_page(page); + } + } + return -ENOMEM; +} + +static void mlx4_en_free_frag(struct mlx4_en_priv *priv, + struct mlx4_en_rx_alloc *frags, + int i) +{ + const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; + u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride; + + + if (next_frag_end > frags[i].page_size) + dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size, + PCI_DMA_FROMDEVICE); + + if (frags[i].page) + put_page(frags[i].page); +} + +static int mlx4_en_init_allocator(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring) +{ + int i; + struct mlx4_en_rx_alloc *page_alloc; + + for (i = 0; i < priv->num_frags; i++) { + const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; + + if (mlx4_alloc_pages(priv, &ring->page_alloc[i], + frag_info, GFP_KERNEL | __GFP_COLD)) + goto out; + + en_dbg(DRV, priv, " frag %d allocator: - size:%d frags:%d\n", + i, ring->page_alloc[i].page_size, + atomic_read(&ring->page_alloc[i].page->_count)); + } + return 0; + +out: + while (i--) { + struct page *page; + + page_alloc = &ring->page_alloc[i]; + dma_unmap_page(priv->ddev, page_alloc->dma, + page_alloc->page_size, PCI_DMA_FROMDEVICE); + page = page_alloc->page; + atomic_set(&page->_count, 1); + put_page(page); + page_alloc->page = NULL; + } + return -ENOMEM; +} + +static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring) +{ + struct mlx4_en_rx_alloc *page_alloc; + int i; + + for (i = 0; i < priv->num_frags; i++) { + const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; + + page_alloc = &ring->page_alloc[i]; + en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n", + i, page_count(page_alloc->page)); + + dma_unmap_page(priv->ddev, page_alloc->dma, + page_alloc->page_size, PCI_DMA_FROMDEVICE); + while (page_alloc->page_offset + frag_info->frag_stride < + page_alloc->page_size) { + put_page(page_alloc->page); + page_alloc->page_offset += frag_info->frag_stride; + } + page_alloc->page = NULL; + } +} + +static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring, int index) +{ + struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index; + int possible_frags; + int i; + + /* Set size and memtype fields */ + for (i = 0; i < priv->num_frags; i++) { + 
rx_desc->data[i].byte_count = + cpu_to_be32(priv->frag_info[i].frag_size); + rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key); + } + + /* If the number of used fragments does not fill up the ring stride, + * remaining (unused) fragments must be padded with null address/size + * and a special memory key */ + possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE; + for (i = priv->num_frags; i < possible_frags; i++) { + rx_desc->data[i].byte_count = 0; + rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD); + rx_desc->data[i].addr = 0; + } +} + +static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring, int index, + gfp_t gfp) +{ + struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride); + struct mlx4_en_rx_alloc *frags = ring->rx_info + + (index << priv->log_rx_info); + + return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp); +} + +static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring) +{ + BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size); + return ring->prod == ring->cons; +} + +static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring) +{ + *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff); +} + +static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring, + int index) +{ + struct mlx4_en_rx_alloc *frags; + int nr; + + frags = ring->rx_info + (index << priv->log_rx_info); + for (nr = 0; nr < priv->num_frags; nr++) { + en_dbg(DRV, priv, "Freeing fragment:%d\n", nr); + mlx4_en_free_frag(priv, frags, nr); + } +} + +static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv) +{ + struct mlx4_en_rx_ring *ring; + int ring_ind; + int buf_ind; + int new_size; + + for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) { + for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { + ring = priv->rx_ring[ring_ind]; + + if (mlx4_en_prepare_rx_desc(priv, ring, + ring->actual_size, + GFP_KERNEL | __GFP_COLD)) { + if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) { + en_err(priv, "Failed to allocate enough rx buffers\n"); + return -ENOMEM; + } else { + new_size = rounddown_pow_of_two(ring->actual_size); + en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n", + ring->actual_size, new_size); + goto reduce_rings; + } + } + ring->actual_size++; + ring->prod++; + } + } + return 0; + +reduce_rings: + for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { + ring = priv->rx_ring[ring_ind]; + while (ring->actual_size > new_size) { + ring->actual_size--; + ring->prod--; + mlx4_en_free_rx_desc(priv, ring, ring->actual_size); + } + } + + return 0; +} + +static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring) +{ + int index; + + en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n", + ring->cons, ring->prod); + + /* Unmap and free Rx buffers */ + while (!mlx4_en_is_ring_empty(ring)) { + index = ring->cons & ring->size_mask; + en_dbg(DRV, priv, "Processing descriptor:%d\n", index); + mlx4_en_free_rx_desc(priv, ring, index); + ++ring->cons; + } +} + +void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev) +{ + int i; + int num_of_eqs; + int num_rx_rings; + struct mlx4_dev *dev = mdev->dev; + + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { + if (!dev->caps.comp_pool) + num_of_eqs = max_t(int, MIN_RX_RINGS, + min_t(int, + dev->caps.num_comp_vectors, + DEF_RX_RINGS)); + else + num_of_eqs = min_t(int, MAX_MSIX_P_PORT, + dev->caps.comp_pool/ + dev->caps.num_ports) - 1; + + 
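+		/* Clamp the ring count to the EQs computed above and to the
+		 * stack's default RSS queue count; low-memory profiles fall back
+		 * to MIN_RX_RINGS, and the result is rounded down to a power of
+		 * two.
+		 */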
num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS : + min_t(int, num_of_eqs, + netif_get_num_default_rss_queues()); + mdev->profile.prof[i].rx_ring_num = + rounddown_pow_of_two(num_rx_rings); + } +} + +int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring **pring, + u32 size, u16 stride, int node) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_rx_ring *ring; + int err = -ENOMEM; + int tmp; + + ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node); + if (!ring) { + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) { + en_err(priv, "Failed to allocate RX ring structure\n"); + return -ENOMEM; + } + } + + ring->prod = 0; + ring->cons = 0; + ring->size = size; + ring->size_mask = size - 1; + ring->stride = stride; + ring->log_stride = ffs(ring->stride) - 1; + ring->buf_size = ring->size * ring->stride + TXBB_SIZE; + + tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS * + sizeof(struct mlx4_en_rx_alloc)); + ring->rx_info = vmalloc_node(tmp, node); + if (!ring->rx_info) { + ring->rx_info = vmalloc(tmp); + if (!ring->rx_info) { + err = -ENOMEM; + goto err_ring; + } + } + + en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n", + ring->rx_info, tmp); + + /* Allocate HW buffers on provided NUMA node */ + set_dev_node(&mdev->dev->persist->pdev->dev, node); + err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, + ring->buf_size, 2 * PAGE_SIZE); + set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node); + if (err) + goto err_info; + + err = mlx4_en_map_buffer(&ring->wqres.buf); + if (err) { + en_err(priv, "Failed to map RX buffer\n"); + goto err_hwq; + } + ring->buf = ring->wqres.buf.direct.buf; + + ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter; + + *pring = ring; + return 0; + +err_hwq: + mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); +err_info: + vfree(ring->rx_info); + ring->rx_info = NULL; +err_ring: + kfree(ring); + *pring = NULL; + + return err; +} + +int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) +{ + struct mlx4_en_rx_ring *ring; + int i; + int ring_ind; + int err; + int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + + DS_SIZE * priv->num_frags); + + for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { + ring = priv->rx_ring[ring_ind]; + + ring->prod = 0; + ring->cons = 0; + ring->actual_size = 0; + ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn; + + ring->stride = stride; + if (ring->stride <= TXBB_SIZE) + ring->buf += TXBB_SIZE; + + ring->log_stride = ffs(ring->stride) - 1; + ring->buf_size = ring->size * ring->stride; + + memset(ring->buf, 0, ring->buf_size); + mlx4_en_update_rx_prod_db(ring); + + /* Initialize all descriptors */ + for (i = 0; i < ring->size; i++) + mlx4_en_init_rx_desc(priv, ring, i); + + /* Initialize page allocators */ + err = mlx4_en_init_allocator(priv, ring); + if (err) { + en_err(priv, "Failed initializing ring allocator\n"); + if (ring->stride <= TXBB_SIZE) + ring->buf -= TXBB_SIZE; + ring_ind--; + goto err_allocator; + } + } + err = mlx4_en_fill_rx_buffers(priv); + if (err) + goto err_buffers; + + for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { + ring = priv->rx_ring[ring_ind]; + + ring->size_mask = ring->actual_size - 1; + mlx4_en_update_rx_prod_db(ring); + } + + return 0; + +err_buffers: + for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) + mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]); + + ring_ind = priv->rx_ring_num - 1; +err_allocator: + while (ring_ind >= 0) { + if (priv->rx_ring[ring_ind]->stride <= 
TXBB_SIZE) + priv->rx_ring[ring_ind]->buf -= TXBB_SIZE; + mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]); + ring_ind--; + } + return err; +} + +/* We recover from out of memory by scheduling our napi poll + * function (mlx4_en_process_cq), which tries to allocate + * all missing RX buffers (call to mlx4_en_refill_rx_buffers). + */ +void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv) +{ + int ring; + + if (!priv->port_up) + return; + + for (ring = 0; ring < priv->rx_ring_num; ring++) { + if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) + napi_reschedule(&priv->rx_cq[ring]->napi); + } +} + +void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring **pring, + u32 size, u16 stride) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_rx_ring *ring = *pring; + + mlx4_en_unmap_buffer(&ring->wqres.buf); + mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE); + vfree(ring->rx_info); + ring->rx_info = NULL; + kfree(ring); + *pring = NULL; +#ifdef CONFIG_RFS_ACCEL + mlx4_en_cleanup_filters(priv); +#endif +} + +void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring) +{ + mlx4_en_free_rx_buf(priv, ring); + if (ring->stride <= TXBB_SIZE) + ring->buf -= TXBB_SIZE; + mlx4_en_destroy_allocator(priv, ring); +} + + +static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_rx_desc *rx_desc, + struct mlx4_en_rx_alloc *frags, + struct sk_buff *skb, + int length) +{ + struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags; + struct mlx4_en_frag_info *frag_info; + int nr; + dma_addr_t dma; + + /* Collect used fragments while replacing them in the HW descriptors */ + for (nr = 0; nr < priv->num_frags; nr++) { + frag_info = &priv->frag_info[nr]; + if (length <= frag_info->frag_prefix_size) + break; + if (!frags[nr].page) + goto fail; + + dma = be64_to_cpu(rx_desc->data[nr].addr); + dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size, + DMA_FROM_DEVICE); + + /* Save page reference in skb */ + __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page); + skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size); + skb_frags_rx[nr].page_offset = frags[nr].page_offset; + skb->truesize += frag_info->frag_stride; + frags[nr].page = NULL; + } + /* Adjust size of last fragment to match actual length */ + if (nr > 0) + skb_frag_size_set(&skb_frags_rx[nr - 1], + length - priv->frag_info[nr - 1].frag_prefix_size); + return nr; + +fail: + while (nr > 0) { + nr--; + __skb_frag_unref(&skb_frags_rx[nr]); + } + return 0; +} + + +static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv, + struct mlx4_en_rx_desc *rx_desc, + struct mlx4_en_rx_alloc *frags, + unsigned int length) +{ + struct sk_buff *skb; + void *va; + int used_frags; + dma_addr_t dma; + + skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN); + if (!skb) { + en_dbg(RX_ERR, priv, "Failed allocating skb\n"); + return NULL; + } + skb_reserve(skb, NET_IP_ALIGN); + skb->len = length; + + /* Get pointer to first fragment so we could copy the headers into the + * (linear part of the) skb */ + va = page_address(frags[0].page) + frags[0].page_offset; + + if (length <= SMALL_PACKET_SIZE) { + /* We are copying all relevant data to the skb - temporarily + * sync buffers for the copy */ + dma = be64_to_cpu(rx_desc->data[0].addr); + dma_sync_single_for_cpu(priv->ddev, dma, length, + DMA_FROM_DEVICE); + skb_copy_to_linear_data(skb, va, length); + skb->tail += length; + } else { + unsigned int pull_len; + + /* Move relevant 
fragments to skb */ + used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags, + skb, length); + if (unlikely(!used_frags)) { + kfree_skb(skb); + return NULL; + } + skb_shinfo(skb)->nr_frags = used_frags; + + pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE); + /* Copy headers into the skb linear buffer */ + memcpy(skb->data, va, pull_len); + skb->tail += pull_len; + + /* Skip headers in first fragment */ + skb_shinfo(skb)->frags[0].page_offset += pull_len; + + /* Adjust size of first fragment */ + skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len); + skb->data_len = length - pull_len; + } + return skb; +} + +static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb) +{ + int i; + int offset = ETH_HLEN; + + for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) { + if (*(skb->data + offset) != (unsigned char) (i & 0xff)) + goto out_loopback; + } + /* Loopback found */ + priv->loopback_ok = 1; + +out_loopback: + dev_kfree_skb_any(skb); +} + +static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring) +{ + int index = ring->prod & ring->size_mask; + + while ((u32) (ring->prod - ring->cons) < ring->actual_size) { + if (mlx4_en_prepare_rx_desc(priv, ring, index, + GFP_ATOMIC | __GFP_COLD)) + break; + ring->prod++; + index = ring->prod & ring->size_mask; + } +} + +/* When hardware doesn't strip the vlan, we need to calculate the checksum + * over it and add it to the hardware's checksum calculation + */ +static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum, + struct vlan_hdr *vlanh) +{ + return csum_add(hw_checksum, *(__wsum *)vlanh); +} + +/* Although the stack expects checksum which doesn't include the pseudo + * header, the HW adds it. To address that, we are subtracting the pseudo + * header checksum from the checksum value provided by the HW. + */ +static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, + struct iphdr *iph) +{ + __u16 length_for_csum = 0; + __wsum csum_pseudo_header = 0; + + length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2)); + csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr, + length_for_csum, iph->protocol, 0); + skb->csum = csum_sub(hw_checksum, csum_pseudo_header); +} + +#if IS_ENABLED(CONFIG_IPV6) +/* In IPv6 packets, besides subtracting the pseudo header checksum, + * we also compute/add the IP header checksum which + * is not added by the HW. 
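+ * The pseudo header is rebuilt here from saddr, daddr, payload_len and
+ * nexthdr; packets carrying fragment or hop-by-hop extension headers are
+ * rejected, in which case the caller falls back to CHECKSUM_NONE.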
+ */ +static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, + struct ipv6hdr *ipv6h) +{ + __wsum csum_pseudo_hdr = 0; + + if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS) + return -1; + hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8)); + + csum_pseudo_hdr = csum_partial(&ipv6h->saddr, + sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0); + csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len); + csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr)); + + skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr); + skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0)); + return 0; +} +#endif +static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, + netdev_features_t dev_features) +{ + __wsum hw_checksum = 0; + + void *hdr = (u8 *)va + sizeof(struct ethhdr); + + hw_checksum = csum_unfold((__force __sum16)cqe->checksum); + + if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) && + !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) { + hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr); + hdr += sizeof(struct vlan_hdr); + } + + if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4)) + get_fixed_ipv4_csum(hw_checksum, skb, hdr); +#if IS_ENABLED(CONFIG_IPV6) + else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) + if (get_fixed_ipv6_csum(hw_checksum, skb, hdr)) + return -1; +#endif + return 0; +} + +int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_cqe *cqe; + struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring]; + struct mlx4_en_rx_alloc *frags; + struct mlx4_en_rx_desc *rx_desc; + struct sk_buff *skb; + int index; + int nr; + unsigned int length; + int polled = 0; + int ip_summed; + int factor = priv->cqe_factor; + u64 timestamp; + bool l2_tunnel; + + if (!priv->port_up) + return 0; + + if (budget <= 0) + return polled; + + /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx + * descriptor offset can be deduced from the CQE index instead of + * reading 'cqe->index' */ + index = cq->mcq.cons_index & ring->size_mask; + cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; + + /* Process all completed CQEs */ + while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, + cq->mcq.cons_index & cq->size)) { + + frags = ring->rx_info + (index << priv->log_rx_info); + rx_desc = ring->buf + (index << ring->log_stride); + + /* + * make sure we read the CQE after we read the ownership bit + */ + dma_rmb(); + + /* Drop packet on bad receive or bad checksum */ + if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == + MLX4_CQE_OPCODE_ERROR)) { + en_err(priv, "CQE completed in error - vendor syndrom:%d syndrom:%d\n", + ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome, + ((struct mlx4_err_cqe *)cqe)->syndrome); + goto next; + } + if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) { + en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n"); + goto next; + } + + /* Check if we need to drop the packet if SRIOV is not enabled + * and not performing the selftest or flb disabled + */ + if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) { + struct ethhdr *ethh; + dma_addr_t dma; + /* Get pointer to first fragment since we haven't + * skb yet and cast it to ethhdr struct + */ + dma = be64_to_cpu(rx_desc->data[0].addr); + dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh), + 
DMA_FROM_DEVICE); + ethh = (struct ethhdr *)(page_address(frags[0].page) + + frags[0].page_offset); + + if (is_multicast_ether_addr(ethh->h_dest)) { + struct mlx4_mac_entry *entry; + struct hlist_head *bucket; + unsigned int mac_hash; + + /* Drop the packet, since HW loopback-ed it */ + mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX]; + bucket = &priv->mac_hash[mac_hash]; + rcu_read_lock(); + hlist_for_each_entry_rcu(entry, bucket, hlist) { + if (ether_addr_equal_64bits(entry->mac, + ethh->h_source)) { + rcu_read_unlock(); + goto next; + } + } + rcu_read_unlock(); + } + } + + /* + * Packet is OK - process it. + */ + length = be32_to_cpu(cqe->byte_cnt); + length -= ring->fcs_del; + ring->bytes += length; + ring->packets++; + l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) && + (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL)); + + if (likely(dev->features & NETIF_F_RXCSUM)) { + if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | + MLX4_CQE_STATUS_UDP)) { + if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && + cqe->checksum == cpu_to_be16(0xffff)) { + ip_summed = CHECKSUM_UNNECESSARY; + ring->csum_ok++; + } else { + ip_summed = CHECKSUM_NONE; + ring->csum_none++; + } + } else { + if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP && + (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 | + MLX4_CQE_STATUS_IPV6))) { + ip_summed = CHECKSUM_COMPLETE; + ring->csum_complete++; + } else { + ip_summed = CHECKSUM_NONE; + ring->csum_none++; + } + } + } else { + ip_summed = CHECKSUM_NONE; + ring->csum_none++; + } + + /* This packet is eligible for GRO if it is: + * - DIX Ethernet (type interpretation) + * - TCP/IP (v4) + * - without IP options + * - not an IP fragment + * - no LLS polling in progress + */ + if (!mlx4_en_cq_busy_polling(cq) && + (dev->features & NETIF_F_GRO)) { + struct sk_buff *gro_skb = napi_get_frags(&cq->napi); + if (!gro_skb) + goto next; + + nr = mlx4_en_complete_rx_desc(priv, + rx_desc, frags, gro_skb, + length); + if (!nr) + goto next; + + if (ip_summed == CHECKSUM_COMPLETE) { + void *va = skb_frag_address(skb_shinfo(gro_skb)->frags); + if (check_csum(cqe, gro_skb, va, + dev->features)) { + ip_summed = CHECKSUM_NONE; + ring->csum_none++; + ring->csum_complete--; + } + } + + skb_shinfo(gro_skb)->nr_frags = nr; + gro_skb->len = length; + gro_skb->data_len = length; + gro_skb->ip_summed = ip_summed; + + if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY) + gro_skb->csum_level = 1; + + if ((cqe->vlan_my_qpn & + cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) && + (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { + u16 vid = be16_to_cpu(cqe->sl_vid); + + __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid); + } + + if (dev->features & NETIF_F_RXHASH) + skb_set_hash(gro_skb, + be32_to_cpu(cqe->immed_rss_invalid), + PKT_HASH_TYPE_L3); + + skb_record_rx_queue(gro_skb, cq->ring); + skb_mark_napi_id(gro_skb, &cq->napi); + + if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { + timestamp = mlx4_en_get_cqe_ts(cqe); + mlx4_en_fill_hwtstamps(mdev, + skb_hwtstamps(gro_skb), + timestamp); + } + + napi_gro_frags(&cq->napi); + goto next; + } + + /* GRO not possible, complete processing here */ + skb = mlx4_en_rx_skb(priv, rx_desc, frags, length); + if (!skb) { + priv->stats.rx_dropped++; + goto next; + } + + if (unlikely(priv->validate_loopback)) { + validate_loopback(priv, skb); + goto next; + } + + if (ip_summed == CHECKSUM_COMPLETE) { + if (check_csum(cqe, skb, skb->data, dev->features)) { + ip_summed = CHECKSUM_NONE; + ring->csum_complete--; + ring->csum_none++; + } + } + + skb->ip_summed 
= ip_summed; + skb->protocol = eth_type_trans(skb, dev); + skb_record_rx_queue(skb, cq->ring); + + if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY) + skb->csum_level = 1; + + if (dev->features & NETIF_F_RXHASH) + skb_set_hash(skb, + be32_to_cpu(cqe->immed_rss_invalid), + PKT_HASH_TYPE_L3); + + if ((be32_to_cpu(cqe->vlan_my_qpn) & + MLX4_CQE_VLAN_PRESENT_MASK) && + (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid)); + + if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { + timestamp = mlx4_en_get_cqe_ts(cqe); + mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb), + timestamp); + } + + skb_mark_napi_id(skb, &cq->napi); + + if (!mlx4_en_cq_busy_polling(cq)) + napi_gro_receive(&cq->napi, skb); + else + netif_receive_skb(skb); + +next: + for (nr = 0; nr < priv->num_frags; nr++) + mlx4_en_free_frag(priv, frags, nr); + + ++cq->mcq.cons_index; + index = (cq->mcq.cons_index) & ring->size_mask; + cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; + if (++polled == budget) + goto out; + } + +out: + AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled); + mlx4_cq_set_ci(&cq->mcq); + wmb(); /* ensure HW sees CQ consumer before we post new buffers */ + ring->cons = cq->mcq.cons_index; + mlx4_en_refill_rx_buffers(priv, ring); + mlx4_en_update_rx_prod_db(ring); + return polled; +} + + +void mlx4_en_rx_irq(struct mlx4_cq *mcq) +{ + struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); + struct mlx4_en_priv *priv = netdev_priv(cq->dev); + + if (likely(priv->port_up)) + napi_schedule_irqoff(&cq->napi); + else + mlx4_en_arm_cq(priv, cq); +} + +/* Rx CQ polling - called by NAPI */ +int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) +{ + struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); + struct net_device *dev = cq->dev; + struct mlx4_en_priv *priv = netdev_priv(dev); + int done; + + if (!mlx4_en_cq_lock_napi(cq)) + return budget; + + done = mlx4_en_process_rx_cq(dev, cq, budget); + + mlx4_en_cq_unlock_napi(cq); + + /* If we used up all the quota - we're probably not done yet... */ + if (done == budget) { + int cpu_curr; + const struct cpumask *aff; + + INC_PERF_COUNTER(priv->pstats.napi_quota); + + cpu_curr = smp_processor_id(); + aff = irq_desc_get_irq_data(cq->irq_desc)->affinity; + + if (likely(cpumask_test_cpu(cpu_curr, aff))) + return budget; + + /* Current cpu is not according to smp_irq_affinity - + * probably affinity changed. need to stop this NAPI + * poll, and restart it on the right CPU + */ + done = 0; + } + /* Done for now */ + napi_complete_done(napi, done); + mlx4_en_arm_cq(priv, cq); + return done; +} + +static const int frag_sizes[] = { + FRAG_SZ0, + FRAG_SZ1, + FRAG_SZ2, + FRAG_SZ3 +}; + +void mlx4_en_calc_rx_buf(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN; + int buf_size = 0; + int i = 0; + + while (buf_size < eff_mtu) { + priv->frag_info[i].frag_size = + (eff_mtu > buf_size + frag_sizes[i]) ? 
+ frag_sizes[i] : eff_mtu - buf_size; + priv->frag_info[i].frag_prefix_size = buf_size; + priv->frag_info[i].frag_stride = + ALIGN(priv->frag_info[i].frag_size, + SMP_CACHE_BYTES); + buf_size += priv->frag_info[i].frag_size; + i++; + } + + priv->num_frags = i; + priv->rx_skb_size = eff_mtu; + priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc)); + + en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n", + eff_mtu, priv->num_frags); + for (i = 0; i < priv->num_frags; i++) { + en_err(priv, + " frag:%d - size:%d prefix:%d stride:%d\n", + i, + priv->frag_info[i].frag_size, + priv->frag_info[i].frag_prefix_size, + priv->frag_info[i].frag_stride); + } +} + +/* RSS related functions */ + +static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, + struct mlx4_en_rx_ring *ring, + enum mlx4_qp_state *state, + struct mlx4_qp *qp) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_qp_context *context; + int err = 0; + + context = kmalloc(sizeof(*context), GFP_KERNEL); + if (!context) + return -ENOMEM; + + err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL); + if (err) { + en_err(priv, "Failed to allocate qp #%x\n", qpn); + goto out; + } + qp->event = mlx4_en_sqp_event; + + memset(context, 0, sizeof *context); + mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0, + qpn, ring->cqn, -1, context); + context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma); + + /* Cancel FCS removal if FW allows */ + if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) { + context->param3 |= cpu_to_be32(1 << 29); + if (priv->dev->features & NETIF_F_RXFCS) + ring->fcs_del = 0; + else + ring->fcs_del = ETH_FCS_LEN; + } else + ring->fcs_del = 0; + + err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state); + if (err) { + mlx4_qp_remove(mdev->dev, qp); + mlx4_qp_free(mdev->dev, qp); + } + mlx4_en_update_rx_prod_db(ring); +out: + kfree(context); + return err; +} + +int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv) +{ + int err; + u32 qpn; + + err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn, + MLX4_RESERVE_A0_QP); + if (err) { + en_err(priv, "Failed reserving drop qpn\n"); + return err; + } + err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL); + if (err) { + en_err(priv, "Failed allocating drop qp\n"); + mlx4_qp_release_range(priv->mdev->dev, qpn, 1); + return err; + } + + return 0; +} + +void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv) +{ + u32 qpn; + + qpn = priv->drop_qp.qpn; + mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp); + mlx4_qp_free(priv->mdev->dev, &priv->drop_qp); + mlx4_qp_release_range(priv->mdev->dev, qpn, 1); +} + +/* Allocate rx qp's and configure them according to rss map */ +int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_rss_map *rss_map = &priv->rss_map; + struct mlx4_qp_context context; + struct mlx4_rss_context *rss_context; + int rss_rings; + void *ptr; + u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 | + MLX4_RSS_TCP_IPV6); + int i, qpn; + int err = 0; + int good_qps = 0; + + en_dbg(DRV, priv, "Configuring rss steering\n"); + err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num, + priv->rx_ring_num, + &rss_map->base_qpn, 0); + if (err) { + en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num); + return err; + } + + for (i = 0; i < priv->rx_ring_num; i++) { + qpn = rss_map->base_qpn + i; + err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i], + &rss_map->state[i], + 
&rss_map->qps[i]); + if (err) + goto rss_err; + + ++good_qps; + } + + /* Configure RSS indirection qp */ + err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp, GFP_KERNEL); + if (err) { + en_err(priv, "Failed to allocate RSS indirection QP\n"); + goto rss_err; + } + rss_map->indir_qp.event = mlx4_en_sqp_event; + mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, + priv->rx_ring[0]->cqn, -1, &context); + + if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num) + rss_rings = priv->rx_ring_num; + else + rss_rings = priv->prof->rss_rings; + + ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path) + + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH; + rss_context = ptr; + rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 | + (rss_map->base_qpn)); + rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn); + if (priv->mdev->profile.udp_rss) { + rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6; + rss_context->base_qpn_udp = rss_context->default_qpn; + } + + if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { + en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n"); + rss_mask |= MLX4_RSS_BY_INNER_HEADERS; + } + + rss_context->flags = rss_mask; + rss_context->hash_fn = MLX4_RSS_HASH_TOP; + if (priv->rss_hash_fn == ETH_RSS_HASH_XOR) { + rss_context->hash_fn = MLX4_RSS_HASH_XOR; + } else if (priv->rss_hash_fn == ETH_RSS_HASH_TOP) { + rss_context->hash_fn = MLX4_RSS_HASH_TOP; + memcpy(rss_context->rss_key, priv->rss_key, + MLX4_EN_RSS_KEY_SIZE); + netdev_rss_key_fill(rss_context->rss_key, + MLX4_EN_RSS_KEY_SIZE); + } else { + en_err(priv, "Unknown RSS hash function requested\n"); + err = -EINVAL; + goto indir_err; + } + err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context, + &rss_map->indir_qp, &rss_map->indir_state); + if (err) + goto indir_err; + + return 0; + +indir_err: + mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state, + MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); + mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); + mlx4_qp_free(mdev->dev, &rss_map->indir_qp); +rss_err: + for (i = 0; i < good_qps; i++) { + mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], + MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]); + mlx4_qp_remove(mdev->dev, &rss_map->qps[i]); + mlx4_qp_free(mdev->dev, &rss_map->qps[i]); + } + mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num); + return err; +} + +void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_rss_map *rss_map = &priv->rss_map; + int i; + + mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state, + MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); + mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); + mlx4_qp_free(mdev->dev, &rss_map->indir_qp); + + for (i = 0; i < priv->rx_ring_num; i++) { + mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], + MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]); + mlx4_qp_remove(mdev->dev, &rss_map->qps[i]); + mlx4_qp_free(mdev->dev, &rss_map->qps[i]); + } + mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c new file mode 100644 index 000000000..b66e03d97 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c @@ -0,0 +1,188 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include <linux/kernel.h> +#include <linux/ethtool.h> +#include <linux/netdevice.h> +#include <linux/delay.h> +#include <linux/mlx4/driver.h> + +#include "mlx4_en.h" + + +static int mlx4_en_test_registers(struct mlx4_en_priv *priv) +{ + return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); +} + +static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv) +{ + struct sk_buff *skb; + struct ethhdr *ethh; + unsigned char *packet; + unsigned int packet_size = MLX4_LOOPBACK_TEST_PAYLOAD; + unsigned int i; + int err; + + + /* build the pkt before xmit */ + skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN); + if (!skb) + return -ENOMEM; + + skb_reserve(skb, NET_IP_ALIGN); + + ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr)); + packet = (unsigned char *)skb_put(skb, packet_size); + memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN); + eth_zero_addr(ethh->h_source); + ethh->h_proto = htons(ETH_P_ARP); + skb_set_mac_header(skb, 0); + for (i = 0; i < packet_size; ++i) /* fill our packet */ + packet[i] = (unsigned char)(i & 0xff); + + /* xmit the pkt */ + err = mlx4_en_xmit(skb, priv->dev); + return err; +} + +static int mlx4_en_test_loopback(struct mlx4_en_priv *priv) +{ + u32 loopback_ok = 0; + int i; + bool gro_enabled; + + priv->loopback_ok = 0; + priv->validate_loopback = 1; + gro_enabled = priv->dev->features & NETIF_F_GRO; + + mlx4_en_update_loopback_state(priv->dev, priv->dev->features); + priv->dev->features &= ~NETIF_F_GRO; + + /* xmit */ + if (mlx4_en_test_loopback_xmit(priv)) { + en_err(priv, "Transmitting loopback packet failed\n"); + goto mlx4_en_test_loopback_exit; + } + + /* polling for result */ + for (i = 0; i < MLX4_EN_LOOPBACK_RETRIES; ++i) { + msleep(MLX4_EN_LOOPBACK_TIMEOUT); + if (priv->loopback_ok) { + loopback_ok = 1; + break; + } + } + if (!loopback_ok) + en_err(priv, "Loopback packet didn't arrive\n"); + +mlx4_en_test_loopback_exit: + + priv->validate_loopback = 0; + + if (gro_enabled) + priv->dev->features |= NETIF_F_GRO; + + mlx4_en_update_loopback_state(priv->dev, priv->dev->features); + return !loopback_ok; +} + + +static int mlx4_en_test_link(struct mlx4_en_priv *priv) +{ + if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) + 
return -ENOMEM; + if (priv->port_state.link_state == 1) + return 0; + else + return 1; +} + +static int mlx4_en_test_speed(struct mlx4_en_priv *priv) +{ + + if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) + return -ENOMEM; + + /* The device supports 100M, 1G, 10G, 20G, 40G and 56G speed */ + if (priv->port_state.link_speed != SPEED_100 && + priv->port_state.link_speed != SPEED_1000 && + priv->port_state.link_speed != SPEED_10000 && + priv->port_state.link_speed != SPEED_20000 && + priv->port_state.link_speed != SPEED_40000 && + priv->port_state.link_speed != SPEED_56000) + return priv->port_state.link_speed; + + return 0; +} + + +void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int i, carrier_ok; + + memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST); + + if (*flags & ETH_TEST_FL_OFFLINE) { + /* disable the interface */ + carrier_ok = netif_carrier_ok(dev); + + netif_carrier_off(dev); + /* Wait until all tx queues are empty. + * there should not be any additional incoming traffic + * since we turned the carrier off */ + msleep(200); + + if (priv->mdev->dev->caps.flags & + MLX4_DEV_CAP_FLAG_UC_LOOPBACK) { + buf[3] = mlx4_en_test_registers(priv); + if (priv->port_up) + buf[4] = mlx4_en_test_loopback(priv); + } + + if (carrier_ok) + netif_carrier_on(dev); + + } + buf[0] = mlx4_test_interrupts(mdev->dev); + buf[1] = mlx4_en_test_link(priv); + buf[2] = mlx4_en_test_speed(priv); + + for (i = 0; i < MLX4_EN_NUM_SELF_TEST; i++) { + if (buf[i]) + *flags |= ETH_TEST_FL_FAILED; + } +} diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c new file mode 100644 index 000000000..c10d98f6a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -0,0 +1,1021 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#include <asm/page.h> +#include <linux/mlx4/cq.h> +#include <linux/slab.h> +#include <linux/mlx4/qp.h> +#include <linux/skbuff.h> +#include <linux/if_vlan.h> +#include <linux/prefetch.h> +#include <linux/vmalloc.h> +#include <linux/tcp.h> +#include <linux/ip.h> +#include <linux/moduleparam.h> + +#include "mlx4_en.h" + +int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring **pring, u32 size, + u16 stride, int node, int queue_index) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_tx_ring *ring; + int tmp; + int err; + + ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node); + if (!ring) { + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) { + en_err(priv, "Failed allocating TX ring\n"); + return -ENOMEM; + } + } + + ring->size = size; + ring->size_mask = size - 1; + ring->stride = stride; + ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS; + + tmp = size * sizeof(struct mlx4_en_tx_info); + ring->tx_info = kmalloc_node(tmp, GFP_KERNEL | __GFP_NOWARN, node); + if (!ring->tx_info) { + ring->tx_info = vmalloc(tmp); + if (!ring->tx_info) { + err = -ENOMEM; + goto err_ring; + } + } + + en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n", + ring->tx_info, tmp); + + ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node); + if (!ring->bounce_buf) { + ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL); + if (!ring->bounce_buf) { + err = -ENOMEM; + goto err_info; + } + } + ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE); + + /* Allocate HW buffers on provided NUMA node */ + set_dev_node(&mdev->dev->persist->pdev->dev, node); + err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size, + 2 * PAGE_SIZE); + set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node); + if (err) { + en_err(priv, "Failed allocating hwq resources\n"); + goto err_bounce; + } + + err = mlx4_en_map_buffer(&ring->wqres.buf); + if (err) { + en_err(priv, "Failed to map TX buffer\n"); + goto err_hwq_res; + } + + ring->buf = ring->wqres.buf.direct.buf; + + en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n", + ring, ring->buf, ring->size, ring->buf_size, + (unsigned long long) ring->wqres.buf.direct.map); + + err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn, + MLX4_RESERVE_ETH_BF_QP); + if (err) { + en_err(priv, "failed reserving qp for TX ring\n"); + goto err_map; + } + + err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL); + if (err) { + en_err(priv, "Failed allocating qp %d\n", ring->qpn); + goto err_reserve; + } + ring->qp.event = mlx4_en_sqp_event; + + err = mlx4_bf_alloc(mdev->dev, &ring->bf, node); + if (err) { + en_dbg(DRV, priv, "working without blueflame (%d)\n", err); + ring->bf.uar = &mdev->priv_uar; + ring->bf.uar->map = mdev->uar_map; + ring->bf_enabled = false; + ring->bf_alloced = false; + priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME; + } else { + ring->bf_alloced = true; + ring->bf_enabled = !!(priv->pflags & + MLX4_EN_PRIV_FLAGS_BLUEFLAME); + } + + ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type; + ring->queue_index = queue_index; + + if (queue_index < priv->num_tx_rings_p_up) + cpumask_set_cpu(cpumask_local_spread(queue_index, + priv->mdev->dev->numa_node), + &ring->affinity_mask); + + *pring = ring; + return 0; + +err_reserve: + mlx4_qp_release_range(mdev->dev, ring->qpn, 1); +err_map: + mlx4_en_unmap_buffer(&ring->wqres.buf); +err_hwq_res: + mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); +err_bounce: + 
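+	/* Error unwind: release resources in reverse order of allocation */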
kfree(ring->bounce_buf); + ring->bounce_buf = NULL; +err_info: + kvfree(ring->tx_info); + ring->tx_info = NULL; +err_ring: + kfree(ring); + *pring = NULL; + return err; +} + +void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring **pring) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_tx_ring *ring = *pring; + en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); + + if (ring->bf_alloced) + mlx4_bf_free(mdev->dev, &ring->bf); + mlx4_qp_remove(mdev->dev, &ring->qp); + mlx4_qp_free(mdev->dev, &ring->qp); + mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1); + mlx4_en_unmap_buffer(&ring->wqres.buf); + mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); + kfree(ring->bounce_buf); + ring->bounce_buf = NULL; + kvfree(ring->tx_info); + ring->tx_info = NULL; + kfree(ring); + *pring = NULL; +} + +int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int cq, int user_prio) +{ + struct mlx4_en_dev *mdev = priv->mdev; + int err; + + ring->cqn = cq; + ring->prod = 0; + ring->cons = 0xffffffff; + ring->last_nr_txbb = 1; + memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info)); + memset(ring->buf, 0, ring->buf_size); + + ring->qp_state = MLX4_QP_STATE_RST; + ring->doorbell_qpn = cpu_to_be32(ring->qp.qpn << 8); + ring->mr_key = cpu_to_be32(mdev->mr.key); + + mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, + ring->cqn, user_prio, &ring->context); + if (ring->bf_alloced) + ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); + + err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, + &ring->qp, &ring->qp_state); + if (!cpumask_empty(&ring->affinity_mask)) + netif_set_xps_queue(priv->dev, &ring->affinity_mask, + ring->queue_index); + + return err; +} + +void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring) +{ + struct mlx4_en_dev *mdev = priv->mdev; + + mlx4_qp_modify(mdev->dev, NULL, ring->qp_state, + MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp); +} + +static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring) +{ + return ring->prod - ring->cons > ring->full_size; +} + +static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, int index, + u8 owner) +{ + __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT)); + struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE; + struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; + void *end = ring->buf + ring->buf_size; + __be32 *ptr = (__be32 *)tx_desc; + int i; + + /* Optimize the common case when there are no wraparounds */ + if (likely((void *)tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) { + /* Stamp the freed descriptor */ + for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; + i += STAMP_STRIDE) { + *ptr = stamp; + ptr += STAMP_DWORDS; + } + } else { + /* Stamp the freed descriptor */ + for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; + i += STAMP_STRIDE) { + *ptr = stamp; + ptr += STAMP_DWORDS; + if ((void *)ptr >= end) { + ptr = ring->buf; + stamp ^= cpu_to_be32(0x80000000); + } + } + } +} + + +static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int index, u8 owner, u64 timestamp) +{ + struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; + struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE; + struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset; + void *end = ring->buf + ring->buf_size; + struct sk_buff *skb = tx_info->skb; + int 
nr_maps = tx_info->nr_maps; + int i; + + /* We do not touch skb here, so prefetch skb->users location + * to speedup consume_skb() + */ + prefetchw(&skb->users); + + if (unlikely(timestamp)) { + struct skb_shared_hwtstamps hwts; + + mlx4_en_fill_hwtstamps(priv->mdev, &hwts, timestamp); + skb_tstamp_tx(skb, &hwts); + } + + /* Optimize the common case when there are no wraparounds */ + if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) { + if (!tx_info->inl) { + if (tx_info->linear) + dma_unmap_single(priv->ddev, + tx_info->map0_dma, + tx_info->map0_byte_count, + PCI_DMA_TODEVICE); + else + dma_unmap_page(priv->ddev, + tx_info->map0_dma, + tx_info->map0_byte_count, + PCI_DMA_TODEVICE); + for (i = 1; i < nr_maps; i++) { + data++; + dma_unmap_page(priv->ddev, + (dma_addr_t)be64_to_cpu(data->addr), + be32_to_cpu(data->byte_count), + PCI_DMA_TODEVICE); + } + } + } else { + if (!tx_info->inl) { + if ((void *) data >= end) { + data = ring->buf + ((void *)data - end); + } + + if (tx_info->linear) + dma_unmap_single(priv->ddev, + tx_info->map0_dma, + tx_info->map0_byte_count, + PCI_DMA_TODEVICE); + else + dma_unmap_page(priv->ddev, + tx_info->map0_dma, + tx_info->map0_byte_count, + PCI_DMA_TODEVICE); + for (i = 1; i < nr_maps; i++) { + data++; + /* Check for wraparound before unmapping */ + if ((void *) data >= end) + data = ring->buf; + dma_unmap_page(priv->ddev, + (dma_addr_t)be64_to_cpu(data->addr), + be32_to_cpu(data->byte_count), + PCI_DMA_TODEVICE); + } + } + } + dev_consume_skb_any(skb); + return tx_info->nr_txbb; +} + + +int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int cnt = 0; + + /* Skip last polled descriptor */ + ring->cons += ring->last_nr_txbb; + en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n", + ring->cons, ring->prod); + + if ((u32) (ring->prod - ring->cons) > ring->size) { + if (netif_msg_tx_err(priv)) + en_warn(priv, "Tx consumer passed producer!\n"); + return 0; + } + + while (ring->cons != ring->prod) { + ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring, + ring->cons & ring->size_mask, + !!(ring->cons & ring->size), 0); + ring->cons += ring->last_nr_txbb; + cnt++; + } + + netdev_tx_reset_queue(ring->tx_queue); + + if (cnt) + en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt); + + return cnt; +} + +static bool mlx4_en_process_tx_cq(struct net_device *dev, + struct mlx4_en_cq *cq) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_cq *mcq = &cq->mcq; + struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring]; + struct mlx4_cqe *cqe; + u16 index; + u16 new_index, ring_index, stamp_index; + u32 txbbs_skipped = 0; + u32 txbbs_stamp = 0; + u32 cons_index = mcq->cons_index; + int size = cq->size; + u32 size_mask = ring->size_mask; + struct mlx4_cqe *buf = cq->buf; + u32 packets = 0; + u32 bytes = 0; + int factor = priv->cqe_factor; + u64 timestamp = 0; + int done = 0; + int budget = priv->tx_work_limit; + u32 last_nr_txbb; + u32 ring_cons; + + if (!priv->port_up) + return true; + + netdev_txq_bql_complete_prefetchw(ring->tx_queue); + + index = cons_index & size_mask; + cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; + last_nr_txbb = ACCESS_ONCE(ring->last_nr_txbb); + ring_cons = ACCESS_ONCE(ring->cons); + ring_index = ring_cons & size_mask; + stamp_index = ring_index; + + /* Process all completed CQEs */ + while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, + cons_index & size) && (done < budget)) { + /* + * make sure we read the 
CQE after we read the + * ownership bit + */ + dma_rmb(); + + if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == + MLX4_CQE_OPCODE_ERROR)) { + struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe; + + en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n", + cqe_err->vendor_err_syndrome, + cqe_err->syndrome); + } + + /* Skip over last polled CQE */ + new_index = be16_to_cpu(cqe->wqe_index) & size_mask; + + do { + txbbs_skipped += last_nr_txbb; + ring_index = (ring_index + last_nr_txbb) & size_mask; + if (ring->tx_info[ring_index].ts_requested) + timestamp = mlx4_en_get_cqe_ts(cqe); + + /* free next descriptor */ + last_nr_txbb = mlx4_en_free_tx_desc( + priv, ring, ring_index, + !!((ring_cons + txbbs_skipped) & + ring->size), timestamp); + + mlx4_en_stamp_wqe(priv, ring, stamp_index, + !!((ring_cons + txbbs_stamp) & + ring->size)); + stamp_index = ring_index; + txbbs_stamp = txbbs_skipped; + packets++; + bytes += ring->tx_info[ring_index].nr_bytes; + } while ((++done < budget) && (ring_index != new_index)); + + ++cons_index; + index = cons_index & size_mask; + cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; + } + + + /* + * To prevent CQ overflow we first update CQ consumer and only then + * the ring consumer. + */ + mcq->cons_index = cons_index; + mlx4_cq_set_ci(mcq); + wmb(); + + /* we want to dirty this cache line once */ + ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb; + ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped; + + netdev_tx_completed_queue(ring->tx_queue, packets, bytes); + + /* Wakeup Tx queue if this stopped, and ring is not full. + */ + if (netif_tx_queue_stopped(ring->tx_queue) && + !mlx4_en_is_tx_ring_full(ring)) { + netif_tx_wake_queue(ring->tx_queue); + ring->wake_queue++; + } + return done < budget; +} + +void mlx4_en_tx_irq(struct mlx4_cq *mcq) +{ + struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); + struct mlx4_en_priv *priv = netdev_priv(cq->dev); + + if (likely(priv->port_up)) + napi_schedule_irqoff(&cq->napi); + else + mlx4_en_arm_cq(priv, cq); +} + +/* TX CQ polling - called by NAPI */ +int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget) +{ + struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); + struct net_device *dev = cq->dev; + struct mlx4_en_priv *priv = netdev_priv(dev); + int clean_complete; + + clean_complete = mlx4_en_process_tx_cq(dev, cq); + if (!clean_complete) + return budget; + + napi_complete(napi); + mlx4_en_arm_cq(priv, cq); + + return 0; +} + +static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + u32 index, + unsigned int desc_size) +{ + u32 copy = (ring->size - index) * TXBB_SIZE; + int i; + + for (i = desc_size - copy - 4; i >= 0; i -= 4) { + if ((i & (TXBB_SIZE - 1)) == 0) + wmb(); + + *((u32 *) (ring->buf + i)) = + *((u32 *) (ring->bounce_buf + copy + i)); + } + + for (i = copy - 4; i >= 4 ; i -= 4) { + if ((i & (TXBB_SIZE - 1)) == 0) + wmb(); + + *((u32 *) (ring->buf + index * TXBB_SIZE + i)) = + *((u32 *) (ring->bounce_buf + i)); + } + + /* Return real descriptor location */ + return ring->buf + index * TXBB_SIZE; +} + +/* Decide if skb can be inlined in tx descriptor to avoid dma mapping + * + * It seems strange we do not simply use skb_copy_bits(). 
+ * This would allow to inline all skbs iff skb->len <= inline_thold + * + * Note that caller already checked skb was not a gso packet + */ +static bool is_inline(int inline_thold, const struct sk_buff *skb, + const struct skb_shared_info *shinfo, + void **pfrag) +{ + void *ptr; + + if (skb->len > inline_thold || !inline_thold) + return false; + + if (shinfo->nr_frags == 1) { + ptr = skb_frag_address_safe(&shinfo->frags[0]); + if (unlikely(!ptr)) + return false; + *pfrag = ptr; + return true; + } + if (shinfo->nr_frags) + return false; + return true; +} + +static int inline_size(const struct sk_buff *skb) +{ + if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg) + <= MLX4_INLINE_ALIGN) + return ALIGN(skb->len + CTRL_SIZE + + sizeof(struct mlx4_wqe_inline_seg), 16); + else + return ALIGN(skb->len + CTRL_SIZE + 2 * + sizeof(struct mlx4_wqe_inline_seg), 16); +} + +static int get_real_size(const struct sk_buff *skb, + const struct skb_shared_info *shinfo, + struct net_device *dev, + int *lso_header_size, + bool *inline_ok, + void **pfrag) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int real_size; + + if (shinfo->gso_size) { + *inline_ok = false; + if (skb->encapsulation) + *lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb); + else + *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb); + real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE + + ALIGN(*lso_header_size + 4, DS_SIZE); + if (unlikely(*lso_header_size != skb_headlen(skb))) { + /* We add a segment for the skb linear buffer only if + * it contains data */ + if (*lso_header_size < skb_headlen(skb)) + real_size += DS_SIZE; + else { + if (netif_msg_tx_err(priv)) + en_warn(priv, "Non-linear headers\n"); + return 0; + } + } + } else { + *lso_header_size = 0; + *inline_ok = is_inline(priv->prof->inline_thold, skb, + shinfo, pfrag); + + if (*inline_ok) + real_size = inline_size(skb); + else + real_size = CTRL_SIZE + + (shinfo->nr_frags + 1) * DS_SIZE; + } + + return real_size; +} + +static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, + const struct sk_buff *skb, + const struct skb_shared_info *shinfo, + int real_size, u16 *vlan_tag, + int tx_ind, void *fragptr) +{ + struct mlx4_wqe_inline_seg *inl = &tx_desc->inl; + int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl; + unsigned int hlen = skb_headlen(skb); + + if (skb->len <= spc) { + if (likely(skb->len >= MIN_PKT_LEN)) { + inl->byte_count = cpu_to_be32(1 << 31 | skb->len); + } else { + inl->byte_count = cpu_to_be32(1 << 31 | MIN_PKT_LEN); + memset(((void *)(inl + 1)) + skb->len, 0, + MIN_PKT_LEN - skb->len); + } + skb_copy_from_linear_data(skb, inl + 1, hlen); + if (shinfo->nr_frags) + memcpy(((void *)(inl + 1)) + hlen, fragptr, + skb_frag_size(&shinfo->frags[0])); + + } else { + inl->byte_count = cpu_to_be32(1 << 31 | spc); + if (hlen <= spc) { + skb_copy_from_linear_data(skb, inl + 1, hlen); + if (hlen < spc) { + memcpy(((void *)(inl + 1)) + hlen, + fragptr, spc - hlen); + fragptr += spc - hlen; + } + inl = (void *) (inl + 1) + spc; + memcpy(((void *)(inl + 1)), fragptr, skb->len - spc); + } else { + skb_copy_from_linear_data(skb, inl + 1, spc); + inl = (void *) (inl + 1) + spc; + skb_copy_from_linear_data_offset(skb, spc, inl + 1, + hlen - spc); + if (shinfo->nr_frags) + memcpy(((void *)(inl + 1)) + hlen - spc, + fragptr, + skb_frag_size(&shinfo->frags[0])); + } + + dma_wmb(); + inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc)); + } +} + +u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff 
*skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + u16 rings_p_up = priv->num_tx_rings_p_up; + u8 up = 0; + + if (dev->num_tc) + return skb_tx_hash(dev, skb); + + if (skb_vlan_tag_present(skb)) + up = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT; + + return fallback(dev, skb) % rings_p_up + up * rings_p_up; +} + +static void mlx4_bf_copy(void __iomem *dst, const void *src, + unsigned int bytecnt) +{ + __iowrite64_copy(dst, src, bytecnt / 8); +} + +netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct skb_shared_info *shinfo = skb_shinfo(skb); + struct mlx4_en_priv *priv = netdev_priv(dev); + struct device *ddev = priv->ddev; + struct mlx4_en_tx_ring *ring; + struct mlx4_en_tx_desc *tx_desc; + struct mlx4_wqe_data_seg *data; + struct mlx4_en_tx_info *tx_info; + int tx_ind = 0; + int nr_txbb; + int desc_size; + int real_size; + u32 index, bf_index; + __be32 op_own; + u16 vlan_tag = 0; + int i_frag; + int lso_header_size; + void *fragptr = NULL; + bool bounce = false; + bool send_doorbell; + bool stop_queue; + bool inline_ok; + u32 ring_cons; + + if (!priv->port_up) + goto tx_drop; + + tx_ind = skb_get_queue_mapping(skb); + ring = priv->tx_ring[tx_ind]; + + /* fetch ring->cons far ahead before needing it to avoid stall */ + ring_cons = ACCESS_ONCE(ring->cons); + + real_size = get_real_size(skb, shinfo, dev, &lso_header_size, + &inline_ok, &fragptr); + if (unlikely(!real_size)) + goto tx_drop; + + /* Align descriptor to TXBB size */ + desc_size = ALIGN(real_size, TXBB_SIZE); + nr_txbb = desc_size / TXBB_SIZE; + if (unlikely(nr_txbb > MAX_DESC_TXBBS)) { + if (netif_msg_tx_err(priv)) + en_warn(priv, "Oversized header or SG list\n"); + goto tx_drop; + } + + if (skb_vlan_tag_present(skb)) + vlan_tag = skb_vlan_tag_get(skb); + + + netdev_txq_bql_enqueue_prefetchw(ring->tx_queue); + + /* Track current inflight packets for performance analysis */ + AVG_PERF_COUNTER(priv->pstats.inflight_avg, + (u32)(ring->prod - ring_cons - 1)); + + /* Packet is good - grab an index and transmit it */ + index = ring->prod & ring->size_mask; + bf_index = ring->prod; + + /* See if we have enough space for whole descriptor TXBB for setting + * SW ownership on next descriptor; if not, use a bounce buffer. */ + if (likely(index + nr_txbb <= ring->size)) + tx_desc = ring->buf + index * TXBB_SIZE; + else { + tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf; + bounce = true; + } + + /* Save skb in tx_info ring */ + tx_info = &ring->tx_info[index]; + tx_info->skb = skb; + tx_info->nr_txbb = nr_txbb; + + data = &tx_desc->data; + if (lso_header_size) + data = ((void *)&tx_desc->lso + ALIGN(lso_header_size + 4, + DS_SIZE)); + + /* valid only for none inline segments */ + tx_info->data_offset = (void *)data - (void *)tx_desc; + + tx_info->inl = inline_ok; + + tx_info->linear = (lso_header_size < skb_headlen(skb) && + !inline_ok) ? 
1 : 0; + + tx_info->nr_maps = shinfo->nr_frags + tx_info->linear; + data += tx_info->nr_maps - 1; + + if (!tx_info->inl) { + dma_addr_t dma = 0; + u32 byte_count = 0; + + /* Map fragments if any */ + for (i_frag = shinfo->nr_frags - 1; i_frag >= 0; i_frag--) { + const struct skb_frag_struct *frag; + + frag = &shinfo->frags[i_frag]; + byte_count = skb_frag_size(frag); + dma = skb_frag_dma_map(ddev, frag, + 0, byte_count, + DMA_TO_DEVICE); + if (dma_mapping_error(ddev, dma)) + goto tx_drop_unmap; + + data->addr = cpu_to_be64(dma); + data->lkey = ring->mr_key; + dma_wmb(); + data->byte_count = cpu_to_be32(byte_count); + --data; + } + + /* Map linear part if needed */ + if (tx_info->linear) { + byte_count = skb_headlen(skb) - lso_header_size; + + dma = dma_map_single(ddev, skb->data + + lso_header_size, byte_count, + PCI_DMA_TODEVICE); + if (dma_mapping_error(ddev, dma)) + goto tx_drop_unmap; + + data->addr = cpu_to_be64(dma); + data->lkey = ring->mr_key; + dma_wmb(); + data->byte_count = cpu_to_be32(byte_count); + } + /* tx completion can avoid cache line miss for common cases */ + tx_info->map0_dma = dma; + tx_info->map0_byte_count = byte_count; + } + + /* + * For timestamping add flag to skb_shinfo and + * set flag for further reference + */ + tx_info->ts_requested = 0; + if (unlikely(ring->hwtstamp_tx_type == HWTSTAMP_TX_ON && + shinfo->tx_flags & SKBTX_HW_TSTAMP)) { + shinfo->tx_flags |= SKBTX_IN_PROGRESS; + tx_info->ts_requested = 1; + } + + /* Prepare ctrl segement apart opcode+ownership, which depends on + * whether LSO is used */ + tx_desc->ctrl.srcrb_flags = priv->ctrl_flags; + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + if (!skb->encapsulation) + tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | + MLX4_WQE_CTRL_TCP_UDP_CSUM); + else + tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM); + ring->tx_csum++; + } + + if (priv->flags & MLX4_EN_FLAG_ENABLE_HW_LOOPBACK) { + struct ethhdr *ethh; + + /* Copy dst mac address to wqe. This allows loopback in eSwitch, + * so that VFs and PF can communicate with each other + */ + ethh = (struct ethhdr *)skb->data; + tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest); + tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2)); + } + + /* Handle LSO (TSO) packets */ + if (lso_header_size) { + int i; + + /* Mark opcode as LSO */ + op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) | + ((ring->prod & ring->size) ? + cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); + + /* Fill in the LSO prefix */ + tx_desc->lso.mss_hdr_size = cpu_to_be32( + shinfo->gso_size << 16 | lso_header_size); + + /* Copy headers; + * note that we already verified that it is linear */ + memcpy(tx_desc->lso.header, skb->data, lso_header_size); + + ring->tso_packets++; + + i = ((skb->len - lso_header_size) / shinfo->gso_size) + + !!((skb->len - lso_header_size) % shinfo->gso_size); + tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size; + ring->packets += i; + } else { + /* Normal (Non LSO) packet */ + op_own = cpu_to_be32(MLX4_OPCODE_SEND) | + ((ring->prod & ring->size) ? 
+ cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); + tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); + ring->packets++; + } + ring->bytes += tx_info->nr_bytes; + netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes); + AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); + + if (tx_info->inl) + build_inline_wqe(tx_desc, skb, shinfo, real_size, &vlan_tag, + tx_ind, fragptr); + + if (skb->encapsulation) { + struct iphdr *ipv4 = (struct iphdr *)skb_inner_network_header(skb); + if (ipv4->protocol == IPPROTO_TCP || ipv4->protocol == IPPROTO_UDP) + op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP | MLX4_WQE_CTRL_ILP); + else + op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP); + } + + ring->prod += nr_txbb; + + /* If we used a bounce buffer then copy descriptor back into place */ + if (unlikely(bounce)) + tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); + + skb_tx_timestamp(skb); + + /* Check available TXBBs And 2K spare for prefetch */ + stop_queue = mlx4_en_is_tx_ring_full(ring); + if (unlikely(stop_queue)) { + netif_tx_stop_queue(ring->tx_queue); + ring->queue_stopped++; + } + send_doorbell = !skb->xmit_more || netif_xmit_stopped(ring->tx_queue); + + real_size = (real_size / 16) & 0x3f; + + if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && + !skb_vlan_tag_present(skb) && send_doorbell) { + tx_desc->ctrl.bf_qpn = ring->doorbell_qpn | + cpu_to_be32(real_size); + + op_own |= htonl((bf_index & 0xffff) << 8); + /* Ensure new descriptor hits memory + * before setting ownership of this descriptor to HW + */ + dma_wmb(); + tx_desc->ctrl.owner_opcode = op_own; + + wmb(); + + mlx4_bf_copy(ring->bf.reg + ring->bf.offset, &tx_desc->ctrl, + desc_size); + + wmb(); + + ring->bf.offset ^= ring->bf.buf_size; + } else { + tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag); + tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * + !!skb_vlan_tag_present(skb); + tx_desc->ctrl.fence_size = real_size; + + /* Ensure new descriptor hits memory + * before setting ownership of this descriptor to HW + */ + dma_wmb(); + tx_desc->ctrl.owner_opcode = op_own; + if (send_doorbell) { + wmb(); + /* Since there is no iowrite*_native() that writes the + * value as is, without byteswapping - using the one + * the doesn't do byteswapping in the relevant arch + * endianness. + */ +#if defined(__LITTLE_ENDIAN) + iowrite32( +#else + iowrite32be( +#endif + ring->doorbell_qpn, + ring->bf.uar->map + MLX4_SEND_DOORBELL); + } else { + ring->xmit_more++; + } + } + + if (unlikely(stop_queue)) { + /* If queue was emptied after the if (stop_queue) , and before + * the netif_tx_stop_queue() - need to wake the queue, + * or else it will remain stopped forever. + * Need a memory barrier to make sure ring->cons was not + * updated before queue was stopped. 
+ */ + smp_rmb(); + + ring_cons = ACCESS_ONCE(ring->cons); + if (unlikely(!mlx4_en_is_tx_ring_full(ring))) { + netif_tx_wake_queue(ring->tx_queue); + ring->wake_queue++; + } + } + return NETDEV_TX_OK; + +tx_drop_unmap: + en_err(priv, "DMA mapping error\n"); + + while (++i_frag < shinfo->nr_frags) { + ++data; + dma_unmap_page(ddev, (dma_addr_t) be64_to_cpu(data->addr), + be32_to_cpu(data->byte_count), + PCI_DMA_TODEVICE); + } + +tx_drop: + dev_kfree_skb_any(skb); + priv->stats.tx_dropped++; + return NETDEV_TX_OK; +} + diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c new file mode 100644 index 000000000..2619c9fbf --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -0,0 +1,1441 @@ +/* + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. + * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/interrupt.h> +#include <linux/slab.h> +#include <linux/export.h> +#include <linux/mm.h> +#include <linux/dma-mapping.h> + +#include <linux/mlx4/cmd.h> +#include <linux/cpu_rmap.h> + +#include "mlx4.h" +#include "fw.h" + +enum { + MLX4_IRQNAME_SIZE = 32 +}; + +enum { + MLX4_NUM_ASYNC_EQE = 0x100, + MLX4_NUM_SPARE_EQE = 0x80, + MLX4_EQ_ENTRY_SIZE = 0x20 +}; + +#define MLX4_EQ_STATUS_OK ( 0 << 28) +#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28) +#define MLX4_EQ_OWNER_SW ( 0 << 24) +#define MLX4_EQ_OWNER_HW ( 1 << 24) +#define MLX4_EQ_FLAG_EC ( 1 << 18) +#define MLX4_EQ_FLAG_OI ( 1 << 17) +#define MLX4_EQ_STATE_ARMED ( 9 << 8) +#define MLX4_EQ_STATE_FIRED (10 << 8) +#define MLX4_EQ_STATE_ALWAYS_ARMED (11 << 8) + +#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG) | \ + (1ull << MLX4_EVENT_TYPE_COMM_EST) | \ + (1ull << MLX4_EVENT_TYPE_SQ_DRAINED) | \ + (1ull << MLX4_EVENT_TYPE_CQ_ERROR) | \ + (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR) | \ + (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR) | \ + (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \ + (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \ + (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \ + (1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \ + (1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \ + (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \ + (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \ + (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \ + (1ull << MLX4_EVENT_TYPE_CMD) | \ + (1ull << MLX4_EVENT_TYPE_OP_REQUIRED) | \ + (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \ + (1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \ + (1ull << MLX4_EVENT_TYPE_FATAL_WARNING)) + +static u64 get_async_ev_mask(struct mlx4_dev *dev) +{ + u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK; + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV) + async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT); + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT) + async_ev_mask |= (1ull << MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT); + + return async_ev_mask; +} + +static void eq_set_ci(struct mlx4_eq *eq, int req_not) +{ + __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) | + req_not << 31), + eq->doorbell); + /* We still want ordering, just not swabbing, so add a barrier */ + mb(); +} + +static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor, + u8 eqe_size) +{ + /* (entry & (eq->nent - 1)) gives us a cyclic array */ + unsigned long offset = (entry & (eq->nent - 1)) * eqe_size; + /* CX3 is capable of extending the EQE from 32 to 64 bytes with + * strides of 64B,128B and 256B. + * When 64B EQE is used, the first (in the lower addresses) + * 32 bytes in the 64 byte EQE are reserved and the next 32 bytes + * contain the legacy EQE information. + * In all other cases, the first 32B contains the legacy EQE info. + */ + return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE; +} + +static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor, u8 size) +{ + struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor, size); + return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe; +} + +static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq) +{ + struct mlx4_eqe *eqe = + &slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)]; + return (!!(eqe->owner & 0x80) ^ + !!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ? 
+ eqe : NULL; +} + +void mlx4_gen_slave_eqe(struct work_struct *work) +{ + struct mlx4_mfunc_master_ctx *master = + container_of(work, struct mlx4_mfunc_master_ctx, + slave_event_work); + struct mlx4_mfunc *mfunc = + container_of(master, struct mlx4_mfunc, master); + struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc); + struct mlx4_dev *dev = &priv->dev; + struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq; + struct mlx4_eqe *eqe; + u8 slave; + int i; + + for (eqe = next_slave_event_eqe(slave_eq); eqe; + eqe = next_slave_event_eqe(slave_eq)) { + slave = eqe->slave_id; + + /* All active slaves need to receive the event */ + if (slave == ALL_SLAVES) { + for (i = 0; i <= dev->persist->num_vfs; i++) { + if (mlx4_GEN_EQE(dev, i, eqe)) + mlx4_warn(dev, "Failed to generate event for slave %d\n", + i); + } + } else { + if (mlx4_GEN_EQE(dev, slave, eqe)) + mlx4_warn(dev, "Failed to generate event for slave %d\n", + slave); + } + ++slave_eq->cons; + } +} + + +static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq; + struct mlx4_eqe *s_eqe; + unsigned long flags; + + spin_lock_irqsave(&slave_eq->event_lock, flags); + s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)]; + if ((!!(s_eqe->owner & 0x80)) ^ + (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) { + mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n", + slave); + spin_unlock_irqrestore(&slave_eq->event_lock, flags); + return; + } + + memcpy(s_eqe, eqe, dev->caps.eqe_size - 1); + s_eqe->slave_id = slave; + /* ensure all information is written before setting the ownersip bit */ + dma_wmb(); + s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 
0x0 : 0x80; + ++slave_eq->prod; + + queue_work(priv->mfunc.master.comm_wq, + &priv->mfunc.master.slave_event_work); + spin_unlock_irqrestore(&slave_eq->event_lock, flags); +} + +static void mlx4_slave_event(struct mlx4_dev *dev, int slave, + struct mlx4_eqe *eqe) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + if (slave < 0 || slave > dev->persist->num_vfs || + slave == dev->caps.function || + !priv->mfunc.master.slave_state[slave].active) + return; + + slave_event(dev, slave, eqe); +} + +int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port) +{ + struct mlx4_eqe eqe; + + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *s_slave = &priv->mfunc.master.slave_state[slave]; + + if (!s_slave->active) + return 0; + + memset(&eqe, 0, sizeof eqe); + + eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; + eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE; + eqe.event.port_mgmt_change.port = port; + + return mlx4_GEN_EQE(dev, slave, &eqe); +} +EXPORT_SYMBOL(mlx4_gen_pkey_eqe); + +int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port) +{ + struct mlx4_eqe eqe; + + /*don't send if we don't have the that slave */ + if (dev->persist->num_vfs < slave) + return 0; + memset(&eqe, 0, sizeof eqe); + + eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; + eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO; + eqe.event.port_mgmt_change.port = port; + + return mlx4_GEN_EQE(dev, slave, &eqe); +} +EXPORT_SYMBOL(mlx4_gen_guid_change_eqe); + +int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port, + u8 port_subtype_change) +{ + struct mlx4_eqe eqe; + + /*don't send if we don't have the that slave */ + if (dev->persist->num_vfs < slave) + return 0; + memset(&eqe, 0, sizeof eqe); + + eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE; + eqe.subtype = port_subtype_change; + eqe.event.port_change.port = cpu_to_be32(port << 28); + + mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__, + port_subtype_change, slave, port); + return mlx4_GEN_EQE(dev, slave, &eqe); +} +EXPORT_SYMBOL(mlx4_gen_port_state_change_eqe); + +enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state; + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); + + if (slave >= dev->num_slaves || port > dev->caps.num_ports || + port <= 0 || !test_bit(port - 1, actv_ports.ports)) { + pr_err("%s: Error: asking for slave:%d, port:%d\n", + __func__, slave, port); + return SLAVE_PORT_DOWN; + } + return s_state[slave].port_state[port]; +} +EXPORT_SYMBOL(mlx4_get_slave_port_state); + +static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port, + enum slave_port_state state) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state; + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); + + if (slave >= dev->num_slaves || port > dev->caps.num_ports || + port <= 0 || !test_bit(port - 1, actv_ports.ports)) { + pr_err("%s: Error: asking for slave:%d, port:%d\n", + __func__, slave, port); + return -1; + } + s_state[slave].port_state[port] = state; + + return 0; +} + +static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event) +{ + int i; + enum slave_port_gen_event gen_event; + struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev, + port); + + for (i = 0; i < dev->persist->num_vfs + 1; i++) + if (test_bit(i, slaves_pport.slaves)) 
+ set_and_calc_slave_port_state(dev, i, port, + event, &gen_event); +} +/************************************************************************** + The function get as input the new event to that port, + and according to the prev state change the slave's port state. + The events are: + MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN, + MLX4_PORT_STATE_DEV_EVENT_PORT_UP + MLX4_PORT_STATE_IB_EVENT_GID_VALID + MLX4_PORT_STATE_IB_EVENT_GID_INVALID +***************************************************************************/ +int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, + u8 port, int event, + enum slave_port_gen_event *gen_event) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *ctx = NULL; + unsigned long flags; + int ret = -1; + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); + enum slave_port_state cur_state = + mlx4_get_slave_port_state(dev, slave, port); + + *gen_event = SLAVE_PORT_GEN_EVENT_NONE; + + if (slave >= dev->num_slaves || port > dev->caps.num_ports || + port <= 0 || !test_bit(port - 1, actv_ports.ports)) { + pr_err("%s: Error: asking for slave:%d, port:%d\n", + __func__, slave, port); + return ret; + } + + ctx = &priv->mfunc.master.slave_state[slave]; + spin_lock_irqsave(&ctx->lock, flags); + + switch (cur_state) { + case SLAVE_PORT_DOWN: + if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event) + mlx4_set_slave_port_state(dev, slave, port, + SLAVE_PENDING_UP); + break; + case SLAVE_PENDING_UP: + if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) + mlx4_set_slave_port_state(dev, slave, port, + SLAVE_PORT_DOWN); + else if (MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID == event) { + mlx4_set_slave_port_state(dev, slave, port, + SLAVE_PORT_UP); + *gen_event = SLAVE_PORT_GEN_EVENT_UP; + } + break; + case SLAVE_PORT_UP: + if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) { + mlx4_set_slave_port_state(dev, slave, port, + SLAVE_PORT_DOWN); + *gen_event = SLAVE_PORT_GEN_EVENT_DOWN; + } else if (MLX4_PORT_STATE_IB_EVENT_GID_INVALID == + event) { + mlx4_set_slave_port_state(dev, slave, port, + SLAVE_PENDING_UP); + *gen_event = SLAVE_PORT_GEN_EVENT_DOWN; + } + break; + default: + pr_err("%s: BUG!!! 
UNKNOWN state: slave:%d, port:%d\n", + __func__, slave, port); + goto out; + } + ret = mlx4_get_slave_port_state(dev, slave, port); + +out: + spin_unlock_irqrestore(&ctx->lock, flags); + return ret; +} + +EXPORT_SYMBOL(set_and_calc_slave_port_state); + +int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr) +{ + struct mlx4_eqe eqe; + + memset(&eqe, 0, sizeof eqe); + + eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; + eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO; + eqe.event.port_mgmt_change.port = port; + eqe.event.port_mgmt_change.params.port_info.changed_attr = + cpu_to_be32((u32) attr); + + slave_event(dev, ALL_SLAVES, &eqe); + return 0; +} +EXPORT_SYMBOL(mlx4_gen_slaves_port_mgt_ev); + +void mlx4_master_handle_slave_flr(struct work_struct *work) +{ + struct mlx4_mfunc_master_ctx *master = + container_of(work, struct mlx4_mfunc_master_ctx, + slave_flr_event_work); + struct mlx4_mfunc *mfunc = + container_of(master, struct mlx4_mfunc, master); + struct mlx4_priv *priv = + container_of(mfunc, struct mlx4_priv, mfunc); + struct mlx4_dev *dev = &priv->dev; + struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; + int i; + int err; + unsigned long flags; + + mlx4_dbg(dev, "mlx4_handle_slave_flr\n"); + + for (i = 0 ; i < dev->num_slaves; i++) { + + if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) { + mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n", + i); + /* In case of 'Reset flow' FLR can be generated for + * a slave before mlx4_load_one is done. + * make sure interface is up before trying to delete + * slave resources which weren't allocated yet. + */ + if (dev->persist->interface_state & + MLX4_INTERFACE_STATE_UP) + mlx4_delete_all_resources_for_slave(dev, i); + /*return the slave to running mode*/ + spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); + slave_state[i].last_cmd = MLX4_COMM_CMD_RESET; + slave_state[i].is_slave_going_down = 0; + spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); + /*notify the FW:*/ + err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) + mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n", + i); + } + } +} + +static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_eqe *eqe; + int cqn = -1; + int eqes_found = 0; + int set_ci = 0; + int port; + int slave = 0; + int ret; + u32 flr_slave; + u8 update_slave_state; + int i; + enum slave_port_gen_event gen_event; + unsigned long flags; + struct mlx4_vport_state *s_info; + int eqe_size = dev->caps.eqe_size; + + while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor, eqe_size))) { + /* + * Make sure we read EQ entry contents after we've + * checked the ownership bit. 
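+		 * (An EQE belongs to software only when its owner bit matches the
+		 *  pass parity tracked by the consumer index, as checked in
+		 *  next_eqe_sw(); dma_rmb() keeps the reads of the EQE fields from
+		 *  being reordered before that ownership check.)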
+ */ + dma_rmb(); + + switch (eqe->type) { + case MLX4_EVENT_TYPE_COMP: + cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff; + mlx4_cq_completion(dev, cqn); + break; + + case MLX4_EVENT_TYPE_PATH_MIG: + case MLX4_EVENT_TYPE_COMM_EST: + case MLX4_EVENT_TYPE_SQ_DRAINED: + case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE: + case MLX4_EVENT_TYPE_WQ_CATAS_ERROR: + case MLX4_EVENT_TYPE_PATH_MIG_FAILED: + case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR: + mlx4_dbg(dev, "event %d arrived\n", eqe->type); + if (mlx4_is_master(dev)) { + /* forward only to slave owning the QP */ + ret = mlx4_get_slave_from_resource_id(dev, + RES_QP, + be32_to_cpu(eqe->event.qp.qpn) + & 0xffffff, &slave); + if (ret && ret != -ENOENT) { + mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n", + eqe->type, eqe->subtype, + eq->eqn, eq->cons_index, ret); + break; + } + + if (!ret && slave != dev->caps.function) { + mlx4_slave_event(dev, slave, eqe); + break; + } + + } + mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & + 0xffffff, eqe->type); + break; + + case MLX4_EVENT_TYPE_SRQ_LIMIT: + mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n", + __func__); + case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: + if (mlx4_is_master(dev)) { + /* forward only to slave owning the SRQ */ + ret = mlx4_get_slave_from_resource_id(dev, + RES_SRQ, + be32_to_cpu(eqe->event.srq.srqn) + & 0xffffff, + &slave); + if (ret && ret != -ENOENT) { + mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n", + eqe->type, eqe->subtype, + eq->eqn, eq->cons_index, ret); + break; + } + mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n", + __func__, slave, + be32_to_cpu(eqe->event.srq.srqn), + eqe->type, eqe->subtype); + + if (!ret && slave != dev->caps.function) { + mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n", + __func__, eqe->type, + eqe->subtype, slave); + mlx4_slave_event(dev, slave, eqe); + break; + } + } + mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & + 0xffffff, eqe->type); + break; + + case MLX4_EVENT_TYPE_CMD: + mlx4_cmd_event(dev, + be16_to_cpu(eqe->event.cmd.token), + eqe->event.cmd.status, + be64_to_cpu(eqe->event.cmd.out_param)); + break; + + case MLX4_EVENT_TYPE_PORT_CHANGE: { + struct mlx4_slaves_pport slaves_port; + port = be32_to_cpu(eqe->event.port_change.port) >> 28; + slaves_port = mlx4_phys_to_slaves_pport(dev, port); + if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) { + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN, + port); + mlx4_priv(dev)->sense.do_sense_port[port] = 1; + if (!mlx4_is_master(dev)) + break; + for (i = 0; i < dev->persist->num_vfs + 1; + i++) { + if (!test_bit(i, slaves_port.slaves)) + continue; + if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) { + if (i == mlx4_master_func_num(dev)) + continue; + mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n", + __func__, i, port); + s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; + if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { + eqe->event.port_change.port = + cpu_to_be32( + (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF) + | (mlx4_phys_to_slave_port(dev, i, port) << 28)); + mlx4_slave_event(dev, i, eqe); + } + } else { /* IB port */ + set_and_calc_slave_port_state(dev, i, port, + MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN, + &gen_event); + /*we can be in pending state, then do not send port_down event*/ + if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) { + if (i == mlx4_master_func_num(dev)) + continue; + 
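+							/* forward the port-down event to this slave */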
mlx4_slave_event(dev, i, eqe); + } + } + } + } else { + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, port); + + mlx4_priv(dev)->sense.do_sense_port[port] = 0; + + if (!mlx4_is_master(dev)) + break; + if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) + for (i = 0; + i < dev->persist->num_vfs + 1; + i++) { + if (!test_bit(i, slaves_port.slaves)) + continue; + if (i == mlx4_master_func_num(dev)) + continue; + s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; + if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { + eqe->event.port_change.port = + cpu_to_be32( + (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF) + | (mlx4_phys_to_slave_port(dev, i, port) << 28)); + mlx4_slave_event(dev, i, eqe); + } + } + else /* IB port */ + /* port-up event will be sent to a slave when the + * slave's alias-guid is set. This is done in alias_GUID.c + */ + set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP); + } + break; + } + + case MLX4_EVENT_TYPE_CQ_ERROR: + mlx4_warn(dev, "CQ %s on CQN %06x\n", + eqe->event.cq_err.syndrome == 1 ? + "overrun" : "access violation", + be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff); + if (mlx4_is_master(dev)) { + ret = mlx4_get_slave_from_resource_id(dev, + RES_CQ, + be32_to_cpu(eqe->event.cq_err.cqn) + & 0xffffff, &slave); + if (ret && ret != -ENOENT) { + mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n", + eqe->type, eqe->subtype, + eq->eqn, eq->cons_index, ret); + break; + } + + if (!ret && slave != dev->caps.function) { + mlx4_slave_event(dev, slave, eqe); + break; + } + } + mlx4_cq_event(dev, + be32_to_cpu(eqe->event.cq_err.cqn) + & 0xffffff, + eqe->type); + break; + + case MLX4_EVENT_TYPE_EQ_OVERFLOW: + mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn); + break; + + case MLX4_EVENT_TYPE_OP_REQUIRED: + atomic_inc(&priv->opreq_count); + /* FW commands can't be executed from interrupt context + * working in deferred task + */ + queue_work(mlx4_wq, &priv->opreq_task); + break; + + case MLX4_EVENT_TYPE_COMM_CHANNEL: + if (!mlx4_is_master(dev)) { + mlx4_warn(dev, "Received comm channel event for non master device\n"); + break; + } + memcpy(&priv->mfunc.master.comm_arm_bit_vector, + eqe->event.comm_channel_arm.bit_vec, + sizeof eqe->event.comm_channel_arm.bit_vec); + queue_work(priv->mfunc.master.comm_wq, + &priv->mfunc.master.comm_work); + break; + + case MLX4_EVENT_TYPE_FLR_EVENT: + flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id); + if (!mlx4_is_master(dev)) { + mlx4_warn(dev, "Non-master function received FLR event\n"); + break; + } + + mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave); + + if (flr_slave >= dev->num_slaves) { + mlx4_warn(dev, + "Got FLR for unknown function: %d\n", + flr_slave); + update_slave_state = 0; + } else + update_slave_state = 1; + + spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); + if (update_slave_state) { + priv->mfunc.master.slave_state[flr_slave].active = false; + priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR; + priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1; + } + spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, + flr_slave); + queue_work(priv->mfunc.master.comm_wq, + &priv->mfunc.master.slave_flr_event_work); + break; + + case MLX4_EVENT_TYPE_FATAL_WARNING: + if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) { + if (mlx4_is_master(dev)) + for (i = 0; i < dev->num_slaves; i++) { + mlx4_dbg(dev, "%s: Sending 
MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n", + __func__, i); + if (i == dev->caps.function) + continue; + mlx4_slave_event(dev, i, eqe); + } + mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n", + be16_to_cpu(eqe->event.warming.warning_threshold), + be16_to_cpu(eqe->event.warming.current_temperature)); + } else + mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n", + eqe->type, eqe->subtype, eq->eqn, + eq->cons_index, eqe->owner, eq->nent, + eqe->slave_id, + !!(eqe->owner & 0x80) ^ + !!(eq->cons_index & eq->nent) ? "HW" : "SW"); + + break; + + case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT: + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE, + (unsigned long) eqe); + break; + + case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT: + switch (eqe->subtype) { + case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE: + mlx4_warn(dev, "Bad cable detected on port %u\n", + eqe->event.bad_cable.port); + break; + case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE: + mlx4_warn(dev, "Unsupported cable detected\n"); + break; + default: + mlx4_dbg(dev, + "Unhandled recoverable error event detected: %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, ownership=%s\n", + eqe->type, eqe->subtype, eq->eqn, + eq->cons_index, eqe->owner, eq->nent, + !!(eqe->owner & 0x80) ^ + !!(eq->cons_index & eq->nent) ? "HW" : "SW"); + break; + } + break; + + case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: + case MLX4_EVENT_TYPE_ECC_DETECT: + default: + mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n", + eqe->type, eqe->subtype, eq->eqn, + eq->cons_index, eqe->owner, eq->nent, + eqe->slave_id, + !!(eqe->owner & 0x80) ^ + !!(eq->cons_index & eq->nent) ? "HW" : "SW"); + break; + }; + + ++eq->cons_index; + eqes_found = 1; + ++set_ci; + + /* + * The HCA will think the queue has overflowed if we + * don't tell it we've been processing events. We + * create our EQs with MLX4_NUM_SPARE_EQE extra + * entries, so we must update our consumer index at + * least that often. + */ + if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) { + eq_set_ci(eq, 0); + set_ci = 0; + } + } + + eq_set_ci(eq, 1); + + /* cqn is 24bit wide but is initialized such that its higher bits + * are ones too. Thus, if we got any event, cqn's high bits should be off + * and we need to schedule the tasklet. 
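+	 * (cqn was initialized to -1 above, so its high bits survive only if
+	 *  no completion event was processed in this loop.)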
+ */ + if (!(cqn & ~0xffffff)) + tasklet_schedule(&eq->tasklet_ctx.task); + + return eqes_found; +} + +static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr) +{ + struct mlx4_dev *dev = dev_ptr; + struct mlx4_priv *priv = mlx4_priv(dev); + int work = 0; + int i; + + writel(priv->eq_table.clr_mask, priv->eq_table.clr_int); + + for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) + work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]); + + return IRQ_RETVAL(work); +} + +static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr) +{ + struct mlx4_eq *eq = eq_ptr; + struct mlx4_dev *dev = eq->dev; + + mlx4_eq_int(dev, eq); + + /* MSI-X vectors always belong to us */ + return IRQ_HANDLED; +} + +int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_event_eq_info *event_eq = + priv->mfunc.master.slave_state[slave].event_eq; + u32 in_modifier = vhcr->in_modifier; + u32 eqn = in_modifier & 0x3FF; + u64 in_param = vhcr->in_param; + int err = 0; + int i; + + if (slave == dev->caps.function) + err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn, + 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + if (!err) + for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) + if (in_param & (1LL << i)) + event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn; + + return err; +} + +static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap, + int eq_num) +{ + return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num, + 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); +} + +static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int eq_num) +{ + return mlx4_cmd(dev, mailbox->dma, eq_num, 0, + MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); +} + +static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, int eq_num) +{ + return mlx4_cmd(dev, 0, eq_num, 1, MLX4_CMD_HW2SW_EQ, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); +} + +static int mlx4_num_eq_uar(struct mlx4_dev *dev) +{ + /* + * Each UAR holds 4 EQ doorbells. To figure out how many UARs + * we need to map, take the difference of highest index and + * the lowest index we'll use and add 1. 
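+	 * For example, 3 completion vectors plus 1 async EQ with 32 reserved
+	 * EQs and an empty comp_pool gives (3 + 1 + 32 + 0)/4 - 32/4 + 1 = 2
+	 * UAR pages.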
+ */ + return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs + + dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1; +} + +static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int index; + + index = eq->eqn / 4 - dev->caps.reserved_eqs / 4; + + if (!priv->eq_table.uar_map[index]) { + priv->eq_table.uar_map[index] = + ioremap(pci_resource_start(dev->persist->pdev, 2) + + ((eq->eqn / 4) << PAGE_SHIFT), + PAGE_SIZE); + if (!priv->eq_table.uar_map[index]) { + mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n", + eq->eqn); + return NULL; + } + } + + return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4); +} + +static void mlx4_unmap_uar(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i; + + for (i = 0; i < mlx4_num_eq_uar(dev); ++i) + if (priv->eq_table.uar_map[i]) { + iounmap(priv->eq_table.uar_map[i]); + priv->eq_table.uar_map[i] = NULL; + } +} + +static int mlx4_create_eq(struct mlx4_dev *dev, int nent, + u8 intr, struct mlx4_eq *eq) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_eq_context *eq_context; + int npages; + u64 *dma_list = NULL; + dma_addr_t t; + u64 mtt_addr; + int err = -ENOMEM; + int i; + + eq->dev = dev; + eq->nent = roundup_pow_of_two(max(nent, 2)); + /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with + * strides of 64B,128B and 256B. + */ + npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE; + + eq->page_list = kmalloc(npages * sizeof *eq->page_list, + GFP_KERNEL); + if (!eq->page_list) + goto err_out; + + for (i = 0; i < npages; ++i) + eq->page_list[i].buf = NULL; + + dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); + if (!dma_list) + goto err_out_free; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + goto err_out_free; + eq_context = mailbox->buf; + + for (i = 0; i < npages; ++i) { + eq->page_list[i].buf = dma_alloc_coherent(&dev->persist-> + pdev->dev, + PAGE_SIZE, &t, + GFP_KERNEL); + if (!eq->page_list[i].buf) + goto err_out_free_pages; + + dma_list[i] = t; + eq->page_list[i].map = t; + + memset(eq->page_list[i].buf, 0, PAGE_SIZE); + } + + eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap); + if (eq->eqn == -1) + goto err_out_free_pages; + + eq->doorbell = mlx4_get_eq_uar(dev, eq); + if (!eq->doorbell) { + err = -ENOMEM; + goto err_out_free_eq; + } + + err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt); + if (err) + goto err_out_free_eq; + + err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list); + if (err) + goto err_out_free_mtt; + + eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK | + MLX4_EQ_STATE_ARMED); + eq_context->log_eq_size = ilog2(eq->nent); + eq_context->intr = intr; + eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT; + + mtt_addr = mlx4_mtt_addr(dev, &eq->mtt); + eq_context->mtt_base_addr_h = mtt_addr >> 32; + eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); + + err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn); + if (err) { + mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err); + goto err_out_free_mtt; + } + + kfree(dma_list); + mlx4_free_cmd_mailbox(dev, mailbox); + + eq->cons_index = 0; + + INIT_LIST_HEAD(&eq->tasklet_ctx.list); + INIT_LIST_HEAD(&eq->tasklet_ctx.process_list); + spin_lock_init(&eq->tasklet_ctx.lock); + tasklet_init(&eq->tasklet_ctx.task, mlx4_cq_tasklet_cb, + (unsigned long)&eq->tasklet_ctx); + + return err; + +err_out_free_mtt: + mlx4_mtt_cleanup(dev, &eq->mtt); + 
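+	/* remaining labels unwind the earlier allocations in reverse order */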
+err_out_free_eq: + mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR); + +err_out_free_pages: + for (i = 0; i < npages; ++i) + if (eq->page_list[i].buf) + dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, + eq->page_list[i].buf, + eq->page_list[i].map); + + mlx4_free_cmd_mailbox(dev, mailbox); + +err_out_free: + kfree(eq->page_list); + kfree(dma_list); + +err_out: + return err; +} + +static void mlx4_free_eq(struct mlx4_dev *dev, + struct mlx4_eq *eq) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err; + int i; + /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with + * strides of 64B,128B and 256B + */ + int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE; + + err = mlx4_HW2SW_EQ(dev, eq->eqn); + if (err) + mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err); + + synchronize_irq(eq->irq); + tasklet_disable(&eq->tasklet_ctx.task); + + mlx4_mtt_cleanup(dev, &eq->mtt); + for (i = 0; i < npages; ++i) + dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, + eq->page_list[i].buf, + eq->page_list[i].map); + + kfree(eq->page_list); + mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR); +} + +static void mlx4_free_irqs(struct mlx4_dev *dev) +{ + struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table; + struct mlx4_priv *priv = mlx4_priv(dev); + int i, vec; + + if (eq_table->have_irq) + free_irq(dev->persist->pdev->irq, dev); + + for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) + if (eq_table->eq[i].have_irq) { + free_irq(eq_table->eq[i].irq, eq_table->eq + i); + eq_table->eq[i].have_irq = 0; + } + + for (i = 0; i < dev->caps.comp_pool; i++) { + /* + * Freeing the assigned irq's + * all bits should be 0, but we need to validate + */ + if (priv->msix_ctl.pool_bm & 1ULL << i) { + /* NO need protecting*/ + vec = dev->caps.num_comp_vectors + 1 + i; + free_irq(priv->eq_table.eq[vec].irq, + &priv->eq_table.eq[vec]); + } + } + + + kfree(eq_table->irq_names); +} + +static int mlx4_map_clr_int(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + priv->clr_base = ioremap(pci_resource_start(dev->persist->pdev, + priv->fw.clr_int_bar) + + priv->fw.clr_int_base, MLX4_CLR_INT_SIZE); + if (!priv->clr_base) { + mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n"); + return -ENOMEM; + } + + return 0; +} + +static void mlx4_unmap_clr_int(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + iounmap(priv->clr_base); +} + +int mlx4_alloc_eq_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs, + sizeof *priv->eq_table.eq, GFP_KERNEL); + if (!priv->eq_table.eq) + return -ENOMEM; + + return 0; +} + +void mlx4_free_eq_table(struct mlx4_dev *dev) +{ + kfree(mlx4_priv(dev)->eq_table.eq); +} + +int mlx4_init_eq_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err; + int i; + + priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev), + sizeof *priv->eq_table.uar_map, + GFP_KERNEL); + if (!priv->eq_table.uar_map) { + err = -ENOMEM; + goto err_out_free; + } + + err = mlx4_bitmap_init(&priv->eq_table.bitmap, + roundup_pow_of_two(dev->caps.num_eqs), + dev->caps.num_eqs - 1, + dev->caps.reserved_eqs, + roundup_pow_of_two(dev->caps.num_eqs) - + dev->caps.num_eqs); + if (err) + goto err_out_free; + + for (i = 0; i < mlx4_num_eq_uar(dev); ++i) + priv->eq_table.uar_map[i] = NULL; + + if (!mlx4_is_slave(dev)) { + err = mlx4_map_clr_int(dev); + if (err) + goto err_out_bitmap; + + 
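+		/* Precompute the mask and register offset used by the legacy
+		 * (INTx) interrupt handler to clear our interrupt pin.
+		 */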
priv->eq_table.clr_mask = + swab32(1 << (priv->eq_table.inta_pin & 31)); + priv->eq_table.clr_int = priv->clr_base + + (priv->eq_table.inta_pin < 32 ? 4 : 0); + } + + priv->eq_table.irq_names = + kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 + + dev->caps.comp_pool), + GFP_KERNEL); + if (!priv->eq_table.irq_names) { + err = -ENOMEM; + goto err_out_bitmap; + } + + for (i = 0; i < dev->caps.num_comp_vectors; ++i) { + err = mlx4_create_eq(dev, dev->caps.num_cqs - + dev->caps.reserved_cqs + + MLX4_NUM_SPARE_EQE, + (dev->flags & MLX4_FLAG_MSI_X) ? i : 0, + &priv->eq_table.eq[i]); + if (err) { + --i; + goto err_out_unmap; + } + } + + err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE, + (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0, + &priv->eq_table.eq[dev->caps.num_comp_vectors]); + if (err) + goto err_out_comp; + + /*if additional completion vectors poolsize is 0 this loop will not run*/ + for (i = dev->caps.num_comp_vectors + 1; + i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) { + + err = mlx4_create_eq(dev, dev->caps.num_cqs - + dev->caps.reserved_cqs + + MLX4_NUM_SPARE_EQE, + (dev->flags & MLX4_FLAG_MSI_X) ? i : 0, + &priv->eq_table.eq[i]); + if (err) { + --i; + goto err_out_unmap; + } + } + + + if (dev->flags & MLX4_FLAG_MSI_X) { + const char *eq_name; + + for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) { + if (i < dev->caps.num_comp_vectors) { + snprintf(priv->eq_table.irq_names + + i * MLX4_IRQNAME_SIZE, + MLX4_IRQNAME_SIZE, + "mlx4-comp-%d@pci:%s", i, + pci_name(dev->persist->pdev)); + } else { + snprintf(priv->eq_table.irq_names + + i * MLX4_IRQNAME_SIZE, + MLX4_IRQNAME_SIZE, + "mlx4-async@pci:%s", + pci_name(dev->persist->pdev)); + } + + eq_name = priv->eq_table.irq_names + + i * MLX4_IRQNAME_SIZE; + err = request_irq(priv->eq_table.eq[i].irq, + mlx4_msi_x_interrupt, 0, eq_name, + priv->eq_table.eq + i); + if (err) + goto err_out_async; + + priv->eq_table.eq[i].have_irq = 1; + } + } else { + snprintf(priv->eq_table.irq_names, + MLX4_IRQNAME_SIZE, + DRV_NAME "@pci:%s", + pci_name(dev->persist->pdev)); + err = request_irq(dev->persist->pdev->irq, mlx4_interrupt, + IRQF_SHARED, priv->eq_table.irq_names, dev); + if (err) + goto err_out_async; + + priv->eq_table.have_irq = 1; + } + + err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, + priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); + if (err) + mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", + priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err); + + for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) + eq_set_ci(&priv->eq_table.eq[i], 1); + + return 0; + +err_out_async: + mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]); + +err_out_comp: + i = dev->caps.num_comp_vectors - 1; + +err_out_unmap: + while (i >= 0) { + mlx4_free_eq(dev, &priv->eq_table.eq[i]); + --i; + } + if (!mlx4_is_slave(dev)) + mlx4_unmap_clr_int(dev); + mlx4_free_irqs(dev); + +err_out_bitmap: + mlx4_unmap_uar(dev); + mlx4_bitmap_cleanup(&priv->eq_table.bitmap); + +err_out_free: + kfree(priv->eq_table.uar_map); + + return err; +} + +void mlx4_cleanup_eq_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i; + + mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1, + priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); + + mlx4_free_irqs(dev); + + for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) + mlx4_free_eq(dev, &priv->eq_table.eq[i]); + + if (!mlx4_is_slave(dev)) + mlx4_unmap_clr_int(dev); + + mlx4_unmap_uar(dev); + 
mlx4_bitmap_cleanup(&priv->eq_table.bitmap); + + kfree(priv->eq_table.uar_map); +} + +/* A test that verifies that we can accept interrupts on all + * the irq vectors of the device. + * Interrupts are checked using the NOP command. + */ +int mlx4_test_interrupts(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i; + int err; + + err = mlx4_NOP(dev); + /* When not in MSI_X, there is only one irq to check */ + if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev)) + return err; + + /* A loop over all completion vectors, for each vector we will check + * whether it works by mapping command completions to that vector + * and performing a NOP command + */ + for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) { + /* Temporary use polling for command completions */ + mlx4_cmd_use_polling(dev); + + /* Map the new eq to handle all asynchronous events */ + err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, + priv->eq_table.eq[i].eqn); + if (err) { + mlx4_warn(dev, "Failed mapping eq for interrupt test\n"); + mlx4_cmd_use_events(dev); + break; + } + + /* Go back to using events */ + mlx4_cmd_use_events(dev); + err = mlx4_NOP(dev); + } + + /* Return to default */ + mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, + priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); + return err; +} +EXPORT_SYMBOL(mlx4_test_interrupts); + +int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap, + int *vector) +{ + + struct mlx4_priv *priv = mlx4_priv(dev); + int vec = 0, err = 0, i; + + mutex_lock(&priv->msix_ctl.pool_lock); + for (i = 0; !vec && i < dev->caps.comp_pool; i++) { + if (~priv->msix_ctl.pool_bm & 1ULL << i) { + priv->msix_ctl.pool_bm |= 1ULL << i; + vec = dev->caps.num_comp_vectors + 1 + i; + snprintf(priv->eq_table.irq_names + + vec * MLX4_IRQNAME_SIZE, + MLX4_IRQNAME_SIZE, "%s", name); +#ifdef CONFIG_RFS_ACCEL + if (rmap) { + err = irq_cpu_rmap_add(rmap, + priv->eq_table.eq[vec].irq); + if (err) + mlx4_warn(dev, "Failed adding irq rmap\n"); + } +#endif + err = request_irq(priv->eq_table.eq[vec].irq, + mlx4_msi_x_interrupt, 0, + &priv->eq_table.irq_names[vec<<5], + priv->eq_table.eq + vec); + if (err) { + /*zero out bit by fliping it*/ + priv->msix_ctl.pool_bm ^= 1 << i; + vec = 0; + continue; + /*we dont want to break here*/ + } + + eq_set_ci(&priv->eq_table.eq[vec], 1); + } + } + mutex_unlock(&priv->msix_ctl.pool_lock); + + if (vec) { + *vector = vec; + } else { + *vector = 0; + err = (i == dev->caps.comp_pool) ? 
-ENOSPC : err; + } + return err; +} +EXPORT_SYMBOL(mlx4_assign_eq); + +int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + return priv->eq_table.eq[vec].irq; +} +EXPORT_SYMBOL(mlx4_eq_get_irq); + +void mlx4_release_eq(struct mlx4_dev *dev, int vec) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + /*bm index*/ + int i = vec - dev->caps.num_comp_vectors - 1; + + if (likely(i >= 0)) { + /*sanity check , making sure were not trying to free irq's + Belonging to a legacy EQ*/ + mutex_lock(&priv->msix_ctl.pool_lock); + if (priv->msix_ctl.pool_bm & 1ULL << i) { + free_irq(priv->eq_table.eq[vec].irq, + &priv->eq_table.eq[vec]); + priv->msix_ctl.pool_bm &= ~(1ULL << i); + } + mutex_unlock(&priv->msix_ctl.pool_lock); + } + +} +EXPORT_SYMBOL(mlx4_release_eq); + diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c new file mode 100644 index 000000000..e30bf57ad --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -0,0 +1,2760 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. + * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/etherdevice.h> +#include <linux/mlx4/cmd.h> +#include <linux/module.h> +#include <linux/cache.h> + +#include "fw.h" +#include "icm.h" + +enum { + MLX4_COMMAND_INTERFACE_MIN_REV = 2, + MLX4_COMMAND_INTERFACE_MAX_REV = 3, + MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS = 3, +}; + +extern void __buggy_use_of_MLX4_GET(void); +extern void __buggy_use_of_MLX4_PUT(void); + +static bool enable_qos = true; +module_param(enable_qos, bool, 0444); +MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)"); + +#define MLX4_GET(dest, source, offset) \ + do { \ + void *__p = (char *) (source) + (offset); \ + u64 val; \ + switch (sizeof (dest)) { \ + case 1: (dest) = *(u8 *) __p; break; \ + case 2: (dest) = be16_to_cpup(__p); break; \ + case 4: (dest) = be32_to_cpup(__p); break; \ + case 8: val = get_unaligned((u64 *)__p); \ + (dest) = be64_to_cpu(val); break; \ + default: __buggy_use_of_MLX4_GET(); \ + } \ + } while (0) + +#define MLX4_PUT(dest, source, offset) \ + do { \ + void *__d = ((char *) (dest) + (offset)); \ + switch (sizeof(source)) { \ + case 1: *(u8 *) __d = (source); break; \ + case 2: *(__be16 *) __d = cpu_to_be16(source); break; \ + case 4: *(__be32 *) __d = cpu_to_be32(source); break; \ + case 8: *(__be64 *) __d = cpu_to_be64(source); break; \ + default: __buggy_use_of_MLX4_PUT(); \ + } \ + } while (0) + +static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags) +{ + static const char *fname[] = { + [ 0] = "RC transport", + [ 1] = "UC transport", + [ 2] = "UD transport", + [ 3] = "XRC transport", + [ 6] = "SRQ support", + [ 7] = "IPoIB checksum offload", + [ 8] = "P_Key violation counter", + [ 9] = "Q_Key violation counter", + [12] = "Dual Port Different Protocol (DPDP) support", + [15] = "Big LSO headers", + [16] = "MW support", + [17] = "APM support", + [18] = "Atomic ops support", + [19] = "Raw multicast support", + [20] = "Address vector port checking support", + [21] = "UD multicast support", + [30] = "IBoE support", + [32] = "Unicast loopback support", + [34] = "FCS header control", + [37] = "Wake On LAN (port1) support", + [38] = "Wake On LAN (port2) support", + [40] = "UDP RSS support", + [41] = "Unicast VEP steering support", + [42] = "Multicast VEP steering support", + [48] = "Counters support", + [52] = "RSS IP fragments support", + [53] = "Port ETS Scheduler support", + [55] = "Port link type sensing support", + [59] = "Port management change event support", + [61] = "64 byte EQE support", + [62] = "64 byte CQE support", + }; + int i; + + mlx4_dbg(dev, "DEV_CAP flags:\n"); + for (i = 0; i < ARRAY_SIZE(fname); ++i) + if (fname[i] && (flags & (1LL << i))) + mlx4_dbg(dev, " %s\n", fname[i]); +} + +static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) +{ + static const char * const fname[] = { + [0] = "RSS support", + [1] = "RSS Toeplitz Hash Function support", + [2] = "RSS XOR Hash Function support", + [3] = "Device managed flow steering support", + [4] = "Automatic MAC reassignment support", + [5] = "Time stamping support", + [6] = "VST (control vlan insertion/stripping) support", + [7] = "FSM (MAC anti-spoofing) support", + [8] = "Dynamic QP updates support", + [9] = "Device managed flow steering IPoIB support", + [10] = "TCP/IP offloads/flow-steering for VXLAN support", + [11] = "MAD DEMUX (Secure-Host) support", + [12] = "Large cache line (>64B) CQE stride support", + [13] = "Large cache line (>64B) EQE stride support", + [14] = "Ethernet protocol control support", + [15] = "Ethernet Backplane autoneg support", + [16] = "CONFIG 
DEV support", + [17] = "Asymmetric EQs support", + [18] = "More than 80 VFs support", + [19] = "Performance optimized for limited rule configuration flow steering support", + [20] = "Recoverable error events support", + [21] = "Port Remap support", + [22] = "QCN support", + [23] = "QP rate limiting support", + [24] = "Ethernet Flow control statistics support", + [25] = "Granular QoS per VF support", + [26] = "Port ETS Scheduler support", + [27] = "Port beacon support", + [28] = "RX-ALL support", + }; + int i; + + for (i = 0; i < ARRAY_SIZE(fname); ++i) + if (fname[i] && (flags & (1LL << i))) + mlx4_dbg(dev, " %s\n", fname[i]); +} + +int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *inbox; + int err = 0; + +#define MOD_STAT_CFG_IN_SIZE 0x100 + +#define MOD_STAT_CFG_PG_SZ_M_OFFSET 0x002 +#define MOD_STAT_CFG_PG_SZ_OFFSET 0x003 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET); + MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET); + + err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + u8 in_modifier; + u8 field; + u16 field16; + int err; + +#define QUERY_FUNC_BUS_OFFSET 0x00 +#define QUERY_FUNC_DEVICE_OFFSET 0x01 +#define QUERY_FUNC_FUNCTION_OFFSET 0x01 +#define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET 0x03 +#define QUERY_FUNC_RSVD_EQS_OFFSET 0x04 +#define QUERY_FUNC_MAX_EQ_OFFSET 0x06 +#define QUERY_FUNC_RSVD_UARS_OFFSET 0x0b + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + in_modifier = slave; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0, + MLX4_CMD_QUERY_FUNC, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + goto out; + + MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET); + func->bus = field & 0xf; + MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET); + func->device = field & 0xf1; + MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET); + func->function = field & 0x7; + MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET); + func->physical_function = field & 0xf; + MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET); + func->rsvd_eqs = field16 & 0xffff; + MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET); + func->max_eq = field16 & 0xffff; + MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET); + func->rsvd_uars = field & 0x0f; + + mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n", + func->bus, func->device, func->function, func->physical_function, + func->max_eq, func->rsvd_eqs, func->rsvd_uars); + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + u8 field, port; + u32 size, proxy_qp, qkey; + int err = 0; + struct mlx4_func func; + +#define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0 +#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1 +#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4 +#define QUERY_FUNC_CAP_FMR_OFFSET 0x8 +#define 
QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP 0x10 +#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP 0x14 +#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP 0x18 +#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP 0x20 +#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP 0x24 +#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28 +#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c +#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30 +#define QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET 0x48 + +#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50 +#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54 +#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x58 +#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x60 +#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x64 +#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x68 + +#define QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET 0x6c + +#define QUERY_FUNC_CAP_FMR_FLAG 0x80 +#define QUERY_FUNC_CAP_FLAG_RDMA 0x40 +#define QUERY_FUNC_CAP_FLAG_ETH 0x80 +#define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10 +#define QUERY_FUNC_CAP_FLAG_RESD_LKEY 0x08 +#define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX 0x04 + +#define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG (1UL << 31) +#define QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG (1UL << 30) + +/* when opcode modifier = 1 */ +#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3 +#define QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET 0x4 +#define QUERY_FUNC_CAP_FLAGS0_OFFSET 0x8 +#define QUERY_FUNC_CAP_FLAGS1_OFFSET 0xc + +#define QUERY_FUNC_CAP_QP0_TUNNEL 0x10 +#define QUERY_FUNC_CAP_QP0_PROXY 0x14 +#define QUERY_FUNC_CAP_QP1_TUNNEL 0x18 +#define QUERY_FUNC_CAP_QP1_PROXY 0x1c +#define QUERY_FUNC_CAP_PHYS_PORT_ID 0x28 + +#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC 0x40 +#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN 0x80 +#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO 0x10 +#define QUERY_FUNC_CAP_VF_ENABLE_QP0 0x08 + +#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80 +#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31) + + if (vhcr->op_modifier == 1) { + struct mlx4_active_ports actv_ports = + mlx4_get_active_ports(dev, slave); + int converted_port = mlx4_slave_convert_port( + dev, slave, vhcr->in_modifier); + + if (converted_port < 0) + return -EINVAL; + + vhcr->in_modifier = converted_port; + /* phys-port = logical-port */ + field = vhcr->in_modifier - + find_first_bit(actv_ports.ports, dev->caps.num_ports); + MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); + + port = vhcr->in_modifier; + proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1; + + /* Set nic_info bit to mark new fields support */ + field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO; + + if (mlx4_vf_smi_enabled(dev, slave, port) && + !mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) { + field |= QUERY_FUNC_CAP_VF_ENABLE_QP0; + MLX4_PUT(outbox->buf, qkey, + QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET); + } + MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET); + + /* size is now the QP number */ + size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL); + + size += 2; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL); + + MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP0_PROXY); + proxy_qp += 2; + MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP1_PROXY); + + MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier], + QUERY_FUNC_CAP_PHYS_PORT_ID); + + } else if (vhcr->op_modifier == 0) { + struct mlx4_active_ports actv_ports = + mlx4_get_active_ports(dev, slave); + /* enable rdma and ethernet interfaces, new quota locations, + * and reserved lkey + */ + field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA | + 
QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX | + QUERY_FUNC_CAP_FLAG_RESD_LKEY); + MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); + + field = min( + bitmap_weight(actv_ports.ports, dev->caps.num_ports), + dev->caps.num_ports); + MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); + + size = dev->caps.function_caps; /* set PF behaviours */ + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET); + + field = 0; /* protected FMR support not available as yet */ + MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET); + + size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave]; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); + size = dev->caps.num_qps; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP); + + size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave]; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); + size = dev->caps.num_srqs; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP); + + size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave]; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); + size = dev->caps.num_cqs; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP); + + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) || + mlx4_QUERY_FUNC(dev, &func, slave)) { + size = vhcr->in_modifier & + QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ? + dev->caps.num_eqs : + rounddown_pow_of_two(dev->caps.num_eqs); + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); + size = dev->caps.reserved_eqs; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); + } else { + size = vhcr->in_modifier & + QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ? + func.max_eq : + rounddown_pow_of_two(func.max_eq); + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); + size = func.rsvd_eqs; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); + } + + size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave]; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); + size = dev->caps.num_mpts; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP); + + size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave]; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); + size = dev->caps.num_mtts; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP); + + size = dev->caps.num_mgms + dev->caps.num_amgms; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP); + + size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG | + QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET); + + size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00); + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET); + } else + err = -EINVAL; + + return err; +} + +int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port, + struct mlx4_func_cap *func_cap) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + u8 field, op_modifier; + u32 size, qkey; + int err = 0, quotas = 0; + u32 in_modifier; + + op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */ + in_modifier = op_modifier ? 
gen_or_port : + QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier, + MLX4_CMD_QUERY_FUNC_CAP, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) + goto out; + + outbox = mailbox->buf; + + if (!op_modifier) { + MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET); + if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) { + mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n"); + err = -EPROTONOSUPPORT; + goto out; + } + func_cap->flags = field; + quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS); + + MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); + func_cap->num_ports = field; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET); + func_cap->pf_context_behaviour = size; + + if (quotas) { + MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); + func_cap->qp_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); + func_cap->srq_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); + func_cap->cq_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); + func_cap->mpt_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); + func_cap->mtt_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); + func_cap->mcg_quota = size & 0xFFFFFF; + + } else { + MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP); + func_cap->qp_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP); + func_cap->srq_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP); + func_cap->cq_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP); + func_cap->mpt_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP); + func_cap->mtt_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP); + func_cap->mcg_quota = size & 0xFFFFFF; + } + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET); + func_cap->max_eq = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); + func_cap->reserved_eq = size & 0xFFFFFF; + + if (func_cap->flags & QUERY_FUNC_CAP_FLAG_RESD_LKEY) { + MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET); + func_cap->reserved_lkey = size; + } else { + func_cap->reserved_lkey = 0; + } + + func_cap->extra_flags = 0; + + /* Mailbox data from 0x6c and onward should only be treated if + * QUERY_FUNC_CAP_FLAG_VALID_MAILBOX is set in func_cap->flags + */ + if (func_cap->flags & QUERY_FUNC_CAP_FLAG_VALID_MAILBOX) { + MLX4_GET(size, outbox, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET); + if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG) + func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_BF_RES_QP; + if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG) + func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_A0_RES_QP; + } + + goto out; + } + + /* logical port query */ + if (gen_or_port > dev->caps.num_ports) { + err = -EINVAL; + goto out; + } + + MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET); + if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) { + if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) { + mlx4_err(dev, "VLAN is enforced on this port\n"); + err = -EPROTONOSUPPORT; + goto out; + } + + if (func_cap->flags1 & 
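/*
 * How a caller might drive mlx4_QUERY_FUNC_CAP(): gen_or_port == 0
 * performs the general query (flags, quotas, number of ports), while
 * gen_or_port == <port number> performs the per-port query (proxy and
 * tunnel QPNs, qkey, physical port id).  A sketch only, assuming the
 * mlx4 driver headers; example_collect_func_caps() is a hypothetical
 * helper, not the driver's real call site:
 */
static int example_collect_func_caps(struct mlx4_dev *dev)
{
        struct mlx4_func_cap func_cap;
        u8 num_ports;
        int port, err;

        err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);   /* general query */
        if (err)
                return err;
        num_ports = func_cap.num_ports;

        for (port = 1; port <= num_ports; port++) {
                err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap); /* per port */
                if (err)
                        return err;
        }
        return 0;
}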
QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) { + mlx4_err(dev, "Force mac is enabled on this port\n"); + err = -EPROTONOSUPPORT; + goto out; + } + } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) { + MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET); + if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) { + mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n"); + err = -EPROTONOSUPPORT; + goto out; + } + } + + MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); + func_cap->physical_port = field; + if (func_cap->physical_port != gen_or_port) { + err = -ENOSYS; + goto out; + } + + if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) { + MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET); + func_cap->qp0_qkey = qkey; + } else { + func_cap->qp0_qkey = 0; + } + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL); + func_cap->qp0_tunnel_qpn = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY); + func_cap->qp0_proxy_qpn = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL); + func_cap->qp1_tunnel_qpn = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY); + func_cap->qp1_proxy_qpn = size & 0xFFFFFF; + + if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO) + MLX4_GET(func_cap->phys_port_id, outbox, + QUERY_FUNC_CAP_PHYS_PORT_ID); + + /* All other resources are allocated by the master, but we still report + * 'num' and 'reserved' capabilities as follows: + * - num remains the maximum resource index + * - 'num - reserved' is the total available objects of a resource, but + * resource indices may be less than 'reserved' + * TODO: set per-resource quotas */ + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + + return err; +} + +int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + u8 field; + u32 field32, flags, ext_flags; + u16 size; + u16 stat_rate; + int err; + int i; + +#define QUERY_DEV_CAP_OUT_SIZE 0x100 +#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET 0x10 +#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET 0x11 +#define QUERY_DEV_CAP_RSVD_QP_OFFSET 0x12 +#define QUERY_DEV_CAP_MAX_QP_OFFSET 0x13 +#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET 0x14 +#define QUERY_DEV_CAP_MAX_SRQ_OFFSET 0x15 +#define QUERY_DEV_CAP_RSVD_EEC_OFFSET 0x16 +#define QUERY_DEV_CAP_MAX_EEC_OFFSET 0x17 +#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET 0x19 +#define QUERY_DEV_CAP_RSVD_CQ_OFFSET 0x1a +#define QUERY_DEV_CAP_MAX_CQ_OFFSET 0x1b +#define QUERY_DEV_CAP_MAX_MPT_OFFSET 0x1d +#define QUERY_DEV_CAP_RSVD_EQ_OFFSET 0x1e +#define QUERY_DEV_CAP_MAX_EQ_OFFSET 0x1f +#define QUERY_DEV_CAP_RSVD_MTT_OFFSET 0x20 +#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21 +#define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22 +#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23 +#define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET 0x26 +#define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27 +#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29 +#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b +#define QUERY_DEV_CAP_MAX_GSO_OFFSET 0x2d +#define QUERY_DEV_CAP_RSS_OFFSET 0x2e +#define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f +#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33 +#define QUERY_DEV_CAP_PORT_BEACON_OFFSET 0x34 +#define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35 +#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36 +#define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37 +#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38 +#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b +#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c +#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e +#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f +#define 
QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40 +#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 +#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 +#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 +#define QUERY_DEV_CAP_PAGE_SZ_OFFSET 0x4b +#define QUERY_DEV_CAP_BF_OFFSET 0x4c +#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET 0x4d +#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET 0x4e +#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET 0x4f +#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET 0x51 +#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52 +#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55 +#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56 +#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61 +#define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62 +#define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63 +#define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64 +#define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65 +#define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66 +#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67 +#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68 +#define QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET 0x70 +#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET 0x70 +#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74 +#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76 +#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77 +#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a +#define QUERY_DEV_CAP_ECN_QCN_VER_OFFSET 0x7b +#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 +#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 +#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 +#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET 0x86 +#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET 0x88 +#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET 0x8a +#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET 0x8c +#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e +#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90 +#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92 +#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94 +#define QUERY_DEV_CAP_CONFIG_DEV_OFFSET 0x94 +#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 +#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 +#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c +#define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d +#define QUERY_DEV_CAP_VXLAN 0x9e +#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0 +#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET 0xa8 +#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET 0xac +#define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc +#define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0 +#define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2 + + + dev_cap->flags2 = 0; + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (err) + goto out; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET); + dev_cap->reserved_qps = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET); + dev_cap->max_qps = 1 << (field & 0x1f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET); + dev_cap->reserved_srqs = 1 << (field >> 4); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET); + dev_cap->max_srqs = 1 << (field & 0x1f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET); + dev_cap->max_cq_sz = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET); + dev_cap->reserved_cqs = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET); + dev_cap->max_cqs = 1 << (field & 0x1f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET); + dev_cap->max_mpts = 1 << (field & 0x3f); + MLX4_GET(field, outbox, 
QUERY_DEV_CAP_RSVD_EQ_OFFSET); + dev_cap->reserved_eqs = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET); + dev_cap->max_eqs = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET); + dev_cap->reserved_mtts = 1 << (field >> 4); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET); + dev_cap->max_mrw_sz = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET); + dev_cap->reserved_mrws = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET); + dev_cap->max_mtt_seg = 1 << (field & 0x3f); + MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET); + dev_cap->num_sys_eqs = size & 0xfff; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET); + dev_cap->max_requester_per_qp = 1 << (field & 0x3f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET); + dev_cap->max_responder_per_qp = 1 << (field & 0x3f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET); + field &= 0x1f; + if (!field) + dev_cap->max_gso_sz = 0; + else + dev_cap->max_gso_sz = 1 << field; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET); + if (field & 0x20) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR; + if (field & 0x10) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP; + field &= 0xf; + if (field) { + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS; + dev_cap->max_rss_tbl_sz = 1 << field; + } else + dev_cap->max_rss_tbl_sz = 0; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET); + dev_cap->max_rdma_global = 1 << (field & 0x3f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET); + dev_cap->local_ca_ack_delay = field & 0x1f; + MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); + dev_cap->num_ports = field & 0xf; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET); + dev_cap->max_msg_sz = 1 << (field & 0x1f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET); + if (field & 0x10) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN; + MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); + if (field & 0x80) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN; + dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f; + MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_BEACON_OFFSET); + if (field & 0x80) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_BEACON; + MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); + if (field & 0x80) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB; + MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET); + dev_cap->fs_max_num_qp_per_entry = field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET); + if (field & 0x1) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QCN; + MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); + dev_cap->stat_rate_support = stat_rate; + MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); + if (field & 0x80) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS; + MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); + MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); + dev_cap->flags = flags | (u64)ext_flags << 32; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); + dev_cap->reserved_uars = field >> 4; + MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET); + dev_cap->uar_size = 1 << ((field & 0x3f) + 20); + MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET); + dev_cap->min_page_sz = 1 << field; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET); + if (field & 0x80) { + MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET); + dev_cap->bf_reg_size = 1 
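/*
 * Most QUERY_DEV_CAP limits above are log2-encoded in a few bits of a
 * byte and expanded with "1 << (field & mask)".  For example, if the
 * byte at QUERY_DEV_CAP_MAX_QP_OFFSET reads 0x18, then field & 0x1f is
 * 24 and dev_cap->max_qps becomes 1 << 24 = 16777216 QPs.  A zero in
 * such a field, as handled for max_gso_sz and the RSS table size, means
 * the capability is absent rather than "one".
 */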
<< (field & 0x1f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET); + if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) + field = 3; + dev_cap->bf_regs_per_page = 1 << (field & 0x3f); + } else { + dev_cap->bf_reg_size = 0; + } + + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET); + dev_cap->max_sq_sg = field; + MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET); + dev_cap->max_sq_desc_sz = size; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET); + dev_cap->max_qp_per_mcg = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET); + dev_cap->reserved_mgms = field & 0xf; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET); + dev_cap->max_mcgs = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET); + dev_cap->reserved_pds = field >> 4; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET); + dev_cap->max_pds = 1 << (field & 0x3f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET); + dev_cap->reserved_xrcds = field >> 4; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET); + dev_cap->max_xrcds = 1 << (field & 0x1f); + + MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET); + dev_cap->rdmarc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET); + dev_cap->qpc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET); + dev_cap->aux_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET); + dev_cap->altc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET); + dev_cap->eqc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET); + dev_cap->cqc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET); + dev_cap->srq_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET); + dev_cap->cmpt_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET); + dev_cap->mtt_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET); + dev_cap->dmpt_entry_sz = size; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET); + dev_cap->max_srq_sz = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET); + dev_cap->max_qp_sz = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET); + dev_cap->resize_srq = field & 1; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET); + dev_cap->max_rq_sg = field; + MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET); + dev_cap->max_rq_desc_sz = size; + MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); + if (field & (1 << 4)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QOS_VPP; + if (field & (1 << 5)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL; + if (field & (1 << 6)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE; + if (field & (1 << 7)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE; + MLX4_GET(dev_cap->bmme_flags, outbox, + QUERY_DEV_CAP_BMME_FLAGS_OFFSET); + if (dev_cap->bmme_flags & MLX4_FLAG_PORT_REMAP) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_REMAP; + MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET); + if (field & 0x20) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV; + if (field & (1 << 2)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS; + MLX4_GET(dev_cap->reserved_lkey, outbox, + QUERY_DEV_CAP_RSVD_LKEY_OFFSET); + MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET); + if (field32 & (1 << 0)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP; + if (field32 & (1 
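/*
 * Worked example for the BlueFlame clamp above, assuming 4 KB system
 * pages and a firmware-reported register size of 512 bytes (log = 9):
 * PAGE_SIZE / bf_reg_size is 8, so if the firmware claims, say, 16
 * registers per page (log = 4), the value is clamped to field = 3 and
 * bf_regs_per_page becomes 1 << 3 = 8, i.e. exactly one page's worth.
 */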
<< 7)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT; + MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC); + if (field & 1<<6) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN; + MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN); + if (field & 1<<3) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS; + if (field & (1 << 5)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG; + MLX4_GET(dev_cap->max_icm_sz, outbox, + QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET); + if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS) + MLX4_GET(dev_cap->max_counters, outbox, + QUERY_DEV_CAP_MAX_COUNTERS_OFFSET); + + MLX4_GET(field32, outbox, + QUERY_DEV_CAP_MAD_DEMUX_OFFSET); + if (field32 & (1 << 0)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX; + + MLX4_GET(dev_cap->dmfs_high_rate_qpn_base, outbox, + QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET); + dev_cap->dmfs_high_rate_qpn_base &= MGM_QPN_MASK; + MLX4_GET(dev_cap->dmfs_high_rate_qpn_range, outbox, + QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET); + dev_cap->dmfs_high_rate_qpn_range &= MGM_QPN_MASK; + + MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET); + dev_cap->rl_caps.num_rates = size; + if (dev_cap->rl_caps.num_rates) { + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT; + MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET); + dev_cap->rl_caps.max_val = size & 0xfff; + dev_cap->rl_caps.max_unit = size >> 14; + MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET); + dev_cap->rl_caps.min_val = size & 0xfff; + dev_cap->rl_caps.min_unit = size >> 14; + } + + MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); + if (field32 & (1 << 16)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP; + if (field32 & (1 << 26)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL; + if (field32 & (1 << 20)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM; + if (field32 & (1 << 21)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS; + + for (i = 1; i <= dev_cap->num_ports; i++) { + err = mlx4_QUERY_PORT(dev, i, dev_cap->port_cap + i); + if (err) + goto out; + } + + /* + * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then + * we can't use any EQs whose doorbell falls on that page, + * even if the EQ itself isn't reserved. 
+ */ + if (dev_cap->num_sys_eqs == 0) + dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4, + dev_cap->reserved_eqs); + else + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS; + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) +{ + if (dev_cap->bf_reg_size > 0) + mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n", + dev_cap->bf_reg_size, dev_cap->bf_regs_per_page); + else + mlx4_dbg(dev, "BlueFlame not available\n"); + + mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n", + dev_cap->bmme_flags, dev_cap->reserved_lkey); + mlx4_dbg(dev, "Max ICM size %lld MB\n", + (unsigned long long) dev_cap->max_icm_sz >> 20); + mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", + dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz); + mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n", + dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz); + mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", + dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz); + mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n", + dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs, + dev_cap->eqc_entry_sz); + mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n", + dev_cap->reserved_mrws, dev_cap->reserved_mtts); + mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n", + dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars); + mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n", + dev_cap->max_pds, dev_cap->reserved_mgms); + mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", + dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz); + mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n", + dev_cap->local_ca_ack_delay, 128 << dev_cap->port_cap[1].ib_mtu, + dev_cap->port_cap[1].max_port_width); + mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n", + dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg); + mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n", + dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg); + mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz); + mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters); + mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz); + mlx4_dbg(dev, "DMFS high rate steer QPn base: %d\n", + dev_cap->dmfs_high_rate_qpn_base); + mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n", + dev_cap->dmfs_high_rate_qpn_range); + + if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT) { + struct mlx4_rate_limit_caps *rl_caps = &dev_cap->rl_caps; + + mlx4_dbg(dev, "QP Rate-Limit: #rates %d, unit/val max %d/%d, min %d/%d\n", + rl_caps->num_rates, rl_caps->max_unit, rl_caps->max_val, + rl_caps->min_unit, rl_caps->min_val); + } + + dump_dev_cap_flags(dev, dev_cap->flags); + dump_dev_cap_flags2(dev, dev_cap->flags2); +} + +int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + u8 field; + u32 field32; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + if (err) + goto out; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); + port_cap->max_vl = field >> 4; + 
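/*
 * Worked example for the reserved-EQ adjustment above (taken when the
 * firmware reports no dedicated sys EQs): with reserved_uars = 8 and a
 * firmware-reported reserved_eqs = 16, the 8 reserved UAR pages cover
 * 8 * 4 = 32 EQ doorbells, so reserved_eqs is raised to 32; EQs whose
 * doorbells land in those pages cannot be used even though the EQs
 * themselves are not reserved.
 */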
MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET); + port_cap->ib_mtu = field >> 4; + port_cap->max_port_width = field & 0xf; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET); + port_cap->max_gids = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET); + port_cap->max_pkeys = 1 << (field & 0xf); + } else { +#define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00 +#define QUERY_PORT_MTU_OFFSET 0x01 +#define QUERY_PORT_ETH_MTU_OFFSET 0x02 +#define QUERY_PORT_WIDTH_OFFSET 0x06 +#define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07 +#define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a +#define QUERY_PORT_MAX_VL_OFFSET 0x0b +#define QUERY_PORT_MAC_OFFSET 0x10 +#define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18 +#define QUERY_PORT_WAVELENGTH_OFFSET 0x1c +#define QUERY_PORT_TRANS_CODE_OFFSET 0x20 + + err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, MLX4_CMD_QUERY_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + if (err) + goto out; + + MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET); + port_cap->supported_port_types = field & 3; + port_cap->suggested_type = (field >> 3) & 1; + port_cap->default_sense = (field >> 4) & 1; + port_cap->dmfs_optimized_state = (field >> 5) & 1; + MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET); + port_cap->ib_mtu = field & 0xf; + MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET); + port_cap->max_port_width = field & 0xf; + MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET); + port_cap->max_gids = 1 << (field >> 4); + port_cap->max_pkeys = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET); + port_cap->max_vl = field & 0xf; + MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET); + port_cap->log_max_macs = field & 0xf; + port_cap->log_max_vlans = field >> 4; + MLX4_GET(port_cap->eth_mtu, outbox, QUERY_PORT_ETH_MTU_OFFSET); + MLX4_GET(port_cap->def_mac, outbox, QUERY_PORT_MAC_OFFSET); + MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET); + port_cap->trans_type = field32 >> 24; + port_cap->vendor_oui = field32 & 0xffffff; + MLX4_GET(port_cap->wavelength, outbox, QUERY_PORT_WAVELENGTH_OFFSET); + MLX4_GET(port_cap->trans_code, outbox, QUERY_PORT_TRANS_CODE_OFFSET); + } + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +#define DEV_CAP_EXT_2_FLAG_PFC_COUNTERS (1 << 28) +#define DEV_CAP_EXT_2_FLAG_VLAN_CONTROL (1 << 26) +#define DEV_CAP_EXT_2_FLAG_80_VFS (1 << 21) +#define DEV_CAP_EXT_2_FLAG_FSM (1 << 20) + +int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + u64 flags; + int err = 0; + u8 field; + u16 field16; + u32 bmme_flags, field32; + int real_port; + int slave_port; + int first_port; + struct mlx4_active_ports actv_ports; + + err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (err) + return err; + + /* add port mng change event capability and disable mw type 1 + * unconditionally to slaves + */ + MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); + flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV; + flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW; + actv_ports = mlx4_get_active_ports(dev, slave); + first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports); + for (slave_port = 0, real_port = first_port; + real_port < first_port + + bitmap_weight(actv_ports.ports, dev->caps.num_ports); + ++real_port, ++slave_port) { + if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port)) + flags |= 
MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port; + else + flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port); + } + for (; slave_port < dev->caps.num_ports; ++slave_port) + flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port); + + /* Not exposing RSS IP fragments to guests */ + flags &= ~MLX4_DEV_CAP_FLAG_RSS_IP_FRAG; + MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); + + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET); + field &= ~0x0F; + field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET); + + /* For guests, disable timestamp */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); + field &= 0x7f; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); + + /* For guests, disable vxlan tunneling and QoS support */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN); + field &= 0xd7; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN); + + /* For guests, disable port BEACON */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_PORT_BEACON_OFFSET); + field &= 0x7f; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_PORT_BEACON_OFFSET); + + /* For guests, report Blueflame disabled */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET); + field &= 0x7f; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET); + + /* For guests, disable mw type 2 and port remap*/ + MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); + bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; + bmme_flags &= ~MLX4_FLAG_PORT_REMAP; + MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); + + /* turn off device-managed steering capability if not enabled */ + if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) { + MLX4_GET(field, outbox->buf, + QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); + field &= 0x7f; + MLX4_PUT(outbox->buf, field, + QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); + } + + /* turn off ipoib managed steering for guests */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); + field &= ~0x80; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); + + /* turn off host side virt features (VST, FSM, etc) for guests */ + MLX4_GET(field32, outbox->buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); + field32 &= ~(DEV_CAP_EXT_2_FLAG_VLAN_CONTROL | DEV_CAP_EXT_2_FLAG_80_VFS | + DEV_CAP_EXT_2_FLAG_FSM | DEV_CAP_EXT_2_FLAG_PFC_COUNTERS); + MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); + + /* turn off QCN for guests */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET); + field &= 0xfe; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET); + + /* turn off QP max-rate limiting for guests */ + field16 = 0; + MLX4_PUT(outbox->buf, field16, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET); + + /* turn off QoS per VF support for guests */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); + field &= 0xef; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); + + /* turn off ignore FCS feature for guests */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CONFIG_DEV_OFFSET); + field &= 0xfb; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CONFIG_DEV_OFFSET); + + return 0; +} + +int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + u64 def_mac; + u8 port_type; + u16 short_field; + int err; + int 
admin_link_state; + int port = mlx4_slave_convert_port(dev, slave, + vhcr->in_modifier & 0xFF); + +#define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0 +#define MLX4_PORT_LINK_UP_MASK 0x80 +#define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c +#define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e + + if (port < 0) + return -EINVAL; + + /* Protect against untrusted guests: enforce that this is the + * QUERY_PORT general query. + */ + if (vhcr->op_modifier || vhcr->in_modifier & ~0xFF) + return -EINVAL; + + vhcr->in_modifier = port; + + err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0, + MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + + if (!err && dev->caps.function != slave) { + def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac; + MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET); + + /* get port type - currently only eth is enabled */ + MLX4_GET(port_type, outbox->buf, + QUERY_PORT_SUPPORTED_TYPE_OFFSET); + + /* No link sensing allowed */ + port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK; + /* set port type to currently operating port type */ + port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3); + + admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state; + if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state) + port_type |= MLX4_PORT_LINK_UP_MASK; + else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state) + port_type &= ~MLX4_PORT_LINK_UP_MASK; + + MLX4_PUT(outbox->buf, port_type, + QUERY_PORT_SUPPORTED_TYPE_OFFSET); + + if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH) + short_field = mlx4_get_slave_num_gids(dev, slave, port); + else + short_field = 1; /* slave max gids */ + MLX4_PUT(outbox->buf, short_field, + QUERY_PORT_CUR_MAX_GID_OFFSET); + + short_field = dev->caps.pkey_table_len[vhcr->in_modifier]; + MLX4_PUT(outbox->buf, short_field, + QUERY_PORT_CUR_MAX_PKEY_OFFSET); + } + + return err; +} + +int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port, + int *gid_tbl_len, int *pkey_tbl_len) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + u16 field; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, + MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + if (err) + goto out; + + outbox = mailbox->buf; + + MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET); + *gid_tbl_len = field; + + MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET); + *pkey_tbl_len = field; + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len); + +int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_icm_iter iter; + __be64 *pages; + int lg; + int nent = 0; + int i; + int err = 0; + int ts = 0, tc = 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + pages = mailbox->buf; + + for (mlx4_icm_first(icm, &iter); + !mlx4_icm_last(&iter); + mlx4_icm_next(&iter)) { + /* + * We have to pass pages that are aligned to their + * size, so find the least significant 1 in the + * address or size and use that as our log2 size. 
+ */ + lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1; + if (lg < MLX4_ICM_PAGE_SHIFT) { + mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n", + MLX4_ICM_PAGE_SIZE, + (unsigned long long) mlx4_icm_addr(&iter), + mlx4_icm_size(&iter)); + err = -EINVAL; + goto out; + } + + for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) { + if (virt != -1) { + pages[nent * 2] = cpu_to_be64(virt); + virt += 1 << lg; + } + + pages[nent * 2 + 1] = + cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) | + (lg - MLX4_ICM_PAGE_SHIFT)); + ts += 1 << (lg - 10); + ++tc; + + if (++nent == MLX4_MAILBOX_SIZE / 16) { + err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, + MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + if (err) + goto out; + nent = 0; + } + } + } + + if (nent) + err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + if (err) + goto out; + + switch (op) { + case MLX4_CMD_MAP_FA: + mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts); + break; + case MLX4_CMD_MAP_ICM_AUX: + mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts); + break; + case MLX4_CMD_MAP_ICM: + mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n", + tc, ts, (unsigned long long) virt - (ts << 10)); + break; + } + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm) +{ + return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1); +} + +int mlx4_UNMAP_FA(struct mlx4_dev *dev) +{ + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); +} + + +int mlx4_RUN_FW(struct mlx4_dev *dev) +{ + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); +} + +int mlx4_QUERY_FW(struct mlx4_dev *dev) +{ + struct mlx4_fw *fw = &mlx4_priv(dev)->fw; + struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + int err = 0; + u64 fw_ver; + u16 cmd_if_rev; + u8 lg; + +#define QUERY_FW_OUT_SIZE 0x100 +#define QUERY_FW_VER_OFFSET 0x00 +#define QUERY_FW_PPF_ID 0x09 +#define QUERY_FW_CMD_IF_REV_OFFSET 0x0a +#define QUERY_FW_MAX_CMD_OFFSET 0x0f +#define QUERY_FW_ERR_START_OFFSET 0x30 +#define QUERY_FW_ERR_SIZE_OFFSET 0x38 +#define QUERY_FW_ERR_BAR_OFFSET 0x3c + +#define QUERY_FW_SIZE_OFFSET 0x00 +#define QUERY_FW_CLR_INT_BASE_OFFSET 0x20 +#define QUERY_FW_CLR_INT_BAR_OFFSET 0x28 + +#define QUERY_FW_COMM_BASE_OFFSET 0x40 +#define QUERY_FW_COMM_BAR_OFFSET 0x48 + +#define QUERY_FW_CLOCK_OFFSET 0x50 +#define QUERY_FW_CLOCK_BAR 0x58 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (err) + goto out; + + MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET); + /* + * FW subminor version is at more significant bits than minor + * version, so swap here. 
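/*
 * Worked example for the alignment trick in mlx4_map_cmd() above: for
 * an ICM chunk at DMA address 0x230000 with size 0x20000, the OR is
 * 0x230000, whose least significant set bit is bit 16, so ffs() returns
 * 17 and lg = 16.  The chunk is therefore handed to the firmware as two
 * 64 KB pages, at 0x230000 and 0x240000, both naturally aligned to
 * their size.
 */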
+ */ + dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) | + ((fw_ver & 0xffff0000ull) >> 16) | + ((fw_ver & 0x0000ffffull) << 16); + + MLX4_GET(lg, outbox, QUERY_FW_PPF_ID); + dev->caps.function = lg; + + if (mlx4_is_slave(dev)) + goto out; + + + MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET); + if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV || + cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) { + mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n", + cmd_if_rev); + mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n", + (int) (dev->caps.fw_ver >> 32), + (int) (dev->caps.fw_ver >> 16) & 0xffff, + (int) dev->caps.fw_ver & 0xffff); + mlx4_err(dev, "This driver version supports only revisions %d to %d\n", + MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV); + err = -ENODEV; + goto out; + } + + if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS) + dev->flags |= MLX4_FLAG_OLD_PORT_CMDS; + + MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET); + cmd->max_cmds = 1 << lg; + + mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n", + (int) (dev->caps.fw_ver >> 32), + (int) (dev->caps.fw_ver >> 16) & 0xffff, + (int) dev->caps.fw_ver & 0xffff, + cmd_if_rev, cmd->max_cmds); + + MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET); + MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET); + MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET); + fw->catas_bar = (fw->catas_bar >> 6) * 2; + + mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n", + (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar); + + MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET); + MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET); + MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET); + fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2; + + MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET); + MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET); + fw->comm_bar = (fw->comm_bar >> 6) * 2; + mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n", + fw->comm_bar, fw->comm_base); + mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2); + + MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET); + MLX4_GET(fw->clock_bar, outbox, QUERY_FW_CLOCK_BAR); + fw->clock_bar = (fw->clock_bar >> 6) * 2; + mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n", + fw->clock_bar, fw->clock_offset); + + /* + * Round up number of system pages needed in case + * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. 
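/*
 * Worked example for the version swap above: the firmware returns the
 * version as [major:16][subminor:16][minor:16], e.g. 0x000200240123 for
 * major 2, subminor 0x24, minor 0x123.  After the swap, dev->caps.fw_ver
 * holds 0x000201230024, so the "%d.%d.%03d" prints above read it as
 * major = fw_ver >> 32, minor = (fw_ver >> 16) & 0xffff and
 * subminor = fw_ver & 0xffff, i.e. "2.291.036".
 */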
+ */ + fw->fw_pages = + ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> + (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); + + mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n", + (unsigned long long) fw->clr_int_base, fw->clr_int_bar); + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + u8 *outbuf; + int err; + + outbuf = outbox->buf; + err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (err) + return err; + + /* for slaves, set pci PPF ID to invalid and zero out everything + * else except FW version */ + outbuf[0] = outbuf[1] = 0; + memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8); + outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID; + + return 0; +} + +static void get_board_id(void *vsd, char *board_id) +{ + int i; + +#define VSD_OFFSET_SIG1 0x00 +#define VSD_OFFSET_SIG2 0xde +#define VSD_OFFSET_MLX_BOARD_ID 0xd0 +#define VSD_OFFSET_TS_BOARD_ID 0x20 + +#define VSD_SIGNATURE_TOPSPIN 0x5ad + + memset(board_id, 0, MLX4_BOARD_ID_LEN); + + if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN && + be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) { + strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN); + } else { + /* + * The board ID is a string but the firmware byte + * swaps each 4-byte word before passing it back to + * us. Therefore we need to swab it before printing. + */ + u32 *bid_u32 = (u32 *)board_id; + + for (i = 0; i < 4; ++i) { + u32 *addr; + u32 val; + + addr = (u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4); + val = get_unaligned(addr); + val = swab32(val); + put_unaligned(val, &bid_u32[i]); + } + } +} + +int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + int err; + +#define QUERY_ADAPTER_OUT_SIZE 0x100 +#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 +#define QUERY_ADAPTER_VSD_OFFSET 0x20 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (err) + goto out; + + MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); + + get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4, + adapter->board_id); + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) +{ + struct mlx4_cmd_mailbox *mailbox; + __be32 *inbox; + int err; + static const u8 a0_dmfs_hw_steering[] = { + [MLX4_STEERING_DMFS_A0_DEFAULT] = 0, + [MLX4_STEERING_DMFS_A0_DYNAMIC] = 1, + [MLX4_STEERING_DMFS_A0_STATIC] = 2, + [MLX4_STEERING_DMFS_A0_DISABLE] = 3 + }; + +#define INIT_HCA_IN_SIZE 0x200 +#define INIT_HCA_VERSION_OFFSET 0x000 +#define INIT_HCA_VERSION 2 +#define INIT_HCA_VXLAN_OFFSET 0x0c +#define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e +#define INIT_HCA_FLAGS_OFFSET 0x014 +#define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018 +#define INIT_HCA_QPC_OFFSET 0x020 +#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) +#define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17) +#define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28) +#define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f) +#define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30) +#define 
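/*
 * The fw_pages conversion in mlx4_QUERY_FW() above rescales a count of
 * MLX4_ICM_PAGE_SIZE units into system pages, rounding up.  With the
 * ratio r = PAGE_SIZE / MLX4_ICM_PAGE_SIZE, the expression is
 * ALIGN(fw_pages, r) >> log2(r); e.g. for r = 16 (64 KB system pages
 * over 4 KB ICM pages, assuming those sizes), 100 firmware pages become
 * ALIGN(100, 16) >> 4 = 112 >> 4 = 7 system pages.  When the two page
 * sizes match, r = 1 and the expression leaves fw_pages unchanged.
 */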
INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37) +#define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38) +#define INIT_HCA_EQE_CQE_STRIDE_OFFSET (INIT_HCA_QPC_OFFSET + 0x3b) +#define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40) +#define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) +#define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) +#define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67) +#define INIT_HCA_NUM_SYS_EQS_OFFSET (INIT_HCA_QPC_OFFSET + 0x6a) +#define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70) +#define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77) +#define INIT_HCA_MCAST_OFFSET 0x0c0 +#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) +#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) +#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) +#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18) +#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) +#define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6 +#define INIT_HCA_FS_PARAM_OFFSET 0x1d0 +#define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00) +#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12) +#define INIT_HCA_FS_A0_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x18) +#define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b) +#define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21) +#define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22) +#define INIT_HCA_FS_IB_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x25) +#define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26) +#define INIT_HCA_TPT_OFFSET 0x0f0 +#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) +#define INIT_HCA_TPT_MW_OFFSET (INIT_HCA_TPT_OFFSET + 0x08) +#define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b) +#define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10) +#define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18) +#define INIT_HCA_UAR_OFFSET 0x120 +#define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a) +#define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b) + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION; + + *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) = + (ilog2(cache_line_size()) - 4) << 5; + +#if defined(__LITTLE_ENDIAN) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1); +#elif defined(__BIG_ENDIAN) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1); +#else +#error Host endianness not defined +#endif + /* Check port for UD address vector: */ + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1); + + /* Enable IPoIB checksumming if we can: */ + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3); + + /* Enable QoS support if module parameter set */ + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG && enable_qos) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2); + + /* enable counters */ + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4); + + /* Enable RSS spread to fragmented IP packets when supported */ + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_RSS_IP_FRAG) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 13); + + /* CX3 is capable of extending CQEs/EQEs from 32 
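/*
 * The INIT_HCA_CACHELINE_SZ_OFFSET byte written above encodes
 * log2(cache line size) - 4 in bits 7:5.  For example, a 64-byte cache
 * line gives ilog2(64) - 4 = 2 and hence the byte 0x40; a 128-byte line
 * gives 0x60.
 */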
to 64 bytes */ + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) { + *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29); + dev->caps.eqe_size = 64; + dev->caps.eqe_factor = 1; + } else { + dev->caps.eqe_size = 32; + dev->caps.eqe_factor = 0; + } + + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) { + *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30); + dev->caps.cqe_size = 64; + dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; + } else { + dev->caps.cqe_size = 32; + } + + /* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */ + if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) && + (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) { + dev->caps.eqe_size = cache_line_size(); + dev->caps.cqe_size = cache_line_size(); + dev->caps.eqe_factor = 0; + MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 | + (ilog2(dev->caps.eqe_size) - 5)), + INIT_HCA_EQE_CQE_STRIDE_OFFSET); + + /* User still need to know to support CQE > 32B */ + dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; + } + + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT) + *(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31); + + /* QPC/EEC/CQC/EQC/RDMARC attributes */ + + MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET); + MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET); + MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET); + MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET); + MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET); + MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET); + MLX4_PUT(inbox, param->num_sys_eqs, INIT_HCA_NUM_SYS_EQS_OFFSET); + MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET); + + /* steering attributes */ + if (dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) { + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= + cpu_to_be32(1 << + INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN); + + MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET); + MLX4_PUT(inbox, param->log_mc_entry_sz, + INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); + MLX4_PUT(inbox, param->log_mc_table_sz, + INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); + /* Enable Ethernet flow steering + * with udp unicast and tcp unicast + */ + if (dev->caps.dmfs_high_steer_mode != + MLX4_STEERING_DMFS_A0_STATIC) + MLX4_PUT(inbox, + (u8)(MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN), + INIT_HCA_FS_ETH_BITS_OFFSET); + MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR, + INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET); + /* Enable IPoIB flow steering + * with udp unicast and tcp unicast + */ + MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN), + INIT_HCA_FS_IB_BITS_OFFSET); + MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR, + INIT_HCA_FS_IB_NUM_ADDRS_OFFSET); + + if (dev->caps.dmfs_high_steer_mode != + MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) + MLX4_PUT(inbox, + ((u8)(a0_dmfs_hw_steering[dev->caps.dmfs_high_steer_mode] + << 6)), + INIT_HCA_FS_A0_OFFSET); + } else { + MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_mc_entry_sz, + INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); + MLX4_PUT(inbox, param->log_mc_hash_sz, + INIT_HCA_LOG_MC_HASH_SZ_OFFSET); + MLX4_PUT(inbox, 
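/*
 * Worked example for the EQE/CQE stride encoding above: when both
 * stride capabilities are present, eqe_size and cqe_size are set to the
 * cache line size, e.g. 64 bytes, so ilog2(64) - 5 = 1 and the byte
 * written to INIT_HCA_EQE_CQE_STRIDE_OFFSET is (1 << 4) | 1 = 0x11.
 * mlx4_QUERY_HCA() below reverses this, recovering
 * 1 << (1 + 5) = 64 bytes for each of cqe_size and eqe_size.
 */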
param->log_mc_table_sz, + INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); + if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0) + MLX4_PUT(inbox, (u8) (1 << 3), + INIT_HCA_UC_STEERING_OFFSET); + } + + /* TPT attributes */ + + MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET); + MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET); + MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET); + MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET); + MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET); + + /* UAR attributes */ + + MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET); + MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET); + + /* set parser VXLAN attributes */ + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) { + u8 parser_params = 0; + MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET); + } + + err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, + MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); + + if (err) + mlx4_err(dev, "INIT_HCA returns %d\n", err); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_QUERY_HCA(struct mlx4_dev *dev, + struct mlx4_init_hca_param *param) +{ + struct mlx4_cmd_mailbox *mailbox; + __be32 *outbox; + u32 dword_field; + int err; + u8 byte_field; + static const u8 a0_dmfs_query_hw_steering[] = { + [0] = MLX4_STEERING_DMFS_A0_DEFAULT, + [1] = MLX4_STEERING_DMFS_A0_DYNAMIC, + [2] = MLX4_STEERING_DMFS_A0_STATIC, + [3] = MLX4_STEERING_DMFS_A0_DISABLE + }; + +#define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04 +#define QUERY_HCA_CORE_CLOCK_OFFSET 0x0c + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, + MLX4_CMD_QUERY_HCA, + MLX4_CMD_TIME_CLASS_B, + !mlx4_is_slave(dev)); + if (err) + goto out; + + MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET); + MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET); + + /* QPC/EEC/CQC/EQC/RDMARC attributes */ + + MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET); + MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET); + MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET); + MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET); + MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET); + MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET); + MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET); + MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET); + MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET); + MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET); + MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET); + MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); + MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); + + MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET); + if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) { + param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; + } else { + MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET); + if (byte_field & 0x8) + param->steering_mode = MLX4_STEERING_MODE_B0; + else + param->steering_mode = MLX4_STEERING_MODE_A0; + } + + if (dword_field & (1 << 13)) + param->rss_ip_frags = 1; + + /* steering attributes */ + if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { + MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET); + MLX4_GET(param->log_mc_entry_sz, outbox, + 
INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); + MLX4_GET(param->log_mc_table_sz, outbox, + INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); + MLX4_GET(byte_field, outbox, + INIT_HCA_FS_A0_OFFSET); + param->dmfs_high_steer_mode = + a0_dmfs_query_hw_steering[(byte_field >> 6) & 3]; + } else { + MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET); + MLX4_GET(param->log_mc_entry_sz, outbox, + INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); + MLX4_GET(param->log_mc_hash_sz, outbox, + INIT_HCA_LOG_MC_HASH_SZ_OFFSET); + MLX4_GET(param->log_mc_table_sz, outbox, + INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); + } + + /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */ + MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS); + if (byte_field & 0x20) /* 64-bytes eqe enabled */ + param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED; + if (byte_field & 0x40) /* 64-bytes cqe enabled */ + param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED; + + /* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */ + MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET); + if (byte_field) { + param->dev_cap_enabled |= MLX4_DEV_CAP_EQE_STRIDE_ENABLED; + param->dev_cap_enabled |= MLX4_DEV_CAP_CQE_STRIDE_ENABLED; + param->cqe_size = 1 << ((byte_field & + MLX4_CQE_SIZE_MASK_STRIDE) + 5); + param->eqe_size = 1 << (((byte_field & + MLX4_EQE_SIZE_MASK_STRIDE) >> 4) + 5); + } + + /* TPT attributes */ + + MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET); + MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET); + MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET); + MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET); + MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET); + + /* UAR attributes */ + + MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET); + MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET); + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + + return err; +} + +static int mlx4_hca_core_clock_update(struct mlx4_dev *dev) +{ + struct mlx4_cmd_mailbox *mailbox; + __be32 *outbox; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + mlx4_warn(dev, "hca_core_clock mailbox allocation failed\n"); + return PTR_ERR(mailbox); + } + outbox = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, + MLX4_CMD_QUERY_HCA, + MLX4_CMD_TIME_CLASS_B, + !mlx4_is_slave(dev)); + if (err) { + mlx4_warn(dev, "hca_core_clock update failed\n"); + goto out; + } + + MLX4_GET(dev->caps.hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET); + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + + return err; +} + +/* for IB-type ports only in SRIOV mode. 
Checks that both proxy QP0 + * and real QP0 are active, so that the paravirtualized QP0 is ready + * to operate */ +static int check_qp0_state(struct mlx4_dev *dev, int function, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + /* irrelevant if not infiniband */ + if (priv->mfunc.master.qp0_state[port].proxy_qp0_active && + priv->mfunc.master.qp0_state[port].qp0_active) + return 1; + return 0; +} + +int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier); + int err; + + if (port < 0) + return -EINVAL; + + if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port)) + return 0; + + if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) { + /* Enable port only if it was previously disabled */ + if (!priv->mfunc.master.init_port_ref[port]) { + err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (err) + return err; + } + priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); + } else { + if (slave == mlx4_master_func_num(dev)) { + if (check_qp0_state(dev, slave, port) && + !priv->mfunc.master.qp0_state[port].port_active) { + err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (err) + return err; + priv->mfunc.master.qp0_state[port].port_active = 1; + priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); + } + } else + priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); + } + ++priv->mfunc.master.init_port_ref[port]; + return 0; +} + +int mlx4_INIT_PORT(struct mlx4_dev *dev, int port) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *inbox; + int err; + u32 flags; + u16 field; + + if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { +#define INIT_PORT_IN_SIZE 256 +#define INIT_PORT_FLAGS_OFFSET 0x00 +#define INIT_PORT_FLAG_SIG (1 << 18) +#define INIT_PORT_FLAG_NG (1 << 17) +#define INIT_PORT_FLAG_G0 (1 << 16) +#define INIT_PORT_VL_SHIFT 4 +#define INIT_PORT_PORT_WIDTH_SHIFT 8 +#define INIT_PORT_MTU_OFFSET 0x04 +#define INIT_PORT_MAX_GID_OFFSET 0x06 +#define INIT_PORT_MAX_PKEY_OFFSET 0x0a +#define INIT_PORT_GUID0_OFFSET 0x10 +#define INIT_PORT_NODE_GUID_OFFSET 0x18 +#define INIT_PORT_SI_GUID_OFFSET 0x20 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + flags = 0; + flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT; + flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT; + MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET); + + field = 128 << dev->caps.ib_mtu_cap[port]; + MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET); + field = dev->caps.gid_table_len[port]; + MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET); + field = dev->caps.pkey_table_len[port]; + MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET); + + err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + } else + err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + + if (!err) + mlx4_hca_core_clock_update(dev); + + return err; +} +EXPORT_SYMBOL_GPL(mlx4_INIT_PORT); + +int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + 
struct mlx4_cmd_info *cmd) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier); + int err; + + if (port < 0) + return -EINVAL; + + if (!(priv->mfunc.master.slave_state[slave].init_port_mask & + (1 << port))) + return 0; + + if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) { + if (priv->mfunc.master.init_port_ref[port] == 1) { + err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (err) + return err; + } + priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); + } else { + /* infiniband port */ + if (slave == mlx4_master_func_num(dev)) { + if (!priv->mfunc.master.qp0_state[port].qp0_active && + priv->mfunc.master.qp0_state[port].port_active) { + err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (err) + return err; + priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); + priv->mfunc.master.qp0_state[port].port_active = 0; + } + } else + priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); + } + --priv->mfunc.master.init_port_ref[port]; + return 0; +} + +int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port) +{ + return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); +} +EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT); + +int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic) +{ + return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, + MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); +} + +struct mlx4_config_dev { + __be32 update_flags; + __be32 rsvd1[3]; + __be16 vxlan_udp_dport; + __be16 rsvd2; + __be32 rsvd3; + __be32 roce_flags; + __be32 rsvd4[25]; + __be16 rsvd5; + u8 rsvd6; + u8 rx_checksum_val; +}; + +#define MLX4_VXLAN_UDP_DPORT (1 << 0) +#define MLX4_DISABLE_RX_PORT BIT(18) + +static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) +{ + int err; + struct mlx4_cmd_mailbox *mailbox; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + memcpy(mailbox->buf, config_dev, sizeof(*config_dev)); + + err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +static int mlx4_CONFIG_DEV_get(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) +{ + int err; + struct mlx4_cmd_mailbox *mailbox; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 1, MLX4_CMD_CONFIG_DEV, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + + if (!err) + memcpy(config_dev, mailbox->buf, sizeof(*config_dev)); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +/* Conversion between the HW values and the actual functionality. + * The value represented by the array index, + * and the functionality determined by the flags. 
+ */ +static const u8 config_dev_csum_flags[] = { + [0] = 0, + [1] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP, + [2] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP | + MLX4_RX_CSUM_MODE_L4, + [3] = MLX4_RX_CSUM_MODE_L4 | + MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP | + MLX4_RX_CSUM_MODE_MULTI_VLAN +}; + +int mlx4_config_dev_retrieval(struct mlx4_dev *dev, + struct mlx4_config_dev_params *params) +{ + struct mlx4_config_dev config_dev = {0}; + int err; + u8 csum_mask; + +#define CONFIG_DEV_RX_CSUM_MODE_MASK 0x7 +#define CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET 0 +#define CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET 4 + + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV)) + return -ENOTSUPP; + + err = mlx4_CONFIG_DEV_get(dev, &config_dev); + if (err) + return err; + + csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET) & + CONFIG_DEV_RX_CSUM_MODE_MASK; + + if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0])) + return -EINVAL; + params->rx_csum_flags_port_1 = config_dev_csum_flags[csum_mask]; + + csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET) & + CONFIG_DEV_RX_CSUM_MODE_MASK; + + if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0])) + return -EINVAL; + params->rx_csum_flags_port_2 = config_dev_csum_flags[csum_mask]; + + params->vxlan_udp_dport = be16_to_cpu(config_dev.vxlan_udp_dport); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_config_dev_retrieval); + +int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port) +{ + struct mlx4_config_dev config_dev; + + memset(&config_dev, 0, sizeof(config_dev)); + config_dev.update_flags = cpu_to_be32(MLX4_VXLAN_UDP_DPORT); + config_dev.vxlan_udp_dport = udp_port; + + return mlx4_CONFIG_DEV_set(dev, &config_dev); +} +EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port); + +#define CONFIG_DISABLE_RX_PORT BIT(15) +int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis) +{ + struct mlx4_config_dev config_dev; + + memset(&config_dev, 0, sizeof(config_dev)); + config_dev.update_flags = cpu_to_be32(MLX4_DISABLE_RX_PORT); + if (dis) + config_dev.roce_flags = + cpu_to_be32(CONFIG_DISABLE_RX_PORT); + + return mlx4_CONFIG_DEV_set(dev, &config_dev); +} + +int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2) +{ + struct mlx4_cmd_mailbox *mailbox; + struct { + __be32 v_port1; + __be32 v_port2; + } *v2p; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return -ENOMEM; + + v2p = mailbox->buf; + v2p->v_port1 = cpu_to_be32(port1); + v2p->v_port2 = cpu_to_be32(port2); + + err = mlx4_cmd(dev, mailbox->dma, 0, + MLX4_SET_PORT_VIRT2PHY, MLX4_CMD_VIRT_PORT_MAP, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + + +int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages) +{ + int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0, + MLX4_CMD_SET_ICM_SIZE, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (ret) + return ret; + + /* + * Round up number of system pages needed in case + * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. + */ + *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> + (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); + + return 0; +} + +int mlx4_NOP(struct mlx4_dev *dev) +{ + /* Input modifier of 0x1f means "finish as soon as possible." 
*/ + return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); +} + +int mlx4_get_phys_port_id(struct mlx4_dev *dev) +{ + u8 port; + u32 *outbox; + struct mlx4_cmd_mailbox *mailbox; + u32 in_mod; + u32 guid_hi, guid_lo; + int err, ret = 0; +#define MOD_STAT_CFG_PORT_OFFSET 8 +#define MOD_STAT_CFG_GUID_H 0X14 +#define MOD_STAT_CFG_GUID_L 0X1c + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + for (port = 1; port <= dev->caps.num_ports; port++) { + in_mod = port << MOD_STAT_CFG_PORT_OFFSET; + err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2, + MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) { + mlx4_err(dev, "Fail to get port %d uplink guid\n", + port); + ret = err; + } else { + MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H); + MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L); + dev->caps.phys_port_id[port] = (u64)guid_lo | + (u64)guid_hi << 32; + } + } + mlx4_free_cmd_mailbox(dev, mailbox); + return ret; +} + +#define MLX4_WOL_SETUP_MODE (5 << 28) +int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port) +{ + u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; + + return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3, + MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); +} +EXPORT_SYMBOL_GPL(mlx4_wol_read); + +int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port) +{ + u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; + + return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); +} +EXPORT_SYMBOL_GPL(mlx4_wol_write); + +enum { + ADD_TO_MCG = 0x26, +}; + + +void mlx4_opreq_action(struct work_struct *work) +{ + struct mlx4_priv *priv = container_of(work, struct mlx4_priv, + opreq_task); + struct mlx4_dev *dev = &priv->dev; + int num_tasks = atomic_read(&priv->opreq_count); + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mgm *mgm; + u32 *outbox; + u32 modifier; + u16 token; + u16 type; + int err; + u32 num_qps; + struct mlx4_qp qp; + int i; + u8 rem_mcg; + u8 prot; + +#define GET_OP_REQ_MODIFIER_OFFSET 0x08 +#define GET_OP_REQ_TOKEN_OFFSET 0x14 +#define GET_OP_REQ_TYPE_OFFSET 0x1a +#define GET_OP_REQ_DATA_OFFSET 0x20 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n"); + return; + } + outbox = mailbox->buf; + + while (num_tasks) { + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, + MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) { + mlx4_err(dev, "Failed to retrieve required operation: %d\n", + err); + return; + } + MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET); + MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET); + MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET); + type &= 0xfff; + + switch (type) { + case ADD_TO_MCG: + if (dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) { + mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n"); + err = EPERM; + break; + } + mgm = (struct mlx4_mgm *)((u8 *)(outbox) + + GET_OP_REQ_DATA_OFFSET); + num_qps = be32_to_cpu(mgm->members_count) & + MGM_QPN_MASK; + rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1; + prot = ((u8 *)(&mgm->members_count))[0] >> 6; + + for (i = 0; i < num_qps; i++) { + qp.qpn = be32_to_cpu(mgm->qp[i]); + if (rem_mcg) + err = mlx4_multicast_detach(dev, &qp, + mgm->gid, + prot, 0); + else + err = mlx4_multicast_attach(dev, &qp, + mgm->gid, + mgm->gid[5] + , 0, prot, + NULL); + if (err) + 
break; + } + break; + default: + mlx4_warn(dev, "Bad type for required operation\n"); + err = EINVAL; + break; + } + err = mlx4_cmd(dev, 0, ((u32) err | + (__force u32)cpu_to_be32(token) << 16), + 1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) { + mlx4_err(dev, "Failed to acknowledge required request: %d\n", + err); + goto out; + } + memset(outbox, 0, 0xffc); + num_tasks = atomic_dec_return(&priv->opreq_count); + } + +out: + mlx4_free_cmd_mailbox(dev, mailbox); +} + +static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev, + struct mlx4_cmd_mailbox *mailbox) +{ +#define MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET 0x10 +#define MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET 0x20 +#define MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET 0x40 +#define MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET 0x70 + + u32 set_attr_mask, getresp_attr_mask; + u32 trap_attr_mask, traprepress_attr_mask; + + MLX4_GET(set_attr_mask, mailbox->buf, + MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET); + mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n", + set_attr_mask); + + MLX4_GET(getresp_attr_mask, mailbox->buf, + MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET); + mlx4_dbg(dev, "SMP firewall getresp_attribute_mask = 0x%x\n", + getresp_attr_mask); + + MLX4_GET(trap_attr_mask, mailbox->buf, + MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET); + mlx4_dbg(dev, "SMP firewall trap_attribute_mask = 0x%x\n", + trap_attr_mask); + + MLX4_GET(traprepress_attr_mask, mailbox->buf, + MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET); + mlx4_dbg(dev, "SMP firewall traprepress_attribute_mask = 0x%x\n", + traprepress_attr_mask); + + if (set_attr_mask && getresp_attr_mask && trap_attr_mask && + traprepress_attr_mask) + return 1; + + return 0; +} + +int mlx4_config_mad_demux(struct mlx4_dev *dev) +{ + struct mlx4_cmd_mailbox *mailbox; + int secure_host_active; + int err; + + /* Check if mad_demux is supported */ + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_MAD_DEMUX)) + return 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + mlx4_warn(dev, "Failed to allocate mailbox for cmd MAD_DEMUX"); + return -ENOMEM; + } + + /* Query mad_demux to find out which MADs are handled by internal sma */ + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0x01 /* subn mgmt class */, + MLX4_CMD_MAD_DEMUX_QUERY_RESTR, MLX4_CMD_MAD_DEMUX, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + if (err) { + mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: query restrictions failed (%d)\n", + err); + goto out; + } + + secure_host_active = mlx4_check_smp_firewall_active(dev, mailbox); + + /* Config mad_demux to handle all MADs returned by the query above */ + err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */, + MLX4_CMD_MAD_DEMUX_CONFIG, MLX4_CMD_MAD_DEMUX, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + if (err) { + mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: configure failed (%d)\n", err); + goto out; + } + + if (secure_host_active) + mlx4_warn(dev, "HCA operating in secure-host mode. 
SMP firewall activated.\n"); +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +/* Access Reg commands */ +enum mlx4_access_reg_masks { + MLX4_ACCESS_REG_STATUS_MASK = 0x7f, + MLX4_ACCESS_REG_METHOD_MASK = 0x7f, + MLX4_ACCESS_REG_LEN_MASK = 0x7ff +}; + +struct mlx4_access_reg { + __be16 constant1; + u8 status; + u8 resrvd1; + __be16 reg_id; + u8 method; + u8 constant2; + __be32 resrvd2[2]; + __be16 len_const; + __be16 resrvd3; +#define MLX4_ACCESS_REG_HEADER_SIZE (20) + u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE]; +} __attribute__((__packed__)); + +/** + * mlx4_ACCESS_REG - Generic access reg command. + * @dev: mlx4_dev. + * @reg_id: register ID to access. + * @method: Access method Read/Write. + * @reg_len: register length to Read/Write in bytes. + * @reg_data: reg_data pointer to Read/Write From/To. + * + * Access ConnectX registers FW command. + * Returns 0 on success and copies outbox mlx4_access_reg data + * field into reg_data or a negative error code. + */ +static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id, + enum mlx4_access_reg_method method, + u16 reg_len, void *reg_data) +{ + struct mlx4_cmd_mailbox *inbox, *outbox; + struct mlx4_access_reg *inbuf, *outbuf; + int err; + + inbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(inbox)) + return PTR_ERR(inbox); + + outbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(outbox)) { + mlx4_free_cmd_mailbox(dev, inbox); + return PTR_ERR(outbox); + } + + inbuf = inbox->buf; + outbuf = outbox->buf; + + inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4); + inbuf->constant2 = 0x1; + inbuf->reg_id = cpu_to_be16(reg_id); + inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK; + + reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data))); + inbuf->len_const = + cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) | + ((0x3) << 12)); + + memcpy(inbuf->reg_data, reg_data, reg_len); + err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0, + MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_WRAPPED); + if (err) + goto out; + + if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) { + err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK; + mlx4_err(dev, + "MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n", + reg_id, err); + goto out; + } + + memcpy(reg_data, outbuf->reg_data, reg_len); +out: + mlx4_free_cmd_mailbox(dev, inbox); + mlx4_free_cmd_mailbox(dev, outbox); + return err; +} + +/* ConnectX registers IDs */ +enum mlx4_reg_id { + MLX4_REG_ID_PTYS = 0x5004, +}; + +/** + * mlx4_ACCESS_PTYS_REG - Access PTYs (Port Type and Speed) + * register + * @dev: mlx4_dev. + * @method: Access method Read/Write. + * @ptys_reg: PTYS register data pointer. + * + * Access ConnectX PTYS register, to Read/Write Port Type/Speed + * configuration + * Returns 0 on success or a negative error code. 
+ */ +int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev, + enum mlx4_access_reg_method method, + struct mlx4_ptys_reg *ptys_reg) +{ + return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS, + method, sizeof(*ptys_reg), ptys_reg); +} +EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG); + +int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_access_reg *inbuf = inbox->buf; + u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK; + u16 reg_id = be16_to_cpu(inbuf->reg_id); + + if (slave != mlx4_master_func_num(dev) && + method == MLX4_ACCESS_REG_WRITE) + return -EPERM; + + if (reg_id == MLX4_REG_ID_PTYS) { + struct mlx4_ptys_reg *ptys_reg = + (struct mlx4_ptys_reg *)inbuf->reg_data; + + ptys_reg->local_port = + mlx4_slave_convert_port(dev, slave, + ptys_reg->local_port); + } + + return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier, + 0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h new file mode 100644 index 000000000..07cb7c246 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -0,0 +1,257 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. + * Copyright (c) 2006, 2007 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef MLX4_FW_H +#define MLX4_FW_H + +#include "mlx4.h" +#include "icm.h" + +struct mlx4_mod_stat_cfg { + u8 log_pg_sz; + u8 log_pg_sz_m; +}; + +struct mlx4_port_cap { + u8 supported_port_types; + u8 suggested_type; + u8 default_sense; + u8 log_max_macs; + u8 log_max_vlans; + int ib_mtu; + int max_port_width; + int max_vl; + int max_gids; + int max_pkeys; + u64 def_mac; + u16 eth_mtu; + int trans_type; + int vendor_oui; + u16 wavelength; + u64 trans_code; + u8 dmfs_optimized_state; +}; + +struct mlx4_dev_cap { + int max_srq_sz; + int max_qp_sz; + int reserved_qps; + int max_qps; + int reserved_srqs; + int max_srqs; + int max_cq_sz; + int reserved_cqs; + int max_cqs; + int max_mpts; + int reserved_eqs; + int max_eqs; + int num_sys_eqs; + int reserved_mtts; + int max_mrw_sz; + int reserved_mrws; + int max_mtt_seg; + int max_requester_per_qp; + int max_responder_per_qp; + int max_rdma_global; + int local_ca_ack_delay; + int num_ports; + u32 max_msg_sz; + u16 stat_rate_support; + int fs_log_max_ucast_qp_range_size; + int fs_max_num_qp_per_entry; + u64 flags; + u64 flags2; + int reserved_uars; + int uar_size; + int min_page_sz; + int bf_reg_size; + int bf_regs_per_page; + int max_sq_sg; + int max_sq_desc_sz; + int max_rq_sg; + int max_rq_desc_sz; + int max_qp_per_mcg; + int reserved_mgms; + int max_mcgs; + int reserved_pds; + int max_pds; + int reserved_xrcds; + int max_xrcds; + int qpc_entry_sz; + int rdmarc_entry_sz; + int altc_entry_sz; + int aux_entry_sz; + int srq_entry_sz; + int cqc_entry_sz; + int eqc_entry_sz; + int dmpt_entry_sz; + int cmpt_entry_sz; + int mtt_entry_sz; + int resize_srq; + u32 bmme_flags; + u32 reserved_lkey; + u64 max_icm_sz; + int max_gso_sz; + int max_rss_tbl_sz; + u32 max_counters; + u32 dmfs_high_rate_qpn_base; + u32 dmfs_high_rate_qpn_range; + struct mlx4_rate_limit_caps rl_caps; + struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1]; +}; + +struct mlx4_func_cap { + u8 num_ports; + u8 flags; + u32 pf_context_behaviour; + int qp_quota; + int cq_quota; + int srq_quota; + int mpt_quota; + int mtt_quota; + int max_eq; + int reserved_eq; + int mcg_quota; + u32 qp0_qkey; + u32 qp0_tunnel_qpn; + u32 qp0_proxy_qpn; + u32 qp1_tunnel_qpn; + u32 qp1_proxy_qpn; + u32 reserved_lkey; + u8 physical_port; + u8 port_flags; + u8 flags1; + u64 phys_port_id; + u32 extra_flags; +}; + +struct mlx4_func { + int bus; + int device; + int function; + int physical_function; + int rsvd_eqs; + int max_eq; + int rsvd_uars; +}; + +struct mlx4_adapter { + char board_id[MLX4_BOARD_ID_LEN]; + u8 inta_pin; +}; + +struct mlx4_init_hca_param { + u64 qpc_base; + u64 rdmarc_base; + u64 auxc_base; + u64 altc_base; + u64 srqc_base; + u64 cqc_base; + u64 eqc_base; + u64 mc_base; + u64 dmpt_base; + u64 cmpt_base; + u64 mtt_base; + u64 global_caps; + u16 log_mc_entry_sz; + u16 log_mc_hash_sz; + u16 hca_core_clock; /* Internal Clock Frequency (in MHz) */ + u8 log_num_qps; + u8 log_num_srqs; + u8 log_num_cqs; + u8 log_num_eqs; + u16 num_sys_eqs; + u8 log_rd_per_qp; + u8 log_mc_table_sz; + u8 log_mpt_sz; + u8 log_uar_sz; + u8 mw_enabled; /* Enable memory windows */ + u8 uar_page_sz; /* log pg sz in 4k chunks */ + u8 steering_mode; /* for QUERY_HCA */ + u8 dmfs_high_steer_mode; /* for QUERY_HCA */ + u64 dev_cap_enabled; + u16 cqe_size; /* For use only when CQE stride feature enabled */ + u16 eqe_size; /* For use only when EQE stride feature enabled */ + u8 rss_ip_frags; +}; + +struct mlx4_init_ib_param { + int port_width; + int vl_cap; + int mtu_cap; + u16 gid_cap; + u16 pkey_cap; + int set_guid0; + u64 
guid0; + int set_node_guid; + u64 node_guid; + int set_si_guid; + u64 si_guid; +}; + +struct mlx4_set_ib_param { + int set_si_guid; + int reset_qkey_viol; + u64 si_guid; + u32 cap_mask; +}; + +void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap); +int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap); +int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap); +int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port, + struct mlx4_func_cap *func_cap); +int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave); +int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm); +int mlx4_UNMAP_FA(struct mlx4_dev *dev); +int mlx4_RUN_FW(struct mlx4_dev *dev); +int mlx4_QUERY_FW(struct mlx4_dev *dev); +int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter); +int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param); +int mlx4_QUERY_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param); +int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic); +int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt); +int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages); +int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm); +int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev); +int mlx4_NOP(struct mlx4_dev *dev); +int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg); +void mlx4_opreq_action(struct work_struct *work); + +#endif /* MLX4_FW_H */ diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.c b/drivers/net/ethernet/mellanox/mlx4/fw_qos.c new file mode 100644 index 000000000..8f2fde048 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/fw_qos.c @@ -0,0 +1,289 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. + * All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/export.h> +#include "fw_qos.h" +#include "fw.h" + +enum { + /* allocate vpp opcode modifiers */ + MLX4_ALLOCATE_VPP_ALLOCATE = 0x0, + MLX4_ALLOCATE_VPP_QUERY = 0x1 +}; + +enum { + /* set vport qos opcode modifiers */ + MLX4_SET_VPORT_QOS_SET = 0x0, + MLX4_SET_VPORT_QOS_QUERY = 0x1 +}; + +struct mlx4_set_port_prio2tc_context { + u8 prio2tc[4]; +}; + +struct mlx4_port_scheduler_tc_cfg_be { + __be16 pg; + __be16 bw_precentage; + __be16 max_bw_units; /* 3-100Mbps, 4-1Gbps, other values - reserved */ + __be16 max_bw_value; +}; + +struct mlx4_set_port_scheduler_context { + struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC]; +}; + +/* Granular Qos (per VF) section */ +struct mlx4_alloc_vpp_param { + __be32 availible_vpp; + __be32 vpp_p_up[MLX4_NUM_UP]; +}; + +struct mlx4_prio_qos_param { + __be32 bw_share; + __be32 max_avg_bw; + __be32 reserved; + __be32 enable; + __be32 reserved1[4]; +}; + +struct mlx4_set_vport_context { + __be32 reserved[8]; + struct mlx4_prio_qos_param qos_p_up[MLX4_NUM_UP]; +}; + +int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_prio2tc_context *context; + int err; + u32 in_mod; + int i; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + context = mailbox->buf; + + for (i = 0; i < MLX4_NUM_UP; i += 2) + context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1]; + + in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC); + +int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw, + u8 *pg, u16 *ratelimit) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_scheduler_context *context; + int err; + u32 in_mod; + int i; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + context = mailbox->buf; + + for (i = 0; i < MLX4_NUM_TC; i++) { + struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i]; + u16 r; + + if (ratelimit && ratelimit[i]) { + if (ratelimit[i] <= MLX4_MAX_100M_UNITS_VAL) { + r = ratelimit[i]; + tc->max_bw_units = + htons(MLX4_RATELIMIT_100M_UNITS); + } else { + r = ratelimit[i] / 10; + tc->max_bw_units = + htons(MLX4_RATELIMIT_1G_UNITS); + } + tc->max_bw_value = htons(r); + } else { + tc->max_bw_value = htons(MLX4_RATELIMIT_DEFAULT); + tc->max_bw_units = htons(MLX4_RATELIMIT_1G_UNITS); + } + + tc->pg = htons(pg[i]); + tc->bw_precentage = htons(tc_tx_bw[i]); + } + + in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER); + +int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port, + u16 *availible_vpp, u8 *vpp_p_up) +{ + int i; + int err; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_alloc_vpp_param *out_param; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + out_param = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, port, + MLX4_ALLOCATE_VPP_QUERY, + MLX4_CMD_ALLOCATE_VPP, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + goto out; + + /* Total number of supported VPPs */ + *availible_vpp = (u16)be32_to_cpu(out_param->availible_vpp); + + for (i = 0; i < MLX4_NUM_UP; i++) + vpp_p_up[i] = 
(u8)be32_to_cpu(out_param->vpp_p_up[i]); + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + + return err; +} +EXPORT_SYMBOL(mlx4_ALLOCATE_VPP_get); + +int mlx4_ALLOCATE_VPP_set(struct mlx4_dev *dev, u8 port, u8 *vpp_p_up) +{ + int i; + int err; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_alloc_vpp_param *in_param; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + in_param = mailbox->buf; + + for (i = 0; i < MLX4_NUM_UP; i++) + in_param->vpp_p_up[i] = cpu_to_be32(vpp_p_up[i]); + + err = mlx4_cmd(dev, mailbox->dma, port, + MLX4_ALLOCATE_VPP_ALLOCATE, + MLX4_CMD_ALLOCATE_VPP, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_ALLOCATE_VPP_set); + +int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport, + struct mlx4_vport_qos_param *out_param) +{ + int i; + int err; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_vport_context *ctx; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + ctx = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, (vport << 8) | port, + MLX4_SET_VPORT_QOS_QUERY, + MLX4_CMD_SET_VPORT_QOS, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + goto out; + + for (i = 0; i < MLX4_NUM_UP; i++) { + out_param[i].bw_share = be32_to_cpu(ctx->qos_p_up[i].bw_share); + out_param[i].max_avg_bw = + be32_to_cpu(ctx->qos_p_up[i].max_avg_bw); + out_param[i].enable = + !!(be32_to_cpu(ctx->qos_p_up[i].enable) & 31); + } + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + + return err; +} +EXPORT_SYMBOL(mlx4_SET_VPORT_QOS_get); + +int mlx4_SET_VPORT_QOS_set(struct mlx4_dev *dev, u8 port, u8 vport, + struct mlx4_vport_qos_param *in_param) +{ + int i; + int err; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_vport_context *ctx; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + ctx = mailbox->buf; + + for (i = 0; i < MLX4_NUM_UP; i++) { + ctx->qos_p_up[i].bw_share = cpu_to_be32(in_param[i].bw_share); + ctx->qos_p_up[i].max_avg_bw = + cpu_to_be32(in_param[i].max_avg_bw); + ctx->qos_p_up[i].enable = + cpu_to_be32(in_param[i].enable << 31); + } + + err = mlx4_cmd(dev, mailbox->dma, (vport << 8) | port, + MLX4_SET_VPORT_QOS_SET, + MLX4_CMD_SET_VPORT_QOS, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_VPORT_QOS_set); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h new file mode 100644 index 000000000..ac1f33187 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. + * All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_FW_QOS_H +#define MLX4_FW_QOS_H + +#include <linux/mlx4/cmd.h> +#include <linux/mlx4/device.h> + +#define MLX4_NUM_UP 8 +#define MLX4_NUM_TC 8 + +/* Default supported priorities for VPP allocation */ +#define MLX4_DEFAULT_QOS_PRIO (0) + +/* Derived from FW feature definition, 0 is the default vport for all QPs */ +#define MLX4_VPP_DEFAULT_VPORT (0) + +struct mlx4_vport_qos_param { + u32 bw_share; + u32 max_avg_bw; + u8 enable; +}; + +/** + * mlx4_SET_PORT_PRIO2TC - This routine maps user priorities to traffic + * classes of a given port and device. + * + * @dev: mlx4_dev. + * @port: Physical port number. + * @prio2tc: Array of TCs associated with each priority. + * + * Returns 0 on success or a negative mlx4_core errno code. + **/ +int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc); + +/** + * mlx4_SET_PORT_SCHEDULER - This routine configures the arbitration between + * traffic classes (ETS) and configured rate limit for traffic classes. + * tc_tx_bw, pg and ratelimit are arrays where each index represents a TC. + * The description for those parameters below refers to a single TC. + * + * @dev: mlx4_dev. + * @port: Physical port number. + * @tc_tx_bw: The percentage of the bandwidth allocated for the traffic class + * within a TC group. The sum of the bw_percentage of all the traffic + * classes within a TC group must equal 100% for correct operation. + * @pg: The TC group the traffic class is associated with. + * @ratelimit: The maximal bandwidth allowed for use by this traffic class. + * + * Returns 0 on success or a negative mlx4_core errno code. + **/ +int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw, + u8 *pg, u16 *ratelimit); +/** + * mlx4_ALLOCATE_VPP_get - Query port VPP available resources and allocation. + * Before distribution of VPPs to priorities, only availible_vpp is returned. + * After initialization it returns the distribution of VPPs among priorities. + * + * @dev: mlx4_dev. + * @port: Physical port number. + * @availible_vpp: Pointer to variable where the number of available VPPs is stored + * @vpp_p_up: Distribution of VPPs to priorities is stored in this array + * + * Returns 0 on success or a negative mlx4_core errno code. + **/ +int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port, + u16 *availible_vpp, u8 *vpp_p_up); +/** + * mlx4_ALLOCATE_VPP_set - Distribution of VPPs among different priorities. + * The total number of VPPs assigned to all priorities of a port must not exceed + * the value reported by availible_vpp in mlx4_ALLOCATE_VPP_get. + * VPP allocation is allowed only after the port type has been set, + * and while no QPs are open for this port. + * + * @dev: mlx4_dev. + * @port: Physical port number. + * @vpp_p_up: Allocation of VPPs to different priorities. 
+ * + * Returns 0 on success or a negative mlx4_core errno code. + **/ +int mlx4_ALLOCATE_VPP_set(struct mlx4_dev *dev, u8 port, u8 *vpp_p_up); + +/** + * mlx4_SET_VPORT_QOS_get - Query QoS properties of a Vport. + * Each priority allowed for the Vport is assigned a share of the BW, + * and a BW limitation. This command queries the current QoS values. + * + * @dev: mlx4_dev. + * @port: Physical port number. + * @vport: Vport id. + * @out_param: Array of mlx4_vport_qos_param that will contain the values. + * + * Returns 0 on success or a negative mlx4_core errno code. + **/ +int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport, + struct mlx4_vport_qos_param *out_param); + +/** + * mlx4_SET_VPORT_QOS_set - Set QoS properties of a Vport. + * QoS parameters can be modified at any time, but must be initialized + * before any QP is associated with the VPort. + * + * @dev: mlx4_dev. + * @port: Physical port number. + * @vport: Vport id. + * @in_param: Array of mlx4_vport_qos_param which holds the requested values. + * + * Returns 0 on success or a negative mlx4_core errno code. + **/ +int mlx4_SET_VPORT_QOS_set(struct mlx4_dev *dev, u8 port, u8 vport, + struct mlx4_vport_qos_param *in_param); + +#endif /* MLX4_FW_QOS_H */ diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c new file mode 100644 index 000000000..2a9dd460a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -0,0 +1,462 @@ +/* + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/errno.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> + +#include <linux/mlx4/cmd.h> + +#include "mlx4.h" +#include "icm.h" +#include "fw.h" + +/* + * We allocate in as big chunks as we can, up to a maximum of 256 KB + * per chunk. 
+ */ +enum { + MLX4_ICM_ALLOC_SIZE = 1 << 18, + MLX4_TABLE_CHUNK_SIZE = 1 << 18 +}; + +static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) +{ + int i; + + if (chunk->nsg > 0) + pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages, + PCI_DMA_BIDIRECTIONAL); + + for (i = 0; i < chunk->npages; ++i) + __free_pages(sg_page(&chunk->mem[i]), + get_order(chunk->mem[i].length)); +} + +static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) +{ + int i; + + for (i = 0; i < chunk->npages; ++i) + dma_free_coherent(&dev->persist->pdev->dev, + chunk->mem[i].length, + lowmem_page_address(sg_page(&chunk->mem[i])), + sg_dma_address(&chunk->mem[i])); +} + +void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent) +{ + struct mlx4_icm_chunk *chunk, *tmp; + + if (!icm) + return; + + list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) { + if (coherent) + mlx4_free_icm_coherent(dev, chunk); + else + mlx4_free_icm_pages(dev, chunk); + + kfree(chunk); + } + + kfree(icm); +} + +static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, + gfp_t gfp_mask, int node) +{ + struct page *page; + + page = alloc_pages_node(node, gfp_mask, order); + if (!page) { + page = alloc_pages(gfp_mask, order); + if (!page) + return -ENOMEM; + } + + sg_set_page(mem, page, PAGE_SIZE << order, 0); + return 0; +} + +static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem, + int order, gfp_t gfp_mask) +{ + void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, + &sg_dma_address(mem), gfp_mask); + if (!buf) + return -ENOMEM; + + sg_set_buf(mem, buf, PAGE_SIZE << order); + BUG_ON(mem->offset); + sg_dma_len(mem) = PAGE_SIZE << order; + return 0; +} + +struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, + gfp_t gfp_mask, int coherent) +{ + struct mlx4_icm *icm; + struct mlx4_icm_chunk *chunk = NULL; + int cur_order; + int ret; + + /* We use sg_set_buf for coherent allocs, which assumes low memory */ + BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM)); + + icm = kmalloc_node(sizeof(*icm), + gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN), + dev->numa_node); + if (!icm) { + icm = kmalloc(sizeof(*icm), + gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); + if (!icm) + return NULL; + } + + icm->refcount = 0; + INIT_LIST_HEAD(&icm->chunk_list); + + cur_order = get_order(MLX4_ICM_ALLOC_SIZE); + + while (npages > 0) { + if (!chunk) { + chunk = kmalloc_node(sizeof(*chunk), + gfp_mask & ~(__GFP_HIGHMEM | + __GFP_NOWARN), + dev->numa_node); + if (!chunk) { + chunk = kmalloc(sizeof(*chunk), + gfp_mask & ~(__GFP_HIGHMEM | + __GFP_NOWARN)); + if (!chunk) + goto fail; + } + + sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN); + chunk->npages = 0; + chunk->nsg = 0; + list_add_tail(&chunk->list, &icm->chunk_list); + } + + while (1 << cur_order > npages) + --cur_order; + + if (coherent) + ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev, + &chunk->mem[chunk->npages], + cur_order, gfp_mask); + else + ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], + cur_order, gfp_mask, + dev->numa_node); + + if (ret) { + if (--cur_order < 0) + goto fail; + else + continue; + } + + ++chunk->npages; + + if (coherent) + ++chunk->nsg; + else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { + chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem, + chunk->npages, + PCI_DMA_BIDIRECTIONAL); + + if (chunk->nsg <= 0) + goto fail; + } + + if (chunk->npages == MLX4_ICM_CHUNK_LEN) + chunk = NULL; + + npages -= 1 << cur_order; + } + + if (!coherent && chunk) 
{ + chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem, + chunk->npages, + PCI_DMA_BIDIRECTIONAL); + + if (chunk->nsg <= 0) + goto fail; + } + + return icm; + +fail: + mlx4_free_icm(dev, icm, coherent); + return NULL; +} + +static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt) +{ + return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt); +} + +static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count) +{ + return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); +} + +int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm) +{ + return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1); +} + +int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev) +{ + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); +} + +int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj, + gfp_t gfp) +{ + u32 i = (obj & (table->num_obj - 1)) / + (MLX4_TABLE_CHUNK_SIZE / table->obj_size); + int ret = 0; + + mutex_lock(&table->mutex); + + if (table->icm[i]) { + ++table->icm[i]->refcount; + goto out; + } + + table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT, + (table->lowmem ? gfp : GFP_HIGHUSER) | + __GFP_NOWARN, table->coherent); + if (!table->icm[i]) { + ret = -ENOMEM; + goto out; + } + + if (mlx4_MAP_ICM(dev, table->icm[i], table->virt + + (u64) i * MLX4_TABLE_CHUNK_SIZE)) { + mlx4_free_icm(dev, table->icm[i], table->coherent); + table->icm[i] = NULL; + ret = -ENOMEM; + goto out; + } + + ++table->icm[i]->refcount; + +out: + mutex_unlock(&table->mutex); + return ret; +} + +void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj) +{ + u32 i; + u64 offset; + + i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size); + + mutex_lock(&table->mutex); + + if (--table->icm[i]->refcount == 0) { + offset = (u64) i * MLX4_TABLE_CHUNK_SIZE; + mlx4_UNMAP_ICM(dev, table->virt + offset, + MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE); + mlx4_free_icm(dev, table->icm[i], table->coherent); + table->icm[i] = NULL; + } + + mutex_unlock(&table->mutex); +} + +void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj, + dma_addr_t *dma_handle) +{ + int offset, dma_offset, i; + u64 idx; + struct mlx4_icm_chunk *chunk; + struct mlx4_icm *icm; + struct page *page = NULL; + + if (!table->lowmem) + return NULL; + + mutex_lock(&table->mutex); + + idx = (u64) (obj & (table->num_obj - 1)) * table->obj_size; + icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE]; + dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE; + + if (!icm) + goto out; + + list_for_each_entry(chunk, &icm->chunk_list, list) { + for (i = 0; i < chunk->npages; ++i) { + if (dma_handle && dma_offset >= 0) { + if (sg_dma_len(&chunk->mem[i]) > dma_offset) + *dma_handle = sg_dma_address(&chunk->mem[i]) + + dma_offset; + dma_offset -= sg_dma_len(&chunk->mem[i]); + } + /* + * DMA mapping can merge pages but not split them, + * so if we found the page, dma_handle has already + * been assigned to. + */ + if (chunk->mem[i].length > offset) { + page = sg_page(&chunk->mem[i]); + goto out; + } + offset -= chunk->mem[i].length; + } + } + +out: + mutex_unlock(&table->mutex); + return page ? 
lowmem_page_address(page) + offset : NULL; +} + +int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, + u32 start, u32 end) +{ + int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size; + int err; + u32 i; + + for (i = start; i <= end; i += inc) { + err = mlx4_table_get(dev, table, i, GFP_KERNEL); + if (err) + goto fail; + } + + return 0; + +fail: + while (i > start) { + i -= inc; + mlx4_table_put(dev, table, i); + } + + return err; +} + +void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, + u32 start, u32 end) +{ + u32 i; + + for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size) + mlx4_table_put(dev, table, i); +} + +int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, + u64 virt, int obj_size, u32 nobj, int reserved, + int use_lowmem, int use_coherent) +{ + int obj_per_chunk; + int num_icm; + unsigned chunk_size; + int i; + u64 size; + + obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size; + num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk; + + table->icm = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL); + if (!table->icm) + return -ENOMEM; + table->virt = virt; + table->num_icm = num_icm; + table->num_obj = nobj; + table->obj_size = obj_size; + table->lowmem = use_lowmem; + table->coherent = use_coherent; + mutex_init(&table->mutex); + + size = (u64) nobj * obj_size; + for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) { + chunk_size = MLX4_TABLE_CHUNK_SIZE; + if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > size) + chunk_size = PAGE_ALIGN(size - + i * MLX4_TABLE_CHUNK_SIZE); + + table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT, + (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) | + __GFP_NOWARN, use_coherent); + if (!table->icm[i]) + goto err; + if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) { + mlx4_free_icm(dev, table->icm[i], use_coherent); + table->icm[i] = NULL; + goto err; + } + + /* + * Add a reference to this ICM chunk so that it never + * gets freed (since it contains reserved firmware objects). + */ + ++table->icm[i]->refcount; + } + + return 0; + +err: + for (i = 0; i < num_icm; ++i) + if (table->icm[i]) { + mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE, + MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE); + mlx4_free_icm(dev, table->icm[i], use_coherent); + } + + kfree(table->icm); + + return -ENOMEM; +} + +void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table) +{ + int i; + + for (i = 0; i < table->num_icm; ++i) + if (table->icm[i]) { + mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE, + MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE); + mlx4_free_icm(dev, table->icm[i], table->coherent); + } + + kfree(table->icm); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h new file mode 100644 index 000000000..0c7364550 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/icm.h @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_ICM_H +#define MLX4_ICM_H + +#include <linux/list.h> +#include <linux/pci.h> +#include <linux/mutex.h> + +#define MLX4_ICM_CHUNK_LEN \ + ((256 - sizeof (struct list_head) - 2 * sizeof (int)) / \ + (sizeof (struct scatterlist))) + +enum { + MLX4_ICM_PAGE_SHIFT = 12, + MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT, +}; + +struct mlx4_icm_chunk { + struct list_head list; + int npages; + int nsg; + struct scatterlist mem[MLX4_ICM_CHUNK_LEN]; +}; + +struct mlx4_icm { + struct list_head chunk_list; + int refcount; +}; + +struct mlx4_icm_iter { + struct mlx4_icm *icm; + struct mlx4_icm_chunk *chunk; + int page_idx; +}; + +struct mlx4_dev; + +struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, + gfp_t gfp_mask, int coherent); +void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent); + +int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj, + gfp_t gfp); +void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj); +int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, + u32 start, u32 end); +void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, + u32 start, u32 end); +int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, + u64 virt, int obj_size, u32 nobj, int reserved, + int use_lowmem, int use_coherent); +void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table); +void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj, dma_addr_t *dma_handle); + +static inline void mlx4_icm_first(struct mlx4_icm *icm, + struct mlx4_icm_iter *iter) +{ + iter->icm = icm; + iter->chunk = list_empty(&icm->chunk_list) ? 
+ NULL : list_entry(icm->chunk_list.next, + struct mlx4_icm_chunk, list); + iter->page_idx = 0; +} + +static inline int mlx4_icm_last(struct mlx4_icm_iter *iter) +{ + return !iter->chunk; +} + +static inline void mlx4_icm_next(struct mlx4_icm_iter *iter) +{ + if (++iter->page_idx >= iter->chunk->nsg) { + if (iter->chunk->list.next == &iter->icm->chunk_list) { + iter->chunk = NULL; + return; + } + + iter->chunk = list_entry(iter->chunk->list.next, + struct mlx4_icm_chunk, list); + iter->page_idx = 0; + } +} + +static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter) +{ + return sg_dma_address(&iter->chunk->mem[iter->page_idx]); +} + +static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter) +{ + return sg_dma_len(&iter->chunk->mem[iter->page_idx]); +} + +int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm); +int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev); + +#endif /* MLX4_ICM_H */ diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c new file mode 100644 index 000000000..0d80aed59 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c @@ -0,0 +1,248 @@ +/* + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/slab.h> +#include <linux/export.h> +#include <linux/errno.h> + +#include "mlx4.h" + +struct mlx4_device_context { + struct list_head list; + struct list_head bond_list; + struct mlx4_interface *intf; + void *context; +}; + +static LIST_HEAD(intf_list); +static LIST_HEAD(dev_list); +static DEFINE_MUTEX(intf_mutex); + +static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv) +{ + struct mlx4_device_context *dev_ctx; + + dev_ctx = kmalloc(sizeof *dev_ctx, GFP_KERNEL); + if (!dev_ctx) + return; + + dev_ctx->intf = intf; + dev_ctx->context = intf->add(&priv->dev); + + if (dev_ctx->context) { + spin_lock_irq(&priv->ctx_lock); + list_add_tail(&dev_ctx->list, &priv->ctx_list); + spin_unlock_irq(&priv->ctx_lock); + } else + kfree(dev_ctx); +} + +static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv) +{ + struct mlx4_device_context *dev_ctx; + + list_for_each_entry(dev_ctx, &priv->ctx_list, list) + if (dev_ctx->intf == intf) { + spin_lock_irq(&priv->ctx_lock); + list_del(&dev_ctx->list); + spin_unlock_irq(&priv->ctx_lock); + + intf->remove(&priv->dev, dev_ctx->context); + kfree(dev_ctx); + return; + } +} + +int mlx4_register_interface(struct mlx4_interface *intf) +{ + struct mlx4_priv *priv; + + if (!intf->add || !intf->remove) + return -EINVAL; + + mutex_lock(&intf_mutex); + + list_add_tail(&intf->list, &intf_list); + list_for_each_entry(priv, &dev_list, dev_list) { + if (mlx4_is_mfunc(&priv->dev) && (intf->flags & MLX4_INTFF_BONDING)) { + mlx4_dbg(&priv->dev, + "SRIOV, disabling HA mode for intf proto %d\n", intf->protocol); + intf->flags &= ~MLX4_INTFF_BONDING; + } + mlx4_add_device(intf, priv); + } + + mutex_unlock(&intf_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_register_interface); + +void mlx4_unregister_interface(struct mlx4_interface *intf) +{ + struct mlx4_priv *priv; + + mutex_lock(&intf_mutex); + + list_for_each_entry(priv, &dev_list, dev_list) + mlx4_remove_device(intf, priv); + + list_del(&intf->list); + + mutex_unlock(&intf_mutex); +} +EXPORT_SYMBOL_GPL(mlx4_unregister_interface); + +int mlx4_do_bond(struct mlx4_dev *dev, bool enable) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_device_context *dev_ctx = NULL, *temp_dev_ctx; + unsigned long flags; + int ret; + LIST_HEAD(bond_list); + + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)) + return -ENOTSUPP; + + ret = mlx4_disable_rx_port_check(dev, enable); + if (ret) { + mlx4_err(dev, "Fail to %s rx port check\n", + enable ? "enable" : "disable"); + return ret; + } + if (enable) { + dev->flags |= MLX4_FLAG_BONDED; + } else { + ret = mlx4_virt2phy_port_map(dev, 1, 2); + if (ret) { + mlx4_err(dev, "Fail to reset port map\n"); + return ret; + } + dev->flags &= ~MLX4_FLAG_BONDED; + } + + spin_lock_irqsave(&priv->ctx_lock, flags); + list_for_each_entry_safe(dev_ctx, temp_dev_ctx, &priv->ctx_list, list) { + if (dev_ctx->intf->flags & MLX4_INTFF_BONDING) { + list_add_tail(&dev_ctx->bond_list, &bond_list); + list_del(&dev_ctx->list); + } + } + spin_unlock_irqrestore(&priv->ctx_lock, flags); + + list_for_each_entry(dev_ctx, &bond_list, bond_list) { + dev_ctx->intf->remove(dev, dev_ctx->context); + dev_ctx->context = dev_ctx->intf->add(dev); + + spin_lock_irqsave(&priv->ctx_lock, flags); + list_add_tail(&dev_ctx->list, &priv->ctx_list); + spin_unlock_irqrestore(&priv->ctx_lock, flags); + + mlx4_dbg(dev, "Inrerface for protocol %d restarted with when bonded mode is %s\n", + dev_ctx->intf->protocol, enable ? 
+ "enabled" : "disabled"); + } + return 0; +} + +void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, + unsigned long param) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_device_context *dev_ctx; + unsigned long flags; + + spin_lock_irqsave(&priv->ctx_lock, flags); + + list_for_each_entry(dev_ctx, &priv->ctx_list, list) + if (dev_ctx->intf->event) + dev_ctx->intf->event(dev, dev_ctx->context, type, param); + + spin_unlock_irqrestore(&priv->ctx_lock, flags); +} + +int mlx4_register_device(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_interface *intf; + + mutex_lock(&intf_mutex); + + dev->persist->interface_state |= MLX4_INTERFACE_STATE_UP; + list_add_tail(&priv->dev_list, &dev_list); + list_for_each_entry(intf, &intf_list, list) + mlx4_add_device(intf, priv); + + mutex_unlock(&intf_mutex); + mlx4_start_catas_poll(dev); + + return 0; +} + +void mlx4_unregister_device(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_interface *intf; + + mlx4_stop_catas_poll(dev); + mutex_lock(&intf_mutex); + + list_for_each_entry(intf, &intf_list, list) + mlx4_remove_device(intf, priv); + + list_del(&priv->dev_list); + dev->persist->interface_state &= ~MLX4_INTERFACE_STATE_UP; + + mutex_unlock(&intf_mutex); +} + +void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_device_context *dev_ctx; + unsigned long flags; + void *result = NULL; + + spin_lock_irqsave(&priv->ctx_lock, flags); + + list_for_each_entry(dev_ctx, &priv->ctx_list, list) + if (dev_ctx->intf->protocol == proto && dev_ctx->intf->get_dev) { + result = dev_ctx->intf->get_dev(dev, dev_ctx->context, port); + break; + } + + spin_unlock_irqrestore(&priv->ctx_lock, flags); + + return result; +} +EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c new file mode 100644 index 000000000..ced5ecab5 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -0,0 +1,3748 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/io-mapping.h> +#include <linux/delay.h> +#include <linux/kmod.h> + +#include <linux/mlx4/device.h> +#include <linux/mlx4/doorbell.h> + +#include "mlx4.h" +#include "fw.h" +#include "icm.h" + +MODULE_AUTHOR("Roland Dreier"); +MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRV_VERSION); + +struct workqueue_struct *mlx4_wq; + +#ifdef CONFIG_MLX4_DEBUG + +int mlx4_debug_level = 0; +module_param_named(debug_level, mlx4_debug_level, int, 0644); +MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); + +#endif /* CONFIG_MLX4_DEBUG */ + +#ifdef CONFIG_PCI_MSI + +static int msi_x = 1; +module_param(msi_x, int, 0444); +MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); + +#else /* CONFIG_PCI_MSI */ + +#define msi_x (0) + +#endif /* CONFIG_PCI_MSI */ + +static uint8_t num_vfs[3] = {0, 0, 0}; +static int num_vfs_argc; +module_param_array(num_vfs, byte , &num_vfs_argc, 0444); +MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n" + "num_vfs=port1,port2,port1+2"); + +static uint8_t probe_vf[3] = {0, 0, 0}; +static int probe_vfs_argc; +module_param_array(probe_vf, byte, &probe_vfs_argc, 0444); +MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n" + "probe_vf=port1,port2,port1+2"); + +int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; +module_param_named(log_num_mgm_entry_size, + mlx4_log_num_mgm_entry_size, int, 0444); +MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num" + " of qp per mcg, for example:" + " 10 gives 248.range: 7 <=" + " log_num_mgm_entry_size <= 12." 
+ " To activate device managed" + " flow steering when available, set to -1"); + +static bool enable_64b_cqe_eqe = true; +module_param(enable_64b_cqe_eqe, bool, 0444); +MODULE_PARM_DESC(enable_64b_cqe_eqe, + "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)"); + +#define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \ + MLX4_FUNC_CAP_EQE_CQE_STRIDE | \ + MLX4_FUNC_CAP_DMFS_A0_STATIC) + +#define RESET_PERSIST_MASK_FLAGS (MLX4_FLAG_SRIOV) + +static char mlx4_version[] = + DRV_NAME ": Mellanox ConnectX core driver v" + DRV_VERSION " (" DRV_RELDATE ")\n"; + +static struct mlx4_profile default_profile = { + .num_qp = 1 << 18, + .num_srq = 1 << 16, + .rdmarc_per_qp = 1 << 4, + .num_cq = 1 << 16, + .num_mcg = 1 << 13, + .num_mpt = 1 << 19, + .num_mtt = 1 << 20, /* It is really num mtt segements */ +}; + +static struct mlx4_profile low_mem_profile = { + .num_qp = 1 << 17, + .num_srq = 1 << 6, + .rdmarc_per_qp = 1 << 4, + .num_cq = 1 << 8, + .num_mcg = 1 << 8, + .num_mpt = 1 << 9, + .num_mtt = 1 << 7, +}; + +static int log_num_mac = 7; +module_param_named(log_num_mac, log_num_mac, int, 0444); +MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)"); + +static int log_num_vlan; +module_param_named(log_num_vlan, log_num_vlan, int, 0444); +MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)"); +/* Log2 max number of VLANs per ETH port (0-7) */ +#define MLX4_LOG_NUM_VLANS 7 +#define MLX4_MIN_LOG_NUM_VLANS 0 +#define MLX4_MIN_LOG_NUM_MAC 1 + +static bool use_prio; +module_param_named(use_prio, use_prio, bool, 0444); +MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)"); + +int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); +module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); +MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)"); + +static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE}; +static int arr_argc = 2; +module_param_array(port_type_array, int, &arr_argc, 0444); +MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default " + "1 for IB, 2 for Ethernet"); + +struct mlx4_port_config { + struct list_head list; + enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; + struct pci_dev *pdev; +}; + +static atomic_t pf_loading = ATOMIC_INIT(0); + +int mlx4_check_port_params(struct mlx4_dev *dev, + enum mlx4_port_type *port_type) +{ + int i; + + if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { + for (i = 0; i < dev->caps.num_ports - 1; i++) { + if (port_type[i] != port_type[i + 1]) { + mlx4_err(dev, "Only same port types supported on this HCA, aborting\n"); + return -EINVAL; + } + } + } + + for (i = 0; i < dev->caps.num_ports; i++) { + if (!(port_type[i] & dev->caps.supported_type[i+1])) { + mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n", + i + 1); + return -EINVAL; + } + } + return 0; +} + +static void mlx4_set_port_mask(struct mlx4_dev *dev) +{ + int i; + + for (i = 1; i <= dev->caps.num_ports; ++i) + dev->caps.port_mask[i] = dev->caps.port_type[i]; +} + +enum { + MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0, +}; + +static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) +{ + int err = 0; + struct mlx4_func func; + + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { + err = mlx4_QUERY_FUNC(dev, &func, 0); + if (err) { + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + return err; + } + dev_cap->max_eqs = func.max_eq; + dev_cap->reserved_eqs = 
func.rsvd_eqs; + dev_cap->reserved_uars = func.rsvd_uars; + err |= MLX4_QUERY_FUNC_NUM_SYS_EQS; + } + return err; +} + +static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev) +{ + struct mlx4_caps *dev_cap = &dev->caps; + + /* FW not supporting or cancelled by user */ + if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) || + !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) + return; + + /* Must have 64B CQE_EQE enabled by FW to use bigger stride + * When FW has NCSI it may decide not to report 64B CQE/EQEs + */ + if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) || + !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) { + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; + return; + } + + if (cache_line_size() == 128 || cache_line_size() == 256) { + mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n"); + /* Changing the real data inside CQE size to 32B */ + dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; + dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; + + if (mlx4_is_master(dev)) + dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE; + } else { + if (cache_line_size() != 32 && cache_line_size() != 64) + mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n"); + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; + } +} + +static int _mlx4_dev_port(struct mlx4_dev *dev, int port, + struct mlx4_port_cap *port_cap) +{ + dev->caps.vl_cap[port] = port_cap->max_vl; + dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu; + dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids; + dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys; + /* set gid and pkey table operating lengths by default + * to non-sriov values + */ + dev->caps.gid_table_len[port] = port_cap->max_gids; + dev->caps.pkey_table_len[port] = port_cap->max_pkeys; + dev->caps.port_width_cap[port] = port_cap->max_port_width; + dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu; + dev->caps.def_mac[port] = port_cap->def_mac; + dev->caps.supported_type[port] = port_cap->supported_port_types; + dev->caps.suggested_type[port] = port_cap->suggested_type; + dev->caps.default_sense[port] = port_cap->default_sense; + dev->caps.trans_type[port] = port_cap->trans_type; + dev->caps.vendor_oui[port] = port_cap->vendor_oui; + dev->caps.wavelength[port] = port_cap->wavelength; + dev->caps.trans_code[port] = port_cap->trans_code; + + return 0; +} + +static int mlx4_dev_port(struct mlx4_dev *dev, int port, + struct mlx4_port_cap *port_cap) +{ + int err = 0; + + err = mlx4_QUERY_PORT(dev, port, port_cap); + + if (err) + mlx4_err(dev, "QUERY_PORT command failed.\n"); + + return err; +} + +static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev) +{ + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)) + return; + + if (mlx4_is_mfunc(dev)) { + mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS"); + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS; + return; + } + + if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) { + mlx4_dbg(dev, + "Keep FCS is not supported - Disabling Ignore FCS"); + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS; + return; + } +} + +#define MLX4_A0_STEERING_TABLE_SIZE 256 +static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) +{ + int err; + int i; + + err = mlx4_QUERY_DEV_CAP(dev, dev_cap); + if (err) { + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); + return err; + } + mlx4_dev_cap_dump(dev, dev_cap); + + if (dev_cap->min_page_sz > PAGE_SIZE) 
{ + mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", + dev_cap->min_page_sz, PAGE_SIZE); + return -ENODEV; + } + if (dev_cap->num_ports > MLX4_MAX_PORTS) { + mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", + dev_cap->num_ports, MLX4_MAX_PORTS); + return -ENODEV; + } + + if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) { + mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", + dev_cap->uar_size, + (unsigned long long) + pci_resource_len(dev->persist->pdev, 2)); + return -ENODEV; + } + + dev->caps.num_ports = dev_cap->num_ports; + dev->caps.num_sys_eqs = dev_cap->num_sys_eqs; + dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ? + dev->caps.num_sys_eqs : + MLX4_MAX_EQ_NUM; + for (i = 1; i <= dev->caps.num_ports; ++i) { + err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i); + if (err) { + mlx4_err(dev, "QUERY_PORT command failed, aborting\n"); + return err; + } + } + + dev->caps.uar_page_size = PAGE_SIZE; + dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; + dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay; + dev->caps.bf_reg_size = dev_cap->bf_reg_size; + dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page; + dev->caps.max_sq_sg = dev_cap->max_sq_sg; + dev->caps.max_rq_sg = dev_cap->max_rq_sg; + dev->caps.max_wqes = dev_cap->max_qp_sz; + dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; + dev->caps.max_srq_wqes = dev_cap->max_srq_sz; + dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; + dev->caps.reserved_srqs = dev_cap->reserved_srqs; + dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz; + dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; + /* + * Subtract 1 from the limit because we need to allocate a + * spare CQE so the HCA HW can tell the difference between an + * empty CQ and a full CQ. + */ + dev->caps.max_cqes = dev_cap->max_cq_sz - 1; + dev->caps.reserved_cqs = dev_cap->reserved_cqs; + dev->caps.reserved_eqs = dev_cap->reserved_eqs; + dev->caps.reserved_mtts = dev_cap->reserved_mtts; + dev->caps.reserved_mrws = dev_cap->reserved_mrws; + + /* The first 128 UARs are used for EQ doorbells */ + dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars); + dev->caps.reserved_pds = dev_cap->reserved_pds; + dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? + dev_cap->reserved_xrcds : 0; + dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 
+ dev_cap->max_xrcds : 0; + dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz; + + dev->caps.max_msg_sz = dev_cap->max_msg_sz; + dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); + dev->caps.flags = dev_cap->flags; + dev->caps.flags2 = dev_cap->flags2; + dev->caps.bmme_flags = dev_cap->bmme_flags; + dev->caps.reserved_lkey = dev_cap->reserved_lkey; + dev->caps.stat_rate_support = dev_cap->stat_rate_support; + dev->caps.max_gso_sz = dev_cap->max_gso_sz; + dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; + + /* Sense port always allowed on supported devices for ConnectX-1 and -2 */ + if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT) + dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; + /* Don't do sense port on multifunction devices (for now at least) */ + if (mlx4_is_mfunc(dev)) + dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; + + if (mlx4_low_memory_profile()) { + dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC; + dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS; + } else { + dev->caps.log_num_macs = log_num_mac; + dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; + } + + for (i = 1; i <= dev->caps.num_ports; ++i) { + dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE; + if (dev->caps.supported_type[i]) { + /* if only ETH is supported - assign ETH */ + if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH) + dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; + /* if only IB is supported, assign IB */ + else if (dev->caps.supported_type[i] == + MLX4_PORT_TYPE_IB) + dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; + else { + /* if IB and ETH are supported, we set the port + * type according to user selection of port type; + * if user selected none, take the FW hint */ + if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE) + dev->caps.port_type[i] = dev->caps.suggested_type[i] ? + MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB; + else + dev->caps.port_type[i] = port_type_array[i - 1]; + } + } + /* + * Link sensing is allowed on the port if 3 conditions are true: + * 1. Both protocols are supported on the port. + * 2. Different types are supported on the port + * 3. 
FW declared that it supports link sensing + */ + mlx4_priv(dev)->sense.sense_allowed[i] = + ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) && + (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && + (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)); + + /* + * If "default_sense" bit is set, we move the port to "AUTO" mode + * and perform sense_port FW command to try and set the correct + * port type from beginning + */ + if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) { + enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE; + dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO; + mlx4_SENSE_PORT(dev, i, &sensed_port); + if (sensed_port != MLX4_PORT_TYPE_NONE) + dev->caps.port_type[i] = sensed_port; + } else { + dev->caps.possible_type[i] = dev->caps.port_type[i]; + } + + if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) { + dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs; + mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n", + i, 1 << dev->caps.log_num_macs); + } + if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) { + dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans; + mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n", + i, 1 << dev->caps.log_num_vlans); + } + } + + dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters); + + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps; + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] = + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] = + (1 << dev->caps.log_num_macs) * + (1 << dev->caps.log_num_vlans) * + dev->caps.num_ports; + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH; + + if (dev_cap->dmfs_high_rate_qpn_base > 0 && + dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) + dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base; + else + dev->caps.dmfs_high_rate_qpn_base = + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; + + if (dev_cap->dmfs_high_rate_qpn_range > 0 && + dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) { + dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range; + dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT; + dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0; + } else { + dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED; + dev->caps.dmfs_high_rate_qpn_base = + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; + dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE; + } + + dev->caps.rl_caps = dev_cap->rl_caps; + + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] = + dev->caps.dmfs_high_rate_qpn_range; + + dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] + + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] + + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] + + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH]; + + dev->caps.sqp_demux = (mlx4_is_master(dev)) ? 
MLX4_MAX_NUM_SLAVES : 0; + + if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) { + if (dev_cap->flags & + (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) { + mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n"); + dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; + dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; + } + + if (dev_cap->flags2 & + (MLX4_DEV_CAP_FLAG2_CQE_STRIDE | + MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) { + mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n"); + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; + } + } + + if ((dev->caps.flags & + (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) && + mlx4_is_master(dev)) + dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; + + if (!mlx4_is_slave(dev)) { + mlx4_enable_cqe_eqe_stride(dev); + dev->caps.alloc_res_qp_mask = + (dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) | + MLX4_RESERVE_A0_QP; + + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) && + dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) { + mlx4_warn(dev, "Old device ETS support detected\n"); + mlx4_warn(dev, "Consider upgrading device FW.\n"); + dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG; + } + + } else { + dev->caps.alloc_res_qp_mask = 0; + } + + mlx4_enable_ignore_fcs(dev); + + return 0; +} + +static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev, + enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + u32 lnkcap1, lnkcap2; + int err1, err2; + +#define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */ + + *speed = PCI_SPEED_UNKNOWN; + *width = PCIE_LNK_WIDTH_UNKNOWN; + + err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP, + &lnkcap1); + err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2, + &lnkcap2); + if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */ + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) + *speed = PCIE_SPEED_8_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) + *speed = PCIE_SPEED_5_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) + *speed = PCIE_SPEED_2_5GT; + } + if (!err1) { + *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT; + if (!lnkcap2) { /* pre-r3.0 */ + if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB) + *speed = PCIE_SPEED_5_0GT; + else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB) + *speed = PCIE_SPEED_2_5GT; + } + } + + if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) { + return err1 ? err1 : + err2 ? err2 : -EINVAL; + } + return 0; +} + +static void mlx4_check_pcie_caps(struct mlx4_dev *dev) +{ + enum pcie_link_width width, width_cap; + enum pci_bus_speed speed, speed_cap; + int err; + +#define PCIE_SPEED_STR(speed) \ + (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \ + speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \ + speed == PCIE_SPEED_2_5GT ? 
"2.5GT/s" : \ + "Unknown") + + err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap); + if (err) { + mlx4_warn(dev, + "Unable to determine PCIe device BW capabilities\n"); + return; + } + + err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width); + if (err || speed == PCI_SPEED_UNKNOWN || + width == PCIE_LNK_WIDTH_UNKNOWN) { + mlx4_warn(dev, + "Unable to determine PCI device chain minimum BW\n"); + return; + } + + if (width != width_cap || speed != speed_cap) + mlx4_warn(dev, + "PCIe BW is different than device's capability\n"); + + mlx4_info(dev, "PCIe link speed is %s, device supports %s\n", + PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap)); + mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n", + width, width_cap); + return; +} + +/*The function checks if there are live vf, return the num of them*/ +static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *s_state; + int i; + int ret = 0; + + for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) { + s_state = &priv->mfunc.master.slave_state[i]; + if (s_state->active && s_state->last_cmd != + MLX4_COMM_CMD_RESET) { + mlx4_warn(dev, "%s: slave: %d is still active\n", + __func__, i); + ret++; + } + } + return ret; +} + +int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey) +{ + u32 qk = MLX4_RESERVED_QKEY_BASE; + + if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX || + qpn < dev->phys_caps.base_proxy_sqpn) + return -EINVAL; + + if (qpn >= dev->phys_caps.base_tunnel_sqpn) + /* tunnel qp */ + qk += qpn - dev->phys_caps.base_tunnel_sqpn; + else + qk += qpn - dev->phys_caps.base_proxy_sqpn; + *qkey = qk; + return 0; +} +EXPORT_SYMBOL(mlx4_get_parav_qkey); + +void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val) +{ + struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); + + if (!mlx4_is_master(dev)) + return; + + priv->virt2phys_pkey[slave][port - 1][i] = val; +} +EXPORT_SYMBOL(mlx4_sync_pkey_table); + +void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid) +{ + struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); + + if (!mlx4_is_master(dev)) + return; + + priv->slave_node_guids[slave] = guid; +} +EXPORT_SYMBOL(mlx4_put_slave_node_guid); + +__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); + + if (!mlx4_is_master(dev)) + return 0; + + return priv->slave_node_guids[slave]; +} +EXPORT_SYMBOL(mlx4_get_slave_node_guid); + +int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *s_slave; + + if (!mlx4_is_master(dev)) + return 0; + + s_slave = &priv->mfunc.master.slave_state[slave]; + return !!s_slave->active; +} +EXPORT_SYMBOL(mlx4_is_slave_active); + +static void slave_adjust_steering_mode(struct mlx4_dev *dev, + struct mlx4_dev_cap *dev_cap, + struct mlx4_init_hca_param *hca_param) +{ + dev->caps.steering_mode = hca_param->steering_mode; + if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { + dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; + dev->caps.fs_log_max_ucast_qp_range_size = + dev_cap->fs_log_max_ucast_qp_range_size; + } else + dev->caps.num_qp_per_mgm = + 4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2); + + mlx4_dbg(dev, "Steering mode is: %s\n", + mlx4_steering_mode_str(dev->caps.steering_mode)); +} + +static int mlx4_slave_cap(struct mlx4_dev 
*dev) +{ + int err; + u32 page_size; + struct mlx4_dev_cap dev_cap; + struct mlx4_func_cap func_cap; + struct mlx4_init_hca_param hca_param; + u8 i; + + memset(&hca_param, 0, sizeof(hca_param)); + err = mlx4_QUERY_HCA(dev, &hca_param); + if (err) { + mlx4_err(dev, "QUERY_HCA command failed, aborting\n"); + return err; + } + + /* fail if the hca has an unknown global capability + * at this time global_caps should be always zeroed + */ + if (hca_param.global_caps) { + mlx4_err(dev, "Unknown hca global capabilities\n"); + return -ENOSYS; + } + + mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; + + dev->caps.hca_core_clock = hca_param.hca_core_clock; + + memset(&dev_cap, 0, sizeof(dev_cap)); + dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; + err = mlx4_dev_cap(dev, &dev_cap); + if (err) { + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); + return err; + } + + err = mlx4_QUERY_FW(dev); + if (err) + mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n"); + + page_size = ~dev->caps.page_size_cap + 1; + mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); + if (page_size > PAGE_SIZE) { + mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", + page_size, PAGE_SIZE); + return -ENODEV; + } + + /* slave gets uar page size from QUERY_HCA fw command */ + dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12); + + /* TODO: relax this assumption */ + if (dev->caps.uar_page_size != PAGE_SIZE) { + mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n", + dev->caps.uar_page_size, PAGE_SIZE); + return -ENODEV; + } + + memset(&func_cap, 0, sizeof(func_cap)); + err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); + if (err) { + mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n", + err); + return err; + } + + if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) != + PF_CONTEXT_BEHAVIOUR_MASK) { + mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n", + func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK); + return -ENOSYS; + } + + dev->caps.num_ports = func_cap.num_ports; + dev->quotas.qp = func_cap.qp_quota; + dev->quotas.srq = func_cap.srq_quota; + dev->quotas.cq = func_cap.cq_quota; + dev->quotas.mpt = func_cap.mpt_quota; + dev->quotas.mtt = func_cap.mtt_quota; + dev->caps.num_qps = 1 << hca_param.log_num_qps; + dev->caps.num_srqs = 1 << hca_param.log_num_srqs; + dev->caps.num_cqs = 1 << hca_param.log_num_cqs; + dev->caps.num_mpts = 1 << hca_param.log_mpt_sz; + dev->caps.num_eqs = func_cap.max_eq; + dev->caps.reserved_eqs = func_cap.reserved_eq; + dev->caps.reserved_lkey = func_cap.reserved_lkey; + dev->caps.num_pds = MLX4_NUM_PDS; + dev->caps.num_mgms = 0; + dev->caps.num_amgms = 0; + + if (dev->caps.num_ports > MLX4_MAX_PORTS) { + mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", + dev->caps.num_ports, MLX4_MAX_PORTS); + return -ENODEV; + } + + dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL); + dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); + dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); + dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); + dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); + + if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy || + !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy || + !dev->caps.qp0_qkey) { + err = -ENOMEM; + goto err_mem; + } + + for (i = 1; i <= dev->caps.num_ports; ++i) { + err 
= mlx4_QUERY_FUNC_CAP(dev, i, &func_cap); + if (err) { + mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n", + i, err); + goto err_mem; + } + dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey; + dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn; + dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn; + dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn; + dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn; + dev->caps.port_mask[i] = dev->caps.port_type[i]; + dev->caps.phys_port_id[i] = func_cap.phys_port_id; + if (mlx4_get_slave_pkey_gid_tbl_len(dev, i, + &dev->caps.gid_table_len[i], + &dev->caps.pkey_table_len[i])) + goto err_mem; + } + + if (dev->caps.uar_page_size * (dev->caps.num_uars - + dev->caps.reserved_uars) > + pci_resource_len(dev->persist->pdev, + 2)) { + mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", + dev->caps.uar_page_size * dev->caps.num_uars, + (unsigned long long) + pci_resource_len(dev->persist->pdev, 2)); + goto err_mem; + } + + if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) { + dev->caps.eqe_size = 64; + dev->caps.eqe_factor = 1; + } else { + dev->caps.eqe_size = 32; + dev->caps.eqe_factor = 0; + } + + if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) { + dev->caps.cqe_size = 64; + dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; + } else { + dev->caps.cqe_size = 32; + } + + if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) { + dev->caps.eqe_size = hca_param.eqe_size; + dev->caps.eqe_factor = 0; + } + + if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) { + dev->caps.cqe_size = hca_param.cqe_size; + /* User still need to know when CQE > 32B */ + dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; + } + + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; + mlx4_warn(dev, "Timestamping is not supported in slave mode\n"); + + slave_adjust_steering_mode(dev, &dev_cap, &hca_param); + mlx4_dbg(dev, "RSS support for IP fragments is %s\n", + hca_param.rss_ip_frags ? "on" : "off"); + + if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP && + dev->caps.bf_reg_size) + dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP; + + if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP) + dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP; + + return 0; + +err_mem: + kfree(dev->caps.qp0_qkey); + kfree(dev->caps.qp0_tunnel); + kfree(dev->caps.qp0_proxy); + kfree(dev->caps.qp1_tunnel); + kfree(dev->caps.qp1_proxy); + dev->caps.qp0_qkey = NULL; + dev->caps.qp0_tunnel = NULL; + dev->caps.qp0_proxy = NULL; + dev->caps.qp1_tunnel = NULL; + dev->caps.qp1_proxy = NULL; + + return err; +} + +static void mlx4_request_modules(struct mlx4_dev *dev) +{ + int port; + int has_ib_port = false; + int has_eth_port = false; +#define EN_DRV_NAME "mlx4_en" +#define IB_DRV_NAME "mlx4_ib" + + for (port = 1; port <= dev->caps.num_ports; port++) { + if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB) + has_ib_port = true; + else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) + has_eth_port = true; + } + + if (has_eth_port) + request_module_nowait(EN_DRV_NAME); + if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) + request_module_nowait(IB_DRV_NAME); +} + +/* + * Change the port configuration of the device. + * Every user of this function must hold the port mutex. 
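+ *
+ * Roughly, if any requested type differs from the current one, the
+ * device is unregistered, each port is closed and re-configured via
+ * SET_PORT with the new type, and the device is then re-registered
+ * and the protocol modules are requested again (see the body below).
+ * The sysfs 'port_type' store handler is one such caller and takes
+ * port_mutex around the call.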
+ */ +int mlx4_change_port_types(struct mlx4_dev *dev, + enum mlx4_port_type *port_types) +{ + int err = 0; + int change = 0; + int port; + + for (port = 0; port < dev->caps.num_ports; port++) { + /* Change the port type only if the new type is different + * from the current, and not set to Auto */ + if (port_types[port] != dev->caps.port_type[port + 1]) + change = 1; + } + if (change) { + mlx4_unregister_device(dev); + for (port = 1; port <= dev->caps.num_ports; port++) { + mlx4_CLOSE_PORT(dev, port); + dev->caps.port_type[port] = port_types[port - 1]; + err = mlx4_SET_PORT(dev, port, -1); + if (err) { + mlx4_err(dev, "Failed to set port %d, aborting\n", + port); + goto out; + } + } + mlx4_set_port_mask(dev); + err = mlx4_register_device(dev); + if (err) { + mlx4_err(dev, "Failed to register device\n"); + goto out; + } + mlx4_request_modules(dev); + } + +out: + return err; +} + +static ssize_t show_port_type(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, + port_attr); + struct mlx4_dev *mdev = info->dev; + char type[8]; + + sprintf(type, "%s", + (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ? + "ib" : "eth"); + if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO) + sprintf(buf, "auto (%s)\n", type); + else + sprintf(buf, "%s\n", type); + + return strlen(buf); +} + +static ssize_t set_port_type(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, + port_attr); + struct mlx4_dev *mdev = info->dev; + struct mlx4_priv *priv = mlx4_priv(mdev); + enum mlx4_port_type types[MLX4_MAX_PORTS]; + enum mlx4_port_type new_types[MLX4_MAX_PORTS]; + static DEFINE_MUTEX(set_port_type_mutex); + int i; + int err = 0; + + mutex_lock(&set_port_type_mutex); + + if (!strcmp(buf, "ib\n")) + info->tmp_type = MLX4_PORT_TYPE_IB; + else if (!strcmp(buf, "eth\n")) + info->tmp_type = MLX4_PORT_TYPE_ETH; + else if (!strcmp(buf, "auto\n")) + info->tmp_type = MLX4_PORT_TYPE_AUTO; + else { + mlx4_err(mdev, "%s is not supported port type\n", buf); + err = -EINVAL; + goto err_out; + } + + mlx4_stop_sense(mdev); + mutex_lock(&priv->port_mutex); + /* Possible type is always the one that was delivered */ + mdev->caps.possible_type[info->port] = info->tmp_type; + + for (i = 0; i < mdev->caps.num_ports; i++) { + types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type : + mdev->caps.possible_type[i+1]; + if (types[i] == MLX4_PORT_TYPE_AUTO) + types[i] = mdev->caps.port_type[i+1]; + } + + if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && + !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) { + for (i = 1; i <= mdev->caps.num_ports; i++) { + if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { + mdev->caps.possible_type[i] = mdev->caps.port_type[i]; + err = -EINVAL; + } + } + } + if (err) { + mlx4_err(mdev, "Auto sensing is not supported on this HCA. 
Set only 'eth' or 'ib' for both ports (should be the same)\n"); + goto out; + } + + mlx4_do_sense_ports(mdev, new_types, types); + + err = mlx4_check_port_params(mdev, new_types); + if (err) + goto out; + + /* We are about to apply the changes after the configuration + * was verified, no need to remember the temporary types + * any more */ + for (i = 0; i < mdev->caps.num_ports; i++) + priv->port[i + 1].tmp_type = 0; + + err = mlx4_change_port_types(mdev, new_types); + +out: + mlx4_start_sense(mdev); + mutex_unlock(&priv->port_mutex); +err_out: + mutex_unlock(&set_port_type_mutex); + + return err ? err : count; +} + +enum ibta_mtu { + IB_MTU_256 = 1, + IB_MTU_512 = 2, + IB_MTU_1024 = 3, + IB_MTU_2048 = 4, + IB_MTU_4096 = 5 +}; + +static inline int int_to_ibta_mtu(int mtu) +{ + switch (mtu) { + case 256: return IB_MTU_256; + case 512: return IB_MTU_512; + case 1024: return IB_MTU_1024; + case 2048: return IB_MTU_2048; + case 4096: return IB_MTU_4096; + default: return -1; + } +} + +static inline int ibta_mtu_to_int(enum ibta_mtu mtu) +{ + switch (mtu) { + case IB_MTU_256: return 256; + case IB_MTU_512: return 512; + case IB_MTU_1024: return 1024; + case IB_MTU_2048: return 2048; + case IB_MTU_4096: return 4096; + default: return -1; + } +} + +static ssize_t show_port_ib_mtu(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, + port_mtu_attr); + struct mlx4_dev *mdev = info->dev; + + if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) + mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); + + sprintf(buf, "%d\n", + ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port])); + return strlen(buf); +} + +static ssize_t set_port_ib_mtu(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, + port_mtu_attr); + struct mlx4_dev *mdev = info->dev; + struct mlx4_priv *priv = mlx4_priv(mdev); + int err, port, mtu, ibta_mtu = -1; + + if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) { + mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); + return -EINVAL; + } + + err = kstrtoint(buf, 0, &mtu); + if (!err) + ibta_mtu = int_to_ibta_mtu(mtu); + + if (err || ibta_mtu < 0) { + mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf); + return -EINVAL; + } + + mdev->caps.port_ib_mtu[info->port] = ibta_mtu; + + mlx4_stop_sense(mdev); + mutex_lock(&priv->port_mutex); + mlx4_unregister_device(mdev); + for (port = 1; port <= mdev->caps.num_ports; port++) { + mlx4_CLOSE_PORT(mdev, port); + err = mlx4_SET_PORT(mdev, port, -1); + if (err) { + mlx4_err(mdev, "Failed to set port %d, aborting\n", + port); + goto err_set_port; + } + } + err = mlx4_register_device(mdev); +err_set_port: + mutex_unlock(&priv->port_mutex); + mlx4_start_sense(mdev); + return err ? 
err : count; +} + +int mlx4_bond(struct mlx4_dev *dev) +{ + int ret = 0; + struct mlx4_priv *priv = mlx4_priv(dev); + + mutex_lock(&priv->bond_mutex); + + if (!mlx4_is_bonded(dev)) + ret = mlx4_do_bond(dev, true); + else + ret = 0; + + mutex_unlock(&priv->bond_mutex); + if (ret) + mlx4_err(dev, "Failed to bond device: %d\n", ret); + else + mlx4_dbg(dev, "Device is bonded\n"); + return ret; +} +EXPORT_SYMBOL_GPL(mlx4_bond); + +int mlx4_unbond(struct mlx4_dev *dev) +{ + int ret = 0; + struct mlx4_priv *priv = mlx4_priv(dev); + + mutex_lock(&priv->bond_mutex); + + if (mlx4_is_bonded(dev)) + ret = mlx4_do_bond(dev, false); + + mutex_unlock(&priv->bond_mutex); + if (ret) + mlx4_err(dev, "Failed to unbond device: %d\n", ret); + else + mlx4_dbg(dev, "Device is unbonded\n"); + return ret; +} +EXPORT_SYMBOL_GPL(mlx4_unbond); + + +int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p) +{ + u8 port1 = v2p->port1; + u8 port2 = v2p->port2; + struct mlx4_priv *priv = mlx4_priv(dev); + int err; + + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)) + return -ENOTSUPP; + + mutex_lock(&priv->bond_mutex); + + /* zero means keep current mapping for this port */ + if (port1 == 0) + port1 = priv->v2p.port1; + if (port2 == 0) + port2 = priv->v2p.port2; + + if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) || + (port2 < 1) || (port2 > MLX4_MAX_PORTS) || + (port1 == 2 && port2 == 1)) { + /* besides boundary checks cross mapping makes + * no sense and therefore not allowed */ + err = -EINVAL; + } else if ((port1 == priv->v2p.port1) && + (port2 == priv->v2p.port2)) { + err = 0; + } else { + err = mlx4_virt2phy_port_map(dev, port1, port2); + if (!err) { + mlx4_dbg(dev, "port map changed: [%d][%d]\n", + port1, port2); + priv->v2p.port1 = port1; + priv->v2p.port2 = port2; + } else { + mlx4_err(dev, "Failed to change port mape: %d\n", err); + } + } + + mutex_unlock(&priv->bond_mutex); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_port_map_set); + +static int mlx4_load_fw(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err; + + priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, + GFP_HIGHUSER | __GFP_NOWARN, 0); + if (!priv->fw.fw_icm) { + mlx4_err(dev, "Couldn't allocate FW area, aborting\n"); + return -ENOMEM; + } + + err = mlx4_MAP_FA(dev, priv->fw.fw_icm); + if (err) { + mlx4_err(dev, "MAP_FA command failed, aborting\n"); + goto err_free; + } + + err = mlx4_RUN_FW(dev); + if (err) { + mlx4_err(dev, "RUN_FW command failed, aborting\n"); + goto err_unmap_fa; + } + + return 0; + +err_unmap_fa: + mlx4_UNMAP_FA(dev); + +err_free: + mlx4_free_icm(dev, priv->fw.fw_icm, 0); + return err; +} + +static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, + int cmpt_entry_sz) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err; + int num_eqs; + + err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table, + cmpt_base + + ((u64) (MLX4_CMPT_TYPE_QP * + cmpt_entry_sz) << MLX4_CMPT_SHIFT), + cmpt_entry_sz, dev->caps.num_qps, + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], + 0, 0); + if (err) + goto err; + + err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table, + cmpt_base + + ((u64) (MLX4_CMPT_TYPE_SRQ * + cmpt_entry_sz) << MLX4_CMPT_SHIFT), + cmpt_entry_sz, dev->caps.num_srqs, + dev->caps.reserved_srqs, 0, 0); + if (err) + goto err_qp; + + err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table, + cmpt_base + + ((u64) (MLX4_CMPT_TYPE_CQ * + cmpt_entry_sz) << MLX4_CMPT_SHIFT), + cmpt_entry_sz, dev->caps.num_cqs, + dev->caps.reserved_cqs, 0, 0); + if (err) + goto 
err_srq; + + num_eqs = dev->phys_caps.num_phys_eqs; + err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, + cmpt_base + + ((u64) (MLX4_CMPT_TYPE_EQ * + cmpt_entry_sz) << MLX4_CMPT_SHIFT), + cmpt_entry_sz, num_eqs, num_eqs, 0, 0); + if (err) + goto err_cq; + + return 0; + +err_cq: + mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); + +err_srq: + mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); + +err_qp: + mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); + +err: + return err; +} + +static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, + struct mlx4_init_hca_param *init_hca, u64 icm_size) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + u64 aux_pages; + int num_eqs; + int err; + + err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); + if (err) { + mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n"); + return err; + } + + mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n", + (unsigned long long) icm_size >> 10, + (unsigned long long) aux_pages << 2); + + priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, + GFP_HIGHUSER | __GFP_NOWARN, 0); + if (!priv->fw.aux_icm) { + mlx4_err(dev, "Couldn't allocate aux memory, aborting\n"); + return -ENOMEM; + } + + err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm); + if (err) { + mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n"); + goto err_free_aux; + } + + err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz); + if (err) { + mlx4_err(dev, "Failed to map cMPT context memory, aborting\n"); + goto err_unmap_aux; + } + + + num_eqs = dev->phys_caps.num_phys_eqs; + err = mlx4_init_icm_table(dev, &priv->eq_table.table, + init_hca->eqc_base, dev_cap->eqc_entry_sz, + num_eqs, num_eqs, 0, 0); + if (err) { + mlx4_err(dev, "Failed to map EQ context memory, aborting\n"); + goto err_unmap_cmpt; + } + + /* + * Reserved MTT entries must be aligned up to a cacheline + * boundary, since the FW will write to them, while the driver + * writes to all other MTT entries. 
(The variable + * dev->caps.mtt_entry_sz below is really the MTT segment + * size, not the raw entry size) + */ + dev->caps.reserved_mtts = + ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz, + dma_get_cache_alignment()) / dev->caps.mtt_entry_sz; + + err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, + init_hca->mtt_base, + dev->caps.mtt_entry_sz, + dev->caps.num_mtts, + dev->caps.reserved_mtts, 1, 0); + if (err) { + mlx4_err(dev, "Failed to map MTT context memory, aborting\n"); + goto err_unmap_eq; + } + + err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table, + init_hca->dmpt_base, + dev_cap->dmpt_entry_sz, + dev->caps.num_mpts, + dev->caps.reserved_mrws, 1, 1); + if (err) { + mlx4_err(dev, "Failed to map dMPT context memory, aborting\n"); + goto err_unmap_mtt; + } + + err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table, + init_hca->qpc_base, + dev_cap->qpc_entry_sz, + dev->caps.num_qps, + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], + 0, 0); + if (err) { + mlx4_err(dev, "Failed to map QP context memory, aborting\n"); + goto err_unmap_dmpt; + } + + err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table, + init_hca->auxc_base, + dev_cap->aux_entry_sz, + dev->caps.num_qps, + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], + 0, 0); + if (err) { + mlx4_err(dev, "Failed to map AUXC context memory, aborting\n"); + goto err_unmap_qp; + } + + err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table, + init_hca->altc_base, + dev_cap->altc_entry_sz, + dev->caps.num_qps, + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], + 0, 0); + if (err) { + mlx4_err(dev, "Failed to map ALTC context memory, aborting\n"); + goto err_unmap_auxc; + } + + err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table, + init_hca->rdmarc_base, + dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, + dev->caps.num_qps, + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], + 0, 0); + if (err) { + mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); + goto err_unmap_altc; + } + + err = mlx4_init_icm_table(dev, &priv->cq_table.table, + init_hca->cqc_base, + dev_cap->cqc_entry_sz, + dev->caps.num_cqs, + dev->caps.reserved_cqs, 0, 0); + if (err) { + mlx4_err(dev, "Failed to map CQ context memory, aborting\n"); + goto err_unmap_rdmarc; + } + + err = mlx4_init_icm_table(dev, &priv->srq_table.table, + init_hca->srqc_base, + dev_cap->srq_entry_sz, + dev->caps.num_srqs, + dev->caps.reserved_srqs, 0, 0); + if (err) { + mlx4_err(dev, "Failed to map SRQ context memory, aborting\n"); + goto err_unmap_cq; + } + + /* + * For flow steering device managed mode it is required to use + * mlx4_init_icm_table. For B0 steering mode it's not strictly + * required, but for simplicity just map the whole multicast + * group table now. The table isn't very big and it's a lot + * easier than trying to track ref counts. 
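+ *
+ * The table below holds num_mgms + num_amgms entries of
+ * mlx4_get_mgm_entry_size() bytes each.  As a worked example of the
+ * entry-size/QP relation used elsewhere in this file, an entry of
+ * 2^n bytes holds 4 * (2^n / 16 - 2) QPs, so n = 10 gives
+ * 4 * (64 - 2) = 248 QPs per multicast group.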
+ */ + err = mlx4_init_icm_table(dev, &priv->mcg_table.table, + init_hca->mc_base, + mlx4_get_mgm_entry_size(dev), + dev->caps.num_mgms + dev->caps.num_amgms, + dev->caps.num_mgms + dev->caps.num_amgms, + 0, 0); + if (err) { + mlx4_err(dev, "Failed to map MCG context memory, aborting\n"); + goto err_unmap_srq; + } + + return 0; + +err_unmap_srq: + mlx4_cleanup_icm_table(dev, &priv->srq_table.table); + +err_unmap_cq: + mlx4_cleanup_icm_table(dev, &priv->cq_table.table); + +err_unmap_rdmarc: + mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); + +err_unmap_altc: + mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); + +err_unmap_auxc: + mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); + +err_unmap_qp: + mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); + +err_unmap_dmpt: + mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); + +err_unmap_mtt: + mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); + +err_unmap_eq: + mlx4_cleanup_icm_table(dev, &priv->eq_table.table); + +err_unmap_cmpt: + mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); + mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); + mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); + mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); + +err_unmap_aux: + mlx4_UNMAP_ICM_AUX(dev); + +err_free_aux: + mlx4_free_icm(dev, priv->fw.aux_icm, 0); + + return err; +} + +static void mlx4_free_icms(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + mlx4_cleanup_icm_table(dev, &priv->mcg_table.table); + mlx4_cleanup_icm_table(dev, &priv->srq_table.table); + mlx4_cleanup_icm_table(dev, &priv->cq_table.table); + mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); + mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); + mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); + mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); + mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); + mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); + mlx4_cleanup_icm_table(dev, &priv->eq_table.table); + mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); + mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); + mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); + mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); + + mlx4_UNMAP_ICM_AUX(dev); + mlx4_free_icm(dev, priv->fw.aux_icm, 0); +} + +static void mlx4_slave_exit(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + mutex_lock(&priv->cmd.slave_cmd_mutex); + if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, + MLX4_COMM_TIME)) + mlx4_warn(dev, "Failed to close slave function\n"); + mutex_unlock(&priv->cmd.slave_cmd_mutex); +} + +static int map_bf_area(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + resource_size_t bf_start; + resource_size_t bf_len; + int err = 0; + + if (!dev->caps.bf_reg_size) + return -ENXIO; + + bf_start = pci_resource_start(dev->persist->pdev, 2) + + (dev->caps.num_uars << PAGE_SHIFT); + bf_len = pci_resource_len(dev->persist->pdev, 2) - + (dev->caps.num_uars << PAGE_SHIFT); + priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); + if (!priv->bf_mapping) + err = -ENOMEM; + + return err; +} + +static void unmap_bf_area(struct mlx4_dev *dev) +{ + if (mlx4_priv(dev)->bf_mapping) + io_mapping_free(mlx4_priv(dev)->bf_mapping); +} + +cycle_t mlx4_read_clock(struct mlx4_dev *dev) +{ + u32 clockhi, clocklo, clockhi1; + cycle_t cycles; + int i; + struct mlx4_priv *priv = mlx4_priv(dev); + + for (i = 0; i < 10; i++) { 
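+ /* Read clock-high, then clock-low, then clock-high again; if the
+ * two high words match, the low word did not wrap between the two
+ * reads and the combined 64-bit value is consistent.  Otherwise
+ * retry, at most 10 times.
+ */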
+ clockhi = swab32(readl(priv->clock_mapping)); + clocklo = swab32(readl(priv->clock_mapping + 4)); + clockhi1 = swab32(readl(priv->clock_mapping)); + if (clockhi == clockhi1) + break; + } + + cycles = (u64) clockhi << 32 | (u64) clocklo; + + return cycles; +} +EXPORT_SYMBOL_GPL(mlx4_read_clock); + + +static int map_internal_clock(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + priv->clock_mapping = + ioremap(pci_resource_start(dev->persist->pdev, + priv->fw.clock_bar) + + priv->fw.clock_offset, MLX4_CLOCK_SIZE); + + if (!priv->clock_mapping) + return -ENOMEM; + + return 0; +} + +static void unmap_internal_clock(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + if (priv->clock_mapping) + iounmap(priv->clock_mapping); +} + +static void mlx4_close_hca(struct mlx4_dev *dev) +{ + unmap_internal_clock(dev); + unmap_bf_area(dev); + if (mlx4_is_slave(dev)) + mlx4_slave_exit(dev); + else { + mlx4_CLOSE_HCA(dev, 0); + mlx4_free_icms(dev); + } +} + +static void mlx4_close_fw(struct mlx4_dev *dev) +{ + if (!mlx4_is_slave(dev)) { + mlx4_UNMAP_FA(dev); + mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); + } +} + +static int mlx4_comm_check_offline(struct mlx4_dev *dev) +{ +#define COMM_CHAN_OFFLINE_OFFSET 0x09 + + u32 comm_flags; + u32 offline_bit; + unsigned long end; + struct mlx4_priv *priv = mlx4_priv(dev); + + end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies; + while (time_before(jiffies, end)) { + comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm + + MLX4_COMM_CHAN_FLAGS)); + offline_bit = (comm_flags & + (u32)(1 << COMM_CHAN_OFFLINE_OFFSET)); + if (!offline_bit) + return 0; + /* There are cases as part of AER/Reset flow that PF needs + * around 100 msec to load. We therefore sleep for 100 msec + * to allow other tasks to make use of that CPU during this + * time interval. 
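+ * The loop as a whole is bounded by MLX4_COMM_OFFLINE_TIME_OUT,
+ * folded into 'end' above.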
+ */ + msleep(100); + } + mlx4_err(dev, "Communication channel is offline.\n"); + return -EIO; +} + +static void mlx4_reset_vf_support(struct mlx4_dev *dev) +{ +#define COMM_CHAN_RST_OFFSET 0x1e + + struct mlx4_priv *priv = mlx4_priv(dev); + u32 comm_rst; + u32 comm_caps; + + comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm + + MLX4_COMM_CHAN_CAPS)); + comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET)); + + if (comm_rst) + dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET; +} + +static int mlx4_init_slave(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + u64 dma = (u64) priv->mfunc.vhcr_dma; + int ret_from_reset = 0; + u32 slave_read; + u32 cmd_channel_ver; + + if (atomic_read(&pf_loading)) { + mlx4_warn(dev, "PF is not ready - Deferring probe\n"); + return -EPROBE_DEFER; + } + + mutex_lock(&priv->cmd.slave_cmd_mutex); + priv->cmd.max_cmds = 1; + if (mlx4_comm_check_offline(dev)) { + mlx4_err(dev, "PF is not responsive, skipping initialization\n"); + goto err_offline; + } + + mlx4_reset_vf_support(dev); + mlx4_warn(dev, "Sending reset\n"); + ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, + MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME); + /* if we are in the middle of flr the slave will try + * NUM_OF_RESET_RETRIES times before leaving.*/ + if (ret_from_reset) { + if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { + mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n"); + mutex_unlock(&priv->cmd.slave_cmd_mutex); + return -EPROBE_DEFER; + } else + goto err; + } + + /* check the driver version - the slave I/F revision + * must match the master's */ + slave_read = swab32(readl(&priv->mfunc.comm->slave_read)); + cmd_channel_ver = mlx4_comm_get_version(); + + if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) != + MLX4_COMM_GET_IF_REV(slave_read)) { + mlx4_err(dev, "slave driver version is not supported by the master\n"); + goto err; + } + + mlx4_warn(dev, "Sending vhcr0\n"); + if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48, + MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) + goto err; + if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32, + MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) + goto err; + if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16, + MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) + goto err; + if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, + MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) + goto err; + + mutex_unlock(&priv->cmd.slave_cmd_mutex); + return 0; + +err: + mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0); +err_offline: + mutex_unlock(&priv->cmd.slave_cmd_mutex); + return -EIO; +} + +static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev) +{ + int i; + + for (i = 1; i <= dev->caps.num_ports; i++) { + if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) + dev->caps.gid_table_len[i] = + mlx4_get_slave_num_gids(dev, 0, i); + else + dev->caps.gid_table_len[i] = 1; + dev->caps.pkey_table_len[i] = + dev->phys_caps.pkey_phys_table_len[i] - 1; + } +} + +static int choose_log_fs_mgm_entry_size(int qp_per_entry) +{ + int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; + + for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE; + i++) { + if (qp_per_entry <= 4 * ((1 << i) / 16 - 2)) + break; + } + + return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? 
i : -1; +} + +static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode) +{ + switch (dmfs_high_steer_mode) { + case MLX4_STEERING_DMFS_A0_DEFAULT: + return "default performance"; + + case MLX4_STEERING_DMFS_A0_DYNAMIC: + return "dynamic hybrid mode"; + + case MLX4_STEERING_DMFS_A0_STATIC: + return "performance optimized for limited rule configuration (static)"; + + case MLX4_STEERING_DMFS_A0_DISABLE: + return "disabled performance optimized steering"; + + case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED: + return "performance optimized steering not supported"; + + default: + return "Unrecognized mode"; + } +} + +#define MLX4_DMFS_A0_STEERING (1UL << 2) + +static void choose_steering_mode(struct mlx4_dev *dev, + struct mlx4_dev_cap *dev_cap) +{ + if (mlx4_log_num_mgm_entry_size <= 0) { + if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) { + if (dev->caps.dmfs_high_steer_mode == + MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) + mlx4_err(dev, "DMFS high rate mode not supported\n"); + else + dev->caps.dmfs_high_steer_mode = + MLX4_STEERING_DMFS_A0_STATIC; + } + } + + if (mlx4_log_num_mgm_entry_size <= 0 && + dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && + (!mlx4_is_mfunc(dev) || + (dev_cap->fs_max_num_qp_per_entry >= + (dev->persist->num_vfs + 1))) && + choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= + MLX4_MIN_MGM_LOG_ENTRY_SIZE) { + dev->oper_log_mgm_entry_size = + choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry); + dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; + dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; + dev->caps.fs_log_max_ucast_qp_range_size = + dev_cap->fs_log_max_ucast_qp_range_size; + } else { + if (dev->caps.dmfs_high_steer_mode != + MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) + dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE; + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER && + dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) + dev->caps.steering_mode = MLX4_STEERING_MODE_B0; + else { + dev->caps.steering_mode = MLX4_STEERING_MODE_A0; + + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || + dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) + mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n"); + } + dev->oper_log_mgm_entry_size = + mlx4_log_num_mgm_entry_size > 0 ? + mlx4_log_num_mgm_entry_size : + MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; + dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); + } + mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n", + mlx4_steering_mode_str(dev->caps.steering_mode), + dev->oper_log_mgm_entry_size, + mlx4_log_num_mgm_entry_size); +} + +static void choose_tunnel_offload_mode(struct mlx4_dev *dev, + struct mlx4_dev_cap *dev_cap) +{ + if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED && + dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) + dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN; + else + dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE; + + mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode + == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? 
"vxlan" : "none"); +} + +static int mlx4_validate_optimized_steering(struct mlx4_dev *dev) +{ + int i; + struct mlx4_port_cap port_cap; + + if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) + return -EINVAL; + + for (i = 1; i <= dev->caps.num_ports; i++) { + if (mlx4_dev_port(dev, i, &port_cap)) { + mlx4_err(dev, + "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n"); + } else if ((dev->caps.dmfs_high_steer_mode != + MLX4_STEERING_DMFS_A0_DEFAULT) && + (port_cap.dmfs_optimized_state == + !!(dev->caps.dmfs_high_steer_mode == + MLX4_STEERING_DMFS_A0_DISABLE))) { + mlx4_err(dev, + "DMFS high rate steer mode differ, driver requested %s but %s in FW.\n", + dmfs_high_rate_steering_mode_str( + dev->caps.dmfs_high_steer_mode), + (port_cap.dmfs_optimized_state ? + "enabled" : "disabled")); + } + } + + return 0; +} + +static int mlx4_init_fw(struct mlx4_dev *dev) +{ + struct mlx4_mod_stat_cfg mlx4_cfg; + int err = 0; + + if (!mlx4_is_slave(dev)) { + err = mlx4_QUERY_FW(dev); + if (err) { + if (err == -EACCES) + mlx4_info(dev, "non-primary physical function, skipping\n"); + else + mlx4_err(dev, "QUERY_FW command failed, aborting\n"); + return err; + } + + err = mlx4_load_fw(dev); + if (err) { + mlx4_err(dev, "Failed to start FW, aborting\n"); + return err; + } + + mlx4_cfg.log_pg_sz_m = 1; + mlx4_cfg.log_pg_sz = 0; + err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); + if (err) + mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); + } + + return err; +} + +static int mlx4_init_hca(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_adapter adapter; + struct mlx4_dev_cap dev_cap; + struct mlx4_profile profile; + struct mlx4_init_hca_param init_hca; + u64 icm_size; + struct mlx4_config_dev_params params; + int err; + + if (!mlx4_is_slave(dev)) { + err = mlx4_dev_cap(dev, &dev_cap); + if (err) { + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); + return err; + } + + choose_steering_mode(dev, &dev_cap); + choose_tunnel_offload_mode(dev, &dev_cap); + + if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC && + mlx4_is_master(dev)) + dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC; + + err = mlx4_get_phys_port_id(dev); + if (err) + mlx4_err(dev, "Fail to get physical port id\n"); + + if (mlx4_is_master(dev)) + mlx4_parav_master_pf_caps(dev); + + if (mlx4_low_memory_profile()) { + mlx4_info(dev, "Running from within kdump kernel. 
Using low memory profile\n"); + profile = low_mem_profile; + } else { + profile = default_profile; + } + if (dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) + profile.num_mcg = MLX4_FS_NUM_MCG; + + icm_size = mlx4_make_profile(dev, &profile, &dev_cap, + &init_hca); + if ((long long) icm_size < 0) { + err = icm_size; + return err; + } + + dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; + + init_hca.log_uar_sz = ilog2(dev->caps.num_uars); + init_hca.uar_page_sz = PAGE_SHIFT - 12; + init_hca.mw_enabled = 0; + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || + dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) + init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE; + + err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); + if (err) + return err; + + err = mlx4_INIT_HCA(dev, &init_hca); + if (err) { + mlx4_err(dev, "INIT_HCA command failed, aborting\n"); + goto err_free_icm; + } + + if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { + err = mlx4_query_func(dev, &dev_cap); + if (err < 0) { + mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); + goto err_close; + } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { + dev->caps.num_eqs = dev_cap.max_eqs; + dev->caps.reserved_eqs = dev_cap.reserved_eqs; + dev->caps.reserved_uars = dev_cap.reserved_uars; + } + } + + /* + * If TS is supported by FW + * read HCA frequency by QUERY_HCA command + */ + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { + memset(&init_hca, 0, sizeof(init_hca)); + err = mlx4_QUERY_HCA(dev, &init_hca); + if (err) { + mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n"); + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; + } else { + dev->caps.hca_core_clock = + init_hca.hca_core_clock; + } + + /* In case we got HCA frequency 0 - disable timestamping + * to avoid dividing by zero + */ + if (!dev->caps.hca_core_clock) { + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; + mlx4_err(dev, + "HCA frequency is 0 - timestamping is not supported\n"); + } else if (map_internal_clock(dev)) { + /* + * Map internal clock, + * in case of failure disable timestamping + */ + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; + mlx4_err(dev, "Failed to map internal clock. 
Timestamping is not supported\n"); + } + } + + if (dev->caps.dmfs_high_steer_mode != + MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) { + if (mlx4_validate_optimized_steering(dev)) + mlx4_warn(dev, "Optimized steering validation failed\n"); + + if (dev->caps.dmfs_high_steer_mode == + MLX4_STEERING_DMFS_A0_DISABLE) { + dev->caps.dmfs_high_rate_qpn_base = + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; + dev->caps.dmfs_high_rate_qpn_range = + MLX4_A0_STEERING_TABLE_SIZE; + } + + mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n", + dmfs_high_rate_steering_mode_str( + dev->caps.dmfs_high_steer_mode)); + } + } else { + err = mlx4_init_slave(dev); + if (err) { + if (err != -EPROBE_DEFER) + mlx4_err(dev, "Failed to initialize slave\n"); + return err; + } + + err = mlx4_slave_cap(dev); + if (err) { + mlx4_err(dev, "Failed to obtain slave caps\n"); + goto err_close; + } + } + + if (map_bf_area(dev)) + mlx4_dbg(dev, "Failed to map blue flame area\n"); + + /*Only the master set the ports, all the rest got it from it.*/ + if (!mlx4_is_slave(dev)) + mlx4_set_port_mask(dev); + + err = mlx4_QUERY_ADAPTER(dev, &adapter); + if (err) { + mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n"); + goto unmap_bf; + } + + /* Query CONFIG_DEV parameters */ + err = mlx4_config_dev_retrieval(dev, ¶ms); + if (err && err != -ENOTSUPP) { + mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n"); + } else if (!err) { + dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1; + dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2; + } + priv->eq_table.inta_pin = adapter.inta_pin; + memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); + + return 0; + +unmap_bf: + unmap_internal_clock(dev); + unmap_bf_area(dev); + + if (mlx4_is_slave(dev)) { + kfree(dev->caps.qp0_qkey); + kfree(dev->caps.qp0_tunnel); + kfree(dev->caps.qp0_proxy); + kfree(dev->caps.qp1_tunnel); + kfree(dev->caps.qp1_proxy); + } + +err_close: + if (mlx4_is_slave(dev)) + mlx4_slave_exit(dev); + else + mlx4_CLOSE_HCA(dev, 0); + +err_free_icm: + if (!mlx4_is_slave(dev)) + mlx4_free_icms(dev); + + return err; +} + +static int mlx4_init_counters_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int nent; + + if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) + return -ENOENT; + + nent = dev->caps.max_counters; + return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0); +} + +static void mlx4_cleanup_counters_table(struct mlx4_dev *dev) +{ + mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap); +} + +int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) + return -ENOENT; + + *idx = mlx4_bitmap_alloc(&priv->counters_bitmap); + if (*idx == -1) + return -ENOMEM; + + return 0; +} + +int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) +{ + u64 out_param; + int err; + + if (mlx4_is_mfunc(dev)) { + err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER, + RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (!err) + *idx = get_param_l(&out_param); + + return err; + } + return __mlx4_counter_alloc(dev, idx); +} +EXPORT_SYMBOL_GPL(mlx4_counter_alloc); + +void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) +{ + mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR); + return; +} + +void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) +{ + u64 in_param = 0; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, idx); + mlx4_cmd(dev, in_param, 
RES_COUNTER, RES_OP_RESERVE, + MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); + return; + } + __mlx4_counter_free(dev, idx); +} +EXPORT_SYMBOL_GPL(mlx4_counter_free); + +void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + priv->mfunc.master.vf_admin[entry].vport[port].guid = guid; +} +EXPORT_SYMBOL_GPL(mlx4_set_admin_guid); + +__be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + return priv->mfunc.master.vf_admin[entry].vport[port].guid; +} +EXPORT_SYMBOL_GPL(mlx4_get_admin_guid); + +void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + __be64 guid; + + /* hw GUID */ + if (entry == 0) + return; + + get_random_bytes((char *)&guid, sizeof(guid)); + guid &= ~(cpu_to_be64(1ULL << 56)); + guid |= cpu_to_be64(1ULL << 57); + priv->mfunc.master.vf_admin[entry].vport[port].guid = guid; +} + +static int mlx4_setup_hca(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err; + int port; + __be32 ib_port_default_caps; + + err = mlx4_init_uar_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize user access region table, aborting\n"); + return err; + } + + err = mlx4_uar_alloc(dev, &priv->driver_uar); + if (err) { + mlx4_err(dev, "Failed to allocate driver access region, aborting\n"); + goto err_uar_table_free; + } + + priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); + if (!priv->kar) { + mlx4_err(dev, "Couldn't map kernel access region, aborting\n"); + err = -ENOMEM; + goto err_uar_free; + } + + err = mlx4_init_pd_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize protection domain table, aborting\n"); + goto err_kar_unmap; + } + + err = mlx4_init_xrcd_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n"); + goto err_pd_table_free; + } + + err = mlx4_init_mr_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize memory region table, aborting\n"); + goto err_xrcd_table_free; + } + + if (!mlx4_is_slave(dev)) { + err = mlx4_init_mcg_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize multicast group table, aborting\n"); + goto err_mr_table_free; + } + err = mlx4_config_mad_demux(dev); + if (err) { + mlx4_err(dev, "Failed in config_mad_demux, aborting\n"); + goto err_mcg_table_free; + } + } + + err = mlx4_init_eq_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize event queue table, aborting\n"); + goto err_mcg_table_free; + } + + err = mlx4_cmd_use_events(dev); + if (err) { + mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n"); + goto err_eq_table_free; + } + + err = mlx4_NOP(dev); + if (err) { + if (dev->flags & MLX4_FLAG_MSI_X) { + mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n", + priv->eq_table.eq[dev->caps.num_comp_vectors].irq); + mlx4_warn(dev, "Trying again without MSI-X\n"); + } else { + mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n", + priv->eq_table.eq[dev->caps.num_comp_vectors].irq); + mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); + } + + goto err_cmd_poll; + } + + mlx4_dbg(dev, "NOP command IRQ test passed\n"); + + err = mlx4_init_cq_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize completion queue table, aborting\n"); + goto err_cmd_poll; + } + + err = mlx4_init_srq_table(dev); + if (err) { + 
mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n"); + goto err_cq_table_free; + } + + err = mlx4_init_qp_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize queue pair table, aborting\n"); + goto err_srq_table_free; + } + + err = mlx4_init_counters_table(dev); + if (err && err != -ENOENT) { + mlx4_err(dev, "Failed to initialize counters table, aborting\n"); + goto err_qp_table_free; + } + + if (!mlx4_is_slave(dev)) { + for (port = 1; port <= dev->caps.num_ports; port++) { + ib_port_default_caps = 0; + err = mlx4_get_port_ib_caps(dev, port, + &ib_port_default_caps); + if (err) + mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n", + port, err); + dev->caps.ib_port_def_cap[port] = ib_port_default_caps; + + /* initialize per-slave default ib port capabilities */ + if (mlx4_is_master(dev)) { + int i; + for (i = 0; i < dev->num_slaves; i++) { + if (i == mlx4_master_func_num(dev)) + continue; + priv->mfunc.master.slave_state[i].ib_cap_mask[port] = + ib_port_default_caps; + } + } + + if (mlx4_is_mfunc(dev)) + dev->caps.port_ib_mtu[port] = IB_MTU_2048; + else + dev->caps.port_ib_mtu[port] = IB_MTU_4096; + + err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ? + dev->caps.pkey_table_len[port] : -1); + if (err) { + mlx4_err(dev, "Failed to set port %d, aborting\n", + port); + goto err_counters_table_free; + } + } + } + + return 0; + +err_counters_table_free: + mlx4_cleanup_counters_table(dev); + +err_qp_table_free: + mlx4_cleanup_qp_table(dev); + +err_srq_table_free: + mlx4_cleanup_srq_table(dev); + +err_cq_table_free: + mlx4_cleanup_cq_table(dev); + +err_cmd_poll: + mlx4_cmd_use_polling(dev); + +err_eq_table_free: + mlx4_cleanup_eq_table(dev); + +err_mcg_table_free: + if (!mlx4_is_slave(dev)) + mlx4_cleanup_mcg_table(dev); + +err_mr_table_free: + mlx4_cleanup_mr_table(dev); + +err_xrcd_table_free: + mlx4_cleanup_xrcd_table(dev); + +err_pd_table_free: + mlx4_cleanup_pd_table(dev); + +err_kar_unmap: + iounmap(priv->kar); + +err_uar_free: + mlx4_uar_free(dev, &priv->driver_uar); + +err_uar_table_free: + mlx4_cleanup_uar_table(dev); + return err; +} + +static void mlx4_enable_msi_x(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct msix_entry *entries; + int i; + + if (msi_x) { + int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ; + + nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, + nreq); + + entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); + if (!entries) + goto no_msi; + + for (i = 0; i < nreq; ++i) + entries[i].entry = i; + + nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, + nreq); + + if (nreq < 0) { + kfree(entries); + goto no_msi; + } else if (nreq < MSIX_LEGACY_SZ + + dev->caps.num_ports * MIN_MSIX_P_PORT) { + /*Working in legacy mode , all EQ's shared*/ + dev->caps.comp_pool = 0; + dev->caps.num_comp_vectors = nreq - 1; + } else { + dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ; + dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1; + } + for (i = 0; i < nreq; ++i) + priv->eq_table.eq[i].irq = entries[i].vector; + + dev->flags |= MLX4_FLAG_MSI_X; + + kfree(entries); + return; + } + +no_msi: + dev->caps.num_comp_vectors = 1; + dev->caps.comp_pool = 0; + + for (i = 0; i < 2; ++i) + priv->eq_table.eq[i].irq = dev->persist->pdev->irq; +} + +static int mlx4_init_port_info(struct mlx4_dev *dev, int port) +{ + struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; + int err = 0; + + info->dev = dev; + info->port = port; + if (!mlx4_is_slave(dev)) { 
+ mlx4_init_mac_table(dev, &info->mac_table); + mlx4_init_vlan_table(dev, &info->vlan_table); + mlx4_init_roce_gid_table(dev, &info->gid_table); + info->base_qpn = mlx4_get_base_qpn(dev, port); + } + + sprintf(info->dev_name, "mlx4_port%d", port); + info->port_attr.attr.name = info->dev_name; + if (mlx4_is_mfunc(dev)) + info->port_attr.attr.mode = S_IRUGO; + else { + info->port_attr.attr.mode = S_IRUGO | S_IWUSR; + info->port_attr.store = set_port_type; + } + info->port_attr.show = show_port_type; + sysfs_attr_init(&info->port_attr.attr); + + err = device_create_file(&dev->persist->pdev->dev, &info->port_attr); + if (err) { + mlx4_err(dev, "Failed to create file for port %d\n", port); + info->port = -1; + } + + sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); + info->port_mtu_attr.attr.name = info->dev_mtu_name; + if (mlx4_is_mfunc(dev)) + info->port_mtu_attr.attr.mode = S_IRUGO; + else { + info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR; + info->port_mtu_attr.store = set_port_ib_mtu; + } + info->port_mtu_attr.show = show_port_ib_mtu; + sysfs_attr_init(&info->port_mtu_attr.attr); + + err = device_create_file(&dev->persist->pdev->dev, + &info->port_mtu_attr); + if (err) { + mlx4_err(dev, "Failed to create mtu file for port %d\n", port); + device_remove_file(&info->dev->persist->pdev->dev, + &info->port_attr); + info->port = -1; + } + + return err; +} + +static void mlx4_cleanup_port_info(struct mlx4_port_info *info) +{ + if (info->port < 0) + return; + + device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); + device_remove_file(&info->dev->persist->pdev->dev, + &info->port_mtu_attr); +} + +static int mlx4_init_steering(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int num_entries = dev->caps.num_ports; + int i, j; + + priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL); + if (!priv->steer) + return -ENOMEM; + + for (i = 0; i < num_entries; i++) + for (j = 0; j < MLX4_NUM_STEERS; j++) { + INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); + INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); + } + return 0; +} + +static void mlx4_clear_steering(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_steer_index *entry, *tmp_entry; + struct mlx4_promisc_qp *pqp, *tmp_pqp; + int num_entries = dev->caps.num_ports; + int i, j; + + for (i = 0; i < num_entries; i++) { + for (j = 0; j < MLX4_NUM_STEERS; j++) { + list_for_each_entry_safe(pqp, tmp_pqp, + &priv->steer[i].promisc_qps[j], + list) { + list_del(&pqp->list); + kfree(pqp); + } + list_for_each_entry_safe(entry, tmp_entry, + &priv->steer[i].steer_entries[j], + list) { + list_del(&entry->list); + list_for_each_entry_safe(pqp, tmp_pqp, + &entry->duplicates, + list) { + list_del(&pqp->list); + kfree(pqp); + } + kfree(entry); + } + } + } + kfree(priv->steer); +} + +static int extended_func_num(struct pci_dev *pdev) +{ + return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn); +} + +#define MLX4_OWNER_BASE 0x8069c +#define MLX4_OWNER_SIZE 4 + +static int mlx4_get_ownership(struct mlx4_dev *dev) +{ + void __iomem *owner; + u32 ret; + + if (pci_channel_offline(dev->persist->pdev)) + return -EIO; + + owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + + MLX4_OWNER_BASE, + MLX4_OWNER_SIZE); + if (!owner) { + mlx4_err(dev, "Failed to obtain ownership bit\n"); + return -ENOMEM; + } + + ret = readl(owner); + iounmap(owner); + return (int) !!ret; +} + +static void mlx4_free_ownership(struct mlx4_dev *dev) +{ + void __iomem *owner; + + if 
(pci_channel_offline(dev->persist->pdev)) + return; + + owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + + MLX4_OWNER_BASE, + MLX4_OWNER_SIZE); + if (!owner) { + mlx4_err(dev, "Failed to obtain ownership bit\n"); + return; + } + writel(0, owner); + msleep(1000); + iounmap(owner); +} + +#define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\ + !!((flags) & MLX4_FLAG_MASTER)) + +static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, + u8 total_vfs, int existing_vfs, int reset_flow) +{ + u64 dev_flags = dev->flags; + int err = 0; + + if (reset_flow) { + dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), + GFP_KERNEL); + if (!dev->dev_vfs) + goto free_mem; + return dev_flags; + } + + atomic_inc(&pf_loading); + if (dev->flags & MLX4_FLAG_SRIOV) { + if (existing_vfs != total_vfs) { + mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", + existing_vfs, total_vfs); + total_vfs = existing_vfs; + } + } + + dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL); + if (NULL == dev->dev_vfs) { + mlx4_err(dev, "Failed to allocate memory for VFs\n"); + goto disable_sriov; + } + + if (!(dev->flags & MLX4_FLAG_SRIOV)) { + mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); + err = pci_enable_sriov(pdev, total_vfs); + } + if (err) { + mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", + err); + goto disable_sriov; + } else { + mlx4_warn(dev, "Running in master mode\n"); + dev_flags |= MLX4_FLAG_SRIOV | + MLX4_FLAG_MASTER; + dev_flags &= ~MLX4_FLAG_SLAVE; + dev->persist->num_vfs = total_vfs; + } + return dev_flags; + +disable_sriov: + atomic_dec(&pf_loading); +free_mem: + dev->persist->num_vfs = 0; + kfree(dev->dev_vfs); + return dev_flags & ~MLX4_FLAG_MASTER; +} + +enum { + MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1, +}; + +static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, + int *nvfs) +{ + int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2]; + /* Checking for 64 VFs as a limitation of CX2 */ + if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) && + requested_vfs >= 64) { + mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n", + requested_vfs); + return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64; + } + return 0; +} + +static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, + int total_vfs, int *nvfs, struct mlx4_priv *priv, + int reset_flow) +{ + struct mlx4_dev *dev; + unsigned sum = 0; + int err; + int port; + int i; + struct mlx4_dev_cap *dev_cap = NULL; + int existing_vfs = 0; + + dev = &priv->dev; + + INIT_LIST_HEAD(&priv->ctx_list); + spin_lock_init(&priv->ctx_lock); + + mutex_init(&priv->port_mutex); + mutex_init(&priv->bond_mutex); + + INIT_LIST_HEAD(&priv->pgdir_list); + mutex_init(&priv->pgdir_mutex); + + INIT_LIST_HEAD(&priv->bf_list); + mutex_init(&priv->bf_mutex); + + dev->rev_id = pdev->revision; + dev->numa_node = dev_to_node(&pdev->dev); + + /* Detect if this device is a virtual function */ + if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { + mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); + dev->flags |= MLX4_FLAG_SLAVE; + } else { + /* We reset the device and enable SRIOV only for physical + * devices. 
Try to claim ownership on the device; + * if already taken, skip -- do not allow multiple PFs */ + err = mlx4_get_ownership(dev); + if (err) { + if (err < 0) + return err; + else { + mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n"); + return -EINVAL; + } + } + + atomic_set(&priv->opreq_count, 0); + INIT_WORK(&priv->opreq_task, mlx4_opreq_action); + + /* + * Now reset the HCA before we touch the PCI capabilities or + * attempt a firmware command, since a boot ROM may have left + * the HCA in an undefined state. + */ + err = mlx4_reset(dev); + if (err) { + mlx4_err(dev, "Failed to reset HCA, aborting\n"); + goto err_sriov; + } + + if (total_vfs) { + dev->flags = MLX4_FLAG_MASTER; + existing_vfs = pci_num_vf(pdev); + if (existing_vfs) + dev->flags |= MLX4_FLAG_SRIOV; + dev->persist->num_vfs = total_vfs; + } + } + + /* on load remove any previous indication of internal error, + * device is up. + */ + dev->persist->state = MLX4_DEVICE_STATE_UP; + +slave_start: + err = mlx4_cmd_init(dev); + if (err) { + mlx4_err(dev, "Failed to init command interface, aborting\n"); + goto err_sriov; + } + + /* In slave functions, the communication channel must be initialized + * before posting commands. Also, init num_slaves before calling + * mlx4_init_hca */ + if (mlx4_is_mfunc(dev)) { + if (mlx4_is_master(dev)) { + dev->num_slaves = MLX4_MAX_NUM_SLAVES; + + } else { + dev->num_slaves = 0; + err = mlx4_multi_func_init(dev); + if (err) { + mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n"); + goto err_cmd; + } + } + } + + err = mlx4_init_fw(dev); + if (err) { + mlx4_err(dev, "Failed to init fw, aborting.\n"); + goto err_mfunc; + } + + if (mlx4_is_master(dev)) { + /* when we hit the goto slave_start below, dev_cap already initialized */ + if (!dev_cap) { + dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); + + if (!dev_cap) { + err = -ENOMEM; + goto err_fw; + } + + err = mlx4_QUERY_DEV_CAP(dev, dev_cap); + if (err) { + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + goto err_fw; + } + + if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) + goto err_fw; + + if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { + u64 dev_flags = mlx4_enable_sriov(dev, pdev, + total_vfs, + existing_vfs, + reset_flow); + + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); + dev->flags = dev_flags; + if (!SRIOV_VALID_STATE(dev->flags)) { + mlx4_err(dev, "Invalid SRIOV state\n"); + goto err_sriov; + } + err = mlx4_reset(dev); + if (err) { + mlx4_err(dev, "Failed to reset HCA, aborting.\n"); + goto err_sriov; + } + goto slave_start; + } + } else { + /* Legacy mode FW requires SRIOV to be enabled before + * doing QUERY_DEV_CAP, since max_eq's value is different if + * SRIOV is enabled. 
+ */ + memset(dev_cap, 0, sizeof(*dev_cap)); + err = mlx4_QUERY_DEV_CAP(dev, dev_cap); + if (err) { + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + goto err_fw; + } + + if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) + goto err_fw; + } + } + + err = mlx4_init_hca(dev); + if (err) { + if (err == -EACCES) { + /* Not primary Physical function + * Running in slave mode */ + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); + /* We're not a PF */ + if (dev->flags & MLX4_FLAG_SRIOV) { + if (!existing_vfs) + pci_disable_sriov(pdev); + if (mlx4_is_master(dev) && !reset_flow) + atomic_dec(&pf_loading); + dev->flags &= ~MLX4_FLAG_SRIOV; + } + if (!mlx4_is_slave(dev)) + mlx4_free_ownership(dev); + dev->flags |= MLX4_FLAG_SLAVE; + dev->flags &= ~MLX4_FLAG_MASTER; + goto slave_start; + } else + goto err_fw; + } + + if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { + u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, + existing_vfs, reset_flow); + + if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) { + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR); + dev->flags = dev_flags; + err = mlx4_cmd_init(dev); + if (err) { + /* Only VHCR is cleaned up, so could still + * send FW commands + */ + mlx4_err(dev, "Failed to init VHCR command interface, aborting\n"); + goto err_close; + } + } else { + dev->flags = dev_flags; + } + + if (!SRIOV_VALID_STATE(dev->flags)) { + mlx4_err(dev, "Invalid SRIOV state\n"); + goto err_close; + } + } + + /* check if the device is functioning at its maximum possible speed. + * No return code for this call, just warn the user in case of PCI + * express device capabilities are under-satisfied by the bus. + */ + if (!mlx4_is_slave(dev)) + mlx4_check_pcie_caps(dev); + + /* In master functions, the communication channel must be initialized + * after obtaining its address from fw */ + if (mlx4_is_master(dev)) { + int ib_ports = 0; + + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) + ib_ports++; + + if (ib_ports && + (num_vfs_argc > 1 || probe_vfs_argc > 1)) { + mlx4_err(dev, + "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n"); + err = -EINVAL; + goto err_close; + } + if (dev->caps.num_ports < 2 && + num_vfs_argc > 1) { + err = -EINVAL; + mlx4_err(dev, + "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n", + dev->caps.num_ports); + goto err_close; + } + memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs)); + + for (i = 0; + i < sizeof(dev->persist->nvfs)/ + sizeof(dev->persist->nvfs[0]); i++) { + unsigned j; + + for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) { + dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1; + dev->dev_vfs[sum].n_ports = i < 2 ? 
1 : + dev->caps.num_ports; + } + } + + /* In master functions, the communication channel + * must be initialized after obtaining its address from fw + */ + err = mlx4_multi_func_init(dev); + if (err) { + mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n"); + goto err_close; + } + } + + err = mlx4_alloc_eq_table(dev); + if (err) + goto err_master_mfunc; + + priv->msix_ctl.pool_bm = 0; + mutex_init(&priv->msix_ctl.pool_lock); + + mlx4_enable_msi_x(dev); + if ((mlx4_is_mfunc(dev)) && + !(dev->flags & MLX4_FLAG_MSI_X)) { + err = -ENOSYS; + mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n"); + goto err_free_eq; + } + + if (!mlx4_is_slave(dev)) { + err = mlx4_init_steering(dev); + if (err) + goto err_disable_msix; + } + + err = mlx4_setup_hca(dev); + if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) && + !mlx4_is_mfunc(dev)) { + dev->flags &= ~MLX4_FLAG_MSI_X; + dev->caps.num_comp_vectors = 1; + dev->caps.comp_pool = 0; + pci_disable_msix(pdev); + err = mlx4_setup_hca(dev); + } + + if (err) + goto err_steer; + + mlx4_init_quotas(dev); + /* When PF resources are ready arm its comm channel to enable + * getting commands + */ + if (mlx4_is_master(dev)) { + err = mlx4_ARM_COMM_CHANNEL(dev); + if (err) { + mlx4_err(dev, " Failed to arm comm channel eq: %x\n", + err); + goto err_steer; + } + } + + for (port = 1; port <= dev->caps.num_ports; port++) { + err = mlx4_init_port_info(dev, port); + if (err) + goto err_port; + } + + priv->v2p.port1 = 1; + priv->v2p.port2 = 2; + + err = mlx4_register_device(dev); + if (err) + goto err_port; + + mlx4_request_modules(dev); + + mlx4_sense_init(dev); + mlx4_start_sense(dev); + + priv->removed = 0; + + if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) + atomic_dec(&pf_loading); + + kfree(dev_cap); + return 0; + +err_port: + for (--port; port >= 1; --port) + mlx4_cleanup_port_info(&priv->port[port]); + + mlx4_cleanup_counters_table(dev); + mlx4_cleanup_qp_table(dev); + mlx4_cleanup_srq_table(dev); + mlx4_cleanup_cq_table(dev); + mlx4_cmd_use_polling(dev); + mlx4_cleanup_eq_table(dev); + mlx4_cleanup_mcg_table(dev); + mlx4_cleanup_mr_table(dev); + mlx4_cleanup_xrcd_table(dev); + mlx4_cleanup_pd_table(dev); + mlx4_cleanup_uar_table(dev); + +err_steer: + if (!mlx4_is_slave(dev)) + mlx4_clear_steering(dev); + +err_disable_msix: + if (dev->flags & MLX4_FLAG_MSI_X) + pci_disable_msix(pdev); + +err_free_eq: + mlx4_free_eq_table(dev); + +err_master_mfunc: + if (mlx4_is_master(dev)) { + mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY); + mlx4_multi_func_cleanup(dev); + } + + if (mlx4_is_slave(dev)) { + kfree(dev->caps.qp0_qkey); + kfree(dev->caps.qp0_tunnel); + kfree(dev->caps.qp0_proxy); + kfree(dev->caps.qp1_tunnel); + kfree(dev->caps.qp1_proxy); + } + +err_close: + mlx4_close_hca(dev); + +err_fw: + mlx4_close_fw(dev); + +err_mfunc: + if (mlx4_is_slave(dev)) + mlx4_multi_func_cleanup(dev); + +err_cmd: + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); + +err_sriov: + if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) { + pci_disable_sriov(pdev); + dev->flags &= ~MLX4_FLAG_SRIOV; + } + + if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) + atomic_dec(&pf_loading); + + kfree(priv->dev.dev_vfs); + + if (!mlx4_is_slave(dev)) + mlx4_free_ownership(dev); + + kfree(dev_cap); + return err; +} + +static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, + struct mlx4_priv *priv) +{ + int err; + int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; + int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0}; + const int 
param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = { + {2, 0, 0}, {0, 1, 2}, {0, 1, 2} }; + unsigned total_vfs = 0; + unsigned int i; + + pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); + return err; + } + + /* Due to requirement that all VFs and the PF are *guaranteed* 2 MACS + * per port, we must limit the number of VFs to 63 (since their are + * 128 MACs) + */ + for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc; + total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) { + nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i]; + if (nvfs[i] < 0) { + dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n"); + err = -EINVAL; + goto err_disable_pdev; + } + } + for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc; + i++) { + prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i]; + if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) { + dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n"); + err = -EINVAL; + goto err_disable_pdev; + } + } + if (total_vfs >= MLX4_MAX_NUM_VF) { + dev_err(&pdev->dev, + "Requested more VF's (%d) than allowed (%d)\n", + total_vfs, MLX4_MAX_NUM_VF - 1); + err = -EINVAL; + goto err_disable_pdev; + } + + for (i = 0; i < MLX4_MAX_PORTS; i++) { + if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) { + dev_err(&pdev->dev, + "Requested more VF's (%d) for port (%d) than allowed (%d)\n", + nvfs[i] + nvfs[2], i + 1, + MLX4_MAX_NUM_VF_P_PORT - 1); + err = -EINVAL; + goto err_disable_pdev; + } + } + + /* Check for BARs. */ + if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) && + !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n", + pci_dev_data, pci_resource_flags(pdev, 0)); + err = -ENODEV; + goto err_disable_pdev; + } + if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, "Missing UAR, aborting\n"); + err = -ENODEV; + goto err_disable_pdev; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); + goto err_disable_pdev; + } + + pci_set_master(pdev); + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); + goto err_release_regions; + } + } + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n"); + goto err_release_regions; + } + } + + /* Allow large DMA segments, up to the firmware limit of 1 GB */ + dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); + /* Detect if this device is a virtual function */ + if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { + /* When acting as pf, we normally skip vfs unless explicitly + * requested to probe them. 
+ */ + if (total_vfs) { + unsigned vfs_offset = 0; + + for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && + vfs_offset + nvfs[i] < extended_func_num(pdev); + vfs_offset += nvfs[i], i++) + ; + if (i == sizeof(nvfs)/sizeof(nvfs[0])) { + err = -ENODEV; + goto err_release_regions; + } + if ((extended_func_num(pdev) - vfs_offset) + > prb_vf[i]) { + dev_warn(&pdev->dev, "Skipping virtual function:%d\n", + extended_func_num(pdev)); + err = -ENODEV; + goto err_release_regions; + } + } + } + + err = mlx4_catas_init(&priv->dev); + if (err) + goto err_release_regions; + + err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0); + if (err) + goto err_catas; + + return 0; + +err_catas: + mlx4_catas_end(&priv->dev); + +err_release_regions: + pci_release_regions(pdev); + +err_disable_pdev: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + return err; +} + +static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct mlx4_priv *priv; + struct mlx4_dev *dev; + int ret; + + printk_once(KERN_INFO "%s", mlx4_version); + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + dev = &priv->dev; + dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL); + if (!dev->persist) { + kfree(priv); + return -ENOMEM; + } + dev->persist->pdev = pdev; + dev->persist->dev = dev; + pci_set_drvdata(pdev, dev->persist); + priv->pci_dev_data = id->driver_data; + mutex_init(&dev->persist->device_state_mutex); + mutex_init(&dev->persist->interface_state_mutex); + + ret = __mlx4_init_one(pdev, id->driver_data, priv); + if (ret) { + kfree(dev->persist); + kfree(priv); + } else { + pci_save_state(pdev); + } + + return ret; +} + +static void mlx4_clean_dev(struct mlx4_dev *dev) +{ + struct mlx4_dev_persistent *persist = dev->persist; + struct mlx4_priv *priv = mlx4_priv(dev); + unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS); + + memset(priv, 0, sizeof(*priv)); + priv->dev.persist = persist; + priv->dev.flags = flags; +} + +static void mlx4_unload_one(struct pci_dev *pdev) +{ + struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; + struct mlx4_priv *priv = mlx4_priv(dev); + int pci_dev_data; + int p, i; + + if (priv->removed) + return; + + /* saving current ports type for further use */ + for (i = 0; i < dev->caps.num_ports; i++) { + dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1]; + dev->persist->curr_port_poss_type[i] = dev->caps. 
+ possible_type[i + 1]; + } + + pci_dev_data = priv->pci_dev_data; + + mlx4_stop_sense(dev); + mlx4_unregister_device(dev); + + for (p = 1; p <= dev->caps.num_ports; p++) { + mlx4_cleanup_port_info(&priv->port[p]); + mlx4_CLOSE_PORT(dev, p); + } + + if (mlx4_is_master(dev)) + mlx4_free_resource_tracker(dev, + RES_TR_FREE_SLAVES_ONLY); + + mlx4_cleanup_counters_table(dev); + mlx4_cleanup_qp_table(dev); + mlx4_cleanup_srq_table(dev); + mlx4_cleanup_cq_table(dev); + mlx4_cmd_use_polling(dev); + mlx4_cleanup_eq_table(dev); + mlx4_cleanup_mcg_table(dev); + mlx4_cleanup_mr_table(dev); + mlx4_cleanup_xrcd_table(dev); + mlx4_cleanup_pd_table(dev); + + if (mlx4_is_master(dev)) + mlx4_free_resource_tracker(dev, + RES_TR_FREE_STRUCTS_ONLY); + + iounmap(priv->kar); + mlx4_uar_free(dev, &priv->driver_uar); + mlx4_cleanup_uar_table(dev); + if (!mlx4_is_slave(dev)) + mlx4_clear_steering(dev); + mlx4_free_eq_table(dev); + if (mlx4_is_master(dev)) + mlx4_multi_func_cleanup(dev); + mlx4_close_hca(dev); + mlx4_close_fw(dev); + if (mlx4_is_slave(dev)) + mlx4_multi_func_cleanup(dev); + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); + + if (dev->flags & MLX4_FLAG_MSI_X) + pci_disable_msix(pdev); + + if (!mlx4_is_slave(dev)) + mlx4_free_ownership(dev); + + kfree(dev->caps.qp0_qkey); + kfree(dev->caps.qp0_tunnel); + kfree(dev->caps.qp0_proxy); + kfree(dev->caps.qp1_tunnel); + kfree(dev->caps.qp1_proxy); + kfree(dev->dev_vfs); + + mlx4_clean_dev(dev); + priv->pci_dev_data = pci_dev_data; + priv->removed = 1; +} + +static void mlx4_remove_one(struct pci_dev *pdev) +{ + struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; + struct mlx4_priv *priv = mlx4_priv(dev); + int active_vfs = 0; + + mutex_lock(&persist->interface_state_mutex); + persist->interface_state |= MLX4_INTERFACE_STATE_DELETION; + mutex_unlock(&persist->interface_state_mutex); + + /* Disabling SR-IOV is not allowed while there are active vf's */ + if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) { + active_vfs = mlx4_how_many_lives_vf(dev); + if (active_vfs) { + pr_warn("Removing PF when there are active VF's !!\n"); + pr_warn("Will not disable SR-IOV.\n"); + } + } + + /* device marked to be under deletion running now without the lock + * letting other tasks to be terminated + */ + if (persist->interface_state & MLX4_INTERFACE_STATE_UP) + mlx4_unload_one(pdev); + else + mlx4_info(dev, "%s: interface is down\n", __func__); + mlx4_catas_end(dev); + if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) { + mlx4_warn(dev, "Disabling SR-IOV\n"); + pci_disable_sriov(pdev); + } + + pci_release_regions(pdev); + pci_disable_device(pdev); + kfree(dev->persist); + kfree(priv); + pci_set_drvdata(pdev, NULL); +} + +static int restore_current_port_types(struct mlx4_dev *dev, + enum mlx4_port_type *types, + enum mlx4_port_type *poss_types) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err, i; + + mlx4_stop_sense(dev); + + mutex_lock(&priv->port_mutex); + for (i = 0; i < dev->caps.num_ports; i++) + dev->caps.possible_type[i + 1] = poss_types[i]; + err = mlx4_change_port_types(dev, types); + mlx4_start_sense(dev); + mutex_unlock(&priv->port_mutex); + + return err; +} + +int mlx4_restart_one(struct pci_dev *pdev) +{ + struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; + struct mlx4_priv *priv = mlx4_priv(dev); + int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; + int pci_dev_data, err, total_vfs; + + pci_dev_data = priv->pci_dev_data; + total_vfs = dev->persist->num_vfs; + 
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); + + mlx4_unload_one(pdev); + err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1); + if (err) { + mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n", + __func__, pci_name(pdev), err); + return err; + } + + err = restore_current_port_types(dev, dev->persist->curr_port_type, + dev->persist->curr_port_poss_type); + if (err) + mlx4_err(dev, "could not restore original port types (%d)\n", + err); + + return err; +} + +static const struct pci_device_id mlx4_pci_table[] = { + /* MT25408 "Hermon" SDR */ + { PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT }, + /* MT25408 "Hermon" DDR */ + { PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT }, + /* MT25408 "Hermon" QDR */ + { PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT }, + /* MT25408 "Hermon" DDR PCIe gen2 */ + { PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT }, + /* MT25408 "Hermon" QDR PCIe gen2 */ + { PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT }, + /* MT25408 "Hermon" EN 10GigE */ + { PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT }, + /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ + { PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT }, + /* MT25458 ConnectX EN 10GBASE-T 10GigE */ + { PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT }, + /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ + { PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT }, + /* MT26468 ConnectX EN 10GigE PCIe gen2*/ + { PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT }, + /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ + { PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT }, + /* MT26478 ConnectX2 40GigE PCIe gen2 */ + { PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT }, + /* MT25400 Family [ConnectX-2 Virtual Function] */ + { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF }, + /* MT27500 Family [ConnectX-3] */ + { PCI_VDEVICE(MELLANOX, 0x1003), 0 }, + /* MT27500 Family [ConnectX-3 Virtual Function] */ + { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF }, + { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */ + { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */ + { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */ + { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */ + { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */ + { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */ + { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */ + { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */ + { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */ + { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */ + { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */ + { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */ + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, mlx4_pci_table); + +static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + + mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n"); + mlx4_enter_error_state(persist); + + mutex_lock(&persist->interface_state_mutex); + if (persist->interface_state & MLX4_INTERFACE_STATE_UP) + mlx4_unload_one(pdev); + + mutex_unlock(&persist->interface_state_mutex); + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + pci_disable_device(pdev); + return PCI_ERS_RESULT_NEED_RESET; +} + 
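+/* PCI error recovery (slot_reset): re-enable and restore the PCI device and,
+ * if the interface was unloaded in the error_detected handler, reload the
+ * driver instance and restore the previously saved port types.
+ */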
+static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) +{ + struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; + struct mlx4_priv *priv = mlx4_priv(dev); + int ret; + int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; + int total_vfs; + + mlx4_err(dev, "mlx4_pci_slot_reset was called\n"); + ret = pci_enable_device(pdev); + if (ret) { + mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + total_vfs = dev->persist->num_vfs; + memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); + + mutex_lock(&persist->interface_state_mutex); + if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) { + ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs, + priv, 1); + if (ret) { + mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n", + __func__, ret); + goto end; + } + + ret = restore_current_port_types(dev, dev->persist-> + curr_port_type, dev->persist-> + curr_port_poss_type); + if (ret) + mlx4_err(dev, "could not restore original port types (%d)\n", ret); + } +end: + mutex_unlock(&persist->interface_state_mutex); + + return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; +} + +static void mlx4_shutdown(struct pci_dev *pdev) +{ + struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + + mlx4_info(persist->dev, "mlx4_shutdown was called\n"); + mutex_lock(&persist->interface_state_mutex); + if (persist->interface_state & MLX4_INTERFACE_STATE_UP) + mlx4_unload_one(pdev); + mutex_unlock(&persist->interface_state_mutex); +} + +static const struct pci_error_handlers mlx4_err_handler = { + .error_detected = mlx4_pci_err_detected, + .slot_reset = mlx4_pci_slot_reset, +}; + +static struct pci_driver mlx4_driver = { + .name = DRV_NAME, + .id_table = mlx4_pci_table, + .probe = mlx4_init_one, + .shutdown = mlx4_shutdown, + .remove = mlx4_remove_one, + .err_handler = &mlx4_err_handler, +}; + +static int __init mlx4_verify_params(void) +{ + if ((log_num_mac < 0) || (log_num_mac > 7)) { + pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac); + return -1; + } + + if (log_num_vlan != 0) + pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n", + MLX4_LOG_NUM_VLANS); + + if (use_prio != 0) + pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n"); + + if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { + pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n", + log_mtts_per_seg); + return -1; + } + + /* Check if module param for ports type has legal combination */ + if (port_type_array[0] == false && port_type_array[1] == true) { + pr_warn("Module parameter configuration ETH/IB is not supported. 
Switching to default configuration IB/IB\n"); + port_type_array[0] = true; + } + + if (mlx4_log_num_mgm_entry_size < -7 || + (mlx4_log_num_mgm_entry_size > 0 && + (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE || + mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) { + pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n", + mlx4_log_num_mgm_entry_size, + MLX4_MIN_MGM_LOG_ENTRY_SIZE, + MLX4_MAX_MGM_LOG_ENTRY_SIZE); + return -1; + } + + return 0; +} + +static int __init mlx4_init(void) +{ + int ret; + + if (mlx4_verify_params()) + return -EINVAL; + + + mlx4_wq = create_singlethread_workqueue("mlx4"); + if (!mlx4_wq) + return -ENOMEM; + + ret = pci_register_driver(&mlx4_driver); + if (ret < 0) + destroy_workqueue(mlx4_wq); + return ret < 0 ? ret : 0; +} + +static void __exit mlx4_cleanup(void) +{ + pci_unregister_driver(&mlx4_driver); + destroy_workqueue(mlx4_wq); +} + +module_init(mlx4_init); +module_exit(mlx4_cleanup); diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c new file mode 100644 index 000000000..bd9ea0d01 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -0,0 +1,1634 @@ +/* + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/string.h> +#include <linux/etherdevice.h> + +#include <linux/mlx4/cmd.h> +#include <linux/export.h> + +#include "mlx4.h" + +static const u8 zero_gid[16]; /* automatically initialized to 0 */ + +int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) +{ + return 1 << dev->oper_log_mgm_entry_size; +} + +int mlx4_get_qp_per_mgm(struct mlx4_dev *dev) +{ + return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2); +} + +static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev, + struct mlx4_cmd_mailbox *mailbox, + u32 size, + u64 *reg_id) +{ + u64 imm; + int err = 0; + + err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0, + MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + return err; + *reg_id = imm; + + return err; +} + +static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid) +{ + int err = 0; + + err = mlx4_cmd(dev, regid, 0, 0, + MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + return err; +} + +static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index, + struct mlx4_cmd_mailbox *mailbox) +{ + return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); +} + +static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index, + struct mlx4_cmd_mailbox *mailbox) +{ + return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); +} + +static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer, + struct mlx4_cmd_mailbox *mailbox) +{ + u32 in_mod; + + in_mod = (u32) port << 16 | steer << 1; + return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1, + MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); +} + +static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + u16 *hash, u8 op_mod) +{ + u64 imm; + int err; + + err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod, + MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + if (!err) + *hash = imm; + + return err; +} + +static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port, + enum mlx4_steer_type steer, + u32 qpn) +{ + struct mlx4_steer *s_steer; + struct mlx4_promisc_qp *pqp; + + if (port < 1 || port > dev->caps.num_ports) + return NULL; + + s_steer = &mlx4_priv(dev)->steer[port - 1]; + + list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { + if (pqp->qpn == qpn) + return pqp; + } + /* not found */ + return NULL; +} + +/* + * Add new entry to steering data structure. 
+ * All promisc QPs should be added as well + */ +static int new_steering_entry(struct mlx4_dev *dev, u8 port, + enum mlx4_steer_type steer, + unsigned int index, u32 qpn) +{ + struct mlx4_steer *s_steer; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mgm *mgm; + u32 members_count; + struct mlx4_steer_index *new_entry; + struct mlx4_promisc_qp *pqp; + struct mlx4_promisc_qp *dqp = NULL; + u32 prot; + int err; + + if (port < 1 || port > dev->caps.num_ports) + return -EINVAL; + + s_steer = &mlx4_priv(dev)->steer[port - 1]; + new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL); + if (!new_entry) + return -ENOMEM; + + INIT_LIST_HEAD(&new_entry->duplicates); + new_entry->index = index; + list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]); + + /* If the given qpn is also a promisc qp, + * it should be inserted to duplicates list + */ + pqp = get_promisc_qp(dev, port, steer, qpn); + if (pqp) { + dqp = kmalloc(sizeof *dqp, GFP_KERNEL); + if (!dqp) { + err = -ENOMEM; + goto out_alloc; + } + dqp->qpn = qpn; + list_add_tail(&dqp->list, &new_entry->duplicates); + } + + /* if no promisc qps for this vep, we are done */ + if (list_empty(&s_steer->promisc_qps[steer])) + return 0; + + /* now need to add all the promisc qps to the new + * steering entry, as they should also receive the packets + * destined to this address */ + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + err = -ENOMEM; + goto out_alloc; + } + mgm = mailbox->buf; + + err = mlx4_READ_ENTRY(dev, index, mailbox); + if (err) + goto out_mailbox; + + members_count = be32_to_cpu(mgm->members_count) & 0xffffff; + prot = be32_to_cpu(mgm->members_count) >> 30; + list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { + /* don't add already existing qpn */ + if (pqp->qpn == qpn) + continue; + if (members_count == dev->caps.num_qp_per_mgm) { + /* out of space */ + err = -ENOMEM; + goto out_mailbox; + } + + /* add the qpn */ + mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK); + } + /* update the qps count and update the entry with all the promisc qps*/ + mgm->members_count = cpu_to_be32(members_count | (prot << 30)); + err = mlx4_WRITE_ENTRY(dev, index, mailbox); + +out_mailbox: + mlx4_free_cmd_mailbox(dev, mailbox); + if (!err) + return 0; +out_alloc: + if (dqp) { + list_del(&dqp->list); + kfree(dqp); + } + list_del(&new_entry->list); + kfree(new_entry); + return err; +} + +/* update the data structures with existing steering entry */ +static int existing_steering_entry(struct mlx4_dev *dev, u8 port, + enum mlx4_steer_type steer, + unsigned int index, u32 qpn) +{ + struct mlx4_steer *s_steer; + struct mlx4_steer_index *tmp_entry, *entry = NULL; + struct mlx4_promisc_qp *pqp; + struct mlx4_promisc_qp *dqp; + + if (port < 1 || port > dev->caps.num_ports) + return -EINVAL; + + s_steer = &mlx4_priv(dev)->steer[port - 1]; + + pqp = get_promisc_qp(dev, port, steer, qpn); + if (!pqp) + return 0; /* nothing to do */ + + list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { + if (tmp_entry->index == index) { + entry = tmp_entry; + break; + } + } + if (unlikely(!entry)) { + mlx4_warn(dev, "Steering entry at index %x is not registered\n", index); + return -EINVAL; + } + + /* the given qpn is listed as a promisc qpn + * we need to add it as a duplicate to this entry + * for future references */ + list_for_each_entry(dqp, &entry->duplicates, list) { + if (qpn == dqp->qpn) + return 0; /* qp is already duplicated */ + } + + /* add the qp as a duplicate on this index */ + dqp = kmalloc(sizeof 
*dqp, GFP_KERNEL); + if (!dqp) + return -ENOMEM; + dqp->qpn = qpn; + list_add_tail(&dqp->list, &entry->duplicates); + + return 0; +} + +/* Check whether a qpn is a duplicate on steering entry + * If so, it should not be removed from mgm */ +static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port, + enum mlx4_steer_type steer, + unsigned int index, u32 qpn) +{ + struct mlx4_steer *s_steer; + struct mlx4_steer_index *tmp_entry, *entry = NULL; + struct mlx4_promisc_qp *dqp, *tmp_dqp; + + if (port < 1 || port > dev->caps.num_ports) + return NULL; + + s_steer = &mlx4_priv(dev)->steer[port - 1]; + + /* if qp is not promisc, it cannot be duplicated */ + if (!get_promisc_qp(dev, port, steer, qpn)) + return false; + + /* The qp is promisc qp so it is a duplicate on this index + * Find the index entry, and remove the duplicate */ + list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { + if (tmp_entry->index == index) { + entry = tmp_entry; + break; + } + } + if (unlikely(!entry)) { + mlx4_warn(dev, "Steering entry for index %x is not registered\n", index); + return false; + } + list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) { + if (dqp->qpn == qpn) { + list_del(&dqp->list); + kfree(dqp); + } + } + return true; +} + +/* Returns true if all the QPs != tqpn contained in this entry + * are Promisc QPs. Returns false otherwise. + */ +static bool promisc_steering_entry(struct mlx4_dev *dev, u8 port, + enum mlx4_steer_type steer, + unsigned int index, u32 tqpn, + u32 *members_count) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mgm *mgm; + u32 m_count; + bool ret = false; + int i; + + if (port < 1 || port > dev->caps.num_ports) + return false; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return false; + mgm = mailbox->buf; + + if (mlx4_READ_ENTRY(dev, index, mailbox)) + goto out; + m_count = be32_to_cpu(mgm->members_count) & 0xffffff; + if (members_count) + *members_count = m_count; + + for (i = 0; i < m_count; i++) { + u32 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK; + if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) { + /* the qp is not promisc, the entry can't be removed */ + goto out; + } + } + ret = true; +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return ret; +} + +/* IF a steering entry contains only promisc QPs, it can be removed. */ +static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port, + enum mlx4_steer_type steer, + unsigned int index, u32 tqpn) +{ + struct mlx4_steer *s_steer; + struct mlx4_steer_index *entry = NULL, *tmp_entry; + u32 members_count; + bool ret = false; + + if (port < 1 || port > dev->caps.num_ports) + return NULL; + + s_steer = &mlx4_priv(dev)->steer[port - 1]; + + if (!promisc_steering_entry(dev, port, steer, index, + tqpn, &members_count)) + goto out; + + /* All the qps currently registered for this entry are promiscuous, + * Checking for duplicates */ + ret = true; + list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) { + if (entry->index == index) { + if (list_empty(&entry->duplicates) || + members_count == 1) { + struct mlx4_promisc_qp *pqp, *tmp_pqp; + /* If there is only 1 entry in duplicates then + * this is the QP we want to delete, going over + * the list and deleting the entry. 
+ */ + list_del(&entry->list); + list_for_each_entry_safe(pqp, tmp_pqp, + &entry->duplicates, + list) { + list_del(&pqp->list); + kfree(pqp); + } + kfree(entry); + } else { + /* This entry contains duplicates so it shouldn't be removed */ + ret = false; + goto out; + } + } + } + +out: + return ret; +} + +static int add_promisc_qp(struct mlx4_dev *dev, u8 port, + enum mlx4_steer_type steer, u32 qpn) +{ + struct mlx4_steer *s_steer; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mgm *mgm; + struct mlx4_steer_index *entry; + struct mlx4_promisc_qp *pqp; + struct mlx4_promisc_qp *dqp; + u32 members_count; + u32 prot; + int i; + bool found; + int err; + struct mlx4_priv *priv = mlx4_priv(dev); + + if (port < 1 || port > dev->caps.num_ports) + return -EINVAL; + + s_steer = &mlx4_priv(dev)->steer[port - 1]; + + mutex_lock(&priv->mcg_table.mutex); + + if (get_promisc_qp(dev, port, steer, qpn)) { + err = 0; /* Noting to do, already exists */ + goto out_mutex; + } + + pqp = kmalloc(sizeof *pqp, GFP_KERNEL); + if (!pqp) { + err = -ENOMEM; + goto out_mutex; + } + pqp->qpn = qpn; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + err = -ENOMEM; + goto out_alloc; + } + mgm = mailbox->buf; + + if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) { + /* The promisc QP needs to be added for each one of the steering + * entries. If it already exists, needs to be added as + * a duplicate for this entry. + */ + list_for_each_entry(entry, + &s_steer->steer_entries[steer], + list) { + err = mlx4_READ_ENTRY(dev, entry->index, mailbox); + if (err) + goto out_mailbox; + + members_count = be32_to_cpu(mgm->members_count) & + 0xffffff; + prot = be32_to_cpu(mgm->members_count) >> 30; + found = false; + for (i = 0; i < members_count; i++) { + if ((be32_to_cpu(mgm->qp[i]) & + MGM_QPN_MASK) == qpn) { + /* Entry already exists. + * Add to duplicates. 
+ */ + dqp = kmalloc(sizeof(*dqp), GFP_KERNEL); + if (!dqp) { + err = -ENOMEM; + goto out_mailbox; + } + dqp->qpn = qpn; + list_add_tail(&dqp->list, + &entry->duplicates); + found = true; + } + } + if (!found) { + /* Need to add the qpn to mgm */ + if (members_count == + dev->caps.num_qp_per_mgm) { + /* entry is full */ + err = -ENOMEM; + goto out_mailbox; + } + mgm->qp[members_count++] = + cpu_to_be32(qpn & MGM_QPN_MASK); + mgm->members_count = + cpu_to_be32(members_count | + (prot << 30)); + err = mlx4_WRITE_ENTRY(dev, entry->index, + mailbox); + if (err) + goto out_mailbox; + } + } + } + + /* add the new qpn to list of promisc qps */ + list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); + /* now need to add all the promisc qps to default entry */ + memset(mgm, 0, sizeof *mgm); + members_count = 0; + list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) { + if (members_count == dev->caps.num_qp_per_mgm) { + /* entry is full */ + err = -ENOMEM; + goto out_list; + } + mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); + } + mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); + + err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); + if (err) + goto out_list; + + mlx4_free_cmd_mailbox(dev, mailbox); + mutex_unlock(&priv->mcg_table.mutex); + return 0; + +out_list: + list_del(&pqp->list); +out_mailbox: + mlx4_free_cmd_mailbox(dev, mailbox); +out_alloc: + kfree(pqp); +out_mutex: + mutex_unlock(&priv->mcg_table.mutex); + return err; +} + +static int remove_promisc_qp(struct mlx4_dev *dev, u8 port, + enum mlx4_steer_type steer, u32 qpn) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_steer *s_steer; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mgm *mgm; + struct mlx4_steer_index *entry, *tmp_entry; + struct mlx4_promisc_qp *pqp; + struct mlx4_promisc_qp *dqp; + u32 members_count; + bool found; + bool back_to_list = false; + int i; + int err; + + if (port < 1 || port > dev->caps.num_ports) + return -EINVAL; + + s_steer = &mlx4_priv(dev)->steer[port - 1]; + mutex_lock(&priv->mcg_table.mutex); + + pqp = get_promisc_qp(dev, port, steer, qpn); + if (unlikely(!pqp)) { + mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn); + /* nothing to do */ + err = 0; + goto out_mutex; + } + + /*remove from list of promisc qps */ + list_del(&pqp->list); + + /* set the default entry not to include the removed one */ + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + err = -ENOMEM; + back_to_list = true; + goto out_list; + } + mgm = mailbox->buf; + members_count = 0; + list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) + mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); + mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); + + err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); + if (err) + goto out_mailbox; + + if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) { + /* Remove the QP from all the steering entries */ + list_for_each_entry_safe(entry, tmp_entry, + &s_steer->steer_entries[steer], + list) { + found = false; + list_for_each_entry(dqp, &entry->duplicates, list) { + if (dqp->qpn == qpn) { + found = true; + break; + } + } + if (found) { + /* A duplicate, no need to change the MGM, + * only update the duplicates list + */ + list_del(&dqp->list); + kfree(dqp); + } else { + int loc = -1; + + err = mlx4_READ_ENTRY(dev, + entry->index, + mailbox); + if (err) + goto out_mailbox; + members_count = + be32_to_cpu(mgm->members_count) & + 0xffffff; + if (!members_count) { + mlx4_warn(dev, "QP 
%06x wasn't found in entry %x mcount=0. deleting entry...\n", + qpn, entry->index); + list_del(&entry->list); + kfree(entry); + continue; + } + + for (i = 0; i < members_count; ++i) + if ((be32_to_cpu(mgm->qp[i]) & + MGM_QPN_MASK) == qpn) { + loc = i; + break; + } + + if (loc < 0) { + mlx4_err(dev, "QP %06x wasn't found in entry %d\n", + qpn, entry->index); + err = -EINVAL; + goto out_mailbox; + } + + /* Copy the last QP in this MGM + * over removed QP + */ + mgm->qp[loc] = mgm->qp[members_count - 1]; + mgm->qp[members_count - 1] = 0; + mgm->members_count = + cpu_to_be32(--members_count | + (MLX4_PROT_ETH << 30)); + + err = mlx4_WRITE_ENTRY(dev, + entry->index, + mailbox); + if (err) + goto out_mailbox; + } + } + } + +out_mailbox: + mlx4_free_cmd_mailbox(dev, mailbox); +out_list: + if (back_to_list) + list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); + else + kfree(pqp); +out_mutex: + mutex_unlock(&priv->mcg_table.mutex); + return err; +} + +/* + * Caller must hold MCG table semaphore. gid and mgm parameters must + * be properly aligned for command interface. + * + * Returns 0 unless a firmware command error occurs. + * + * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1 + * and *mgm holds MGM entry. + * + * if GID is found in AMGM, *index = index in AMGM, *prev = index of + * previous entry in hash chain and *mgm holds AMGM entry. + * + * If no AMGM exists for given gid, *index = -1, *prev = index of last + * entry in hash chain and *mgm holds end of hash chain. + */ +static int find_entry(struct mlx4_dev *dev, u8 port, + u8 *gid, enum mlx4_protocol prot, + struct mlx4_cmd_mailbox *mgm_mailbox, + int *prev, int *index) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mgm *mgm = mgm_mailbox->buf; + u8 *mgid; + int err; + u16 hash; + u8 op_mod = (prot == MLX4_PROT_ETH) ? + !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return -ENOMEM; + mgid = mailbox->buf; + + memcpy(mgid, gid, 16); + + err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod); + mlx4_free_cmd_mailbox(dev, mailbox); + if (err) + return err; + + if (0) + mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash); + + *index = hash; + *prev = -1; + + do { + err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox); + if (err) + return err; + + if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { + if (*index != hash) { + mlx4_err(dev, "Found zero MGID in AMGM\n"); + err = -EINVAL; + } + return err; + } + + if (!memcmp(mgm->gid, gid, 16) && + be32_to_cpu(mgm->members_count) >> 30 == prot) + return err; + + *prev = *index; + *index = be32_to_cpu(mgm->next_gid_index) >> 6; + } while (*index); + + *index = -1; + return err; +} + +static const u8 __promisc_mode[] = { + [MLX4_FS_REGULAR] = 0x0, + [MLX4_FS_ALL_DEFAULT] = 0x1, + [MLX4_FS_MC_DEFAULT] = 0x3, + [MLX4_FS_UC_SNIFFER] = 0x4, + [MLX4_FS_MC_SNIFFER] = 0x5, +}; + +int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev, + enum mlx4_net_trans_promisc_mode flow_type) +{ + if (flow_type >= MLX4_FS_MODE_NUM) { + mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type); + return -EINVAL; + } + return __promisc_mode[flow_type]; +} +EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_mode); + +static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl, + struct mlx4_net_trans_rule_hw_ctrl *hw) +{ + u8 flags = 0; + + flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0; + flags |= ctrl->exclusive ? (1 << 2) : 0; + flags |= ctrl->allow_loopback ? 
(1 << 3) : 0; + + hw->flags = flags; + hw->type = __promisc_mode[ctrl->promisc_mode]; + hw->prio = cpu_to_be16(ctrl->priority); + hw->port = ctrl->port; + hw->qpn = cpu_to_be32(ctrl->qpn); +} + +const u16 __sw_id_hw[] = { + [MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001, + [MLX4_NET_TRANS_RULE_ID_IB] = 0xE005, + [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003, + [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002, + [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004, + [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006, + [MLX4_NET_TRANS_RULE_ID_VXLAN] = 0xE008 +}; + +int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev, + enum mlx4_net_trans_rule_id id) +{ + if (id >= MLX4_NET_TRANS_RULE_NUM) { + mlx4_err(dev, "Invalid network rule id. id = %d\n", id); + return -EINVAL; + } + return __sw_id_hw[id]; +} +EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_id); + +static const int __rule_hw_sz[] = { + [MLX4_NET_TRANS_RULE_ID_ETH] = + sizeof(struct mlx4_net_trans_rule_hw_eth), + [MLX4_NET_TRANS_RULE_ID_IB] = + sizeof(struct mlx4_net_trans_rule_hw_ib), + [MLX4_NET_TRANS_RULE_ID_IPV6] = 0, + [MLX4_NET_TRANS_RULE_ID_IPV4] = + sizeof(struct mlx4_net_trans_rule_hw_ipv4), + [MLX4_NET_TRANS_RULE_ID_TCP] = + sizeof(struct mlx4_net_trans_rule_hw_tcp_udp), + [MLX4_NET_TRANS_RULE_ID_UDP] = + sizeof(struct mlx4_net_trans_rule_hw_tcp_udp), + [MLX4_NET_TRANS_RULE_ID_VXLAN] = + sizeof(struct mlx4_net_trans_rule_hw_vxlan) +}; + +int mlx4_hw_rule_sz(struct mlx4_dev *dev, + enum mlx4_net_trans_rule_id id) +{ + if (id >= MLX4_NET_TRANS_RULE_NUM) { + mlx4_err(dev, "Invalid network rule id. id = %d\n", id); + return -EINVAL; + } + + return __rule_hw_sz[id]; +} +EXPORT_SYMBOL_GPL(mlx4_hw_rule_sz); + +static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec, + struct _rule_hw *rule_hw) +{ + if (mlx4_hw_rule_sz(dev, spec->id) < 0) + return -EINVAL; + memset(rule_hw, 0, mlx4_hw_rule_sz(dev, spec->id)); + rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]); + rule_hw->size = mlx4_hw_rule_sz(dev, spec->id) >> 2; + + switch (spec->id) { + case MLX4_NET_TRANS_RULE_ID_ETH: + memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN); + memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk, + ETH_ALEN); + memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN); + memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk, + ETH_ALEN); + if (spec->eth.ether_type_enable) { + rule_hw->eth.ether_type_enable = 1; + rule_hw->eth.ether_type = spec->eth.ether_type; + } + rule_hw->eth.vlan_tag = spec->eth.vlan_id; + rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk; + break; + + case MLX4_NET_TRANS_RULE_ID_IB: + rule_hw->ib.l3_qpn = spec->ib.l3_qpn; + rule_hw->ib.qpn_mask = spec->ib.qpn_msk; + memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16); + memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16); + break; + + case MLX4_NET_TRANS_RULE_ID_IPV6: + return -EOPNOTSUPP; + + case MLX4_NET_TRANS_RULE_ID_IPV4: + rule_hw->ipv4.src_ip = spec->ipv4.src_ip; + rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk; + rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip; + rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk; + break; + + case MLX4_NET_TRANS_RULE_ID_TCP: + case MLX4_NET_TRANS_RULE_ID_UDP: + rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port; + rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk; + rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port; + rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk; + break; + + case MLX4_NET_TRANS_RULE_ID_VXLAN: + rule_hw->vxlan.vni = + cpu_to_be32(be32_to_cpu(spec->vxlan.vni) << 8); + rule_hw->vxlan.vni_mask = + 
cpu_to_be32(be32_to_cpu(spec->vxlan.vni_mask) << 8); + break; + + default: + return -EINVAL; + } + + return __rule_hw_sz[spec->id]; +} + +static void mlx4_err_rule(struct mlx4_dev *dev, char *str, + struct mlx4_net_trans_rule *rule) +{ +#define BUF_SIZE 256 + struct mlx4_spec_list *cur; + char buf[BUF_SIZE]; + int len = 0; + + mlx4_err(dev, "%s", str); + len += snprintf(buf + len, BUF_SIZE - len, + "port = %d prio = 0x%x qp = 0x%x ", + rule->port, rule->priority, rule->qpn); + + list_for_each_entry(cur, &rule->list, list) { + switch (cur->id) { + case MLX4_NET_TRANS_RULE_ID_ETH: + len += snprintf(buf + len, BUF_SIZE - len, + "dmac = %pM ", &cur->eth.dst_mac); + if (cur->eth.ether_type) + len += snprintf(buf + len, BUF_SIZE - len, + "ethertype = 0x%x ", + be16_to_cpu(cur->eth.ether_type)); + if (cur->eth.vlan_id) + len += snprintf(buf + len, BUF_SIZE - len, + "vlan-id = %d ", + be16_to_cpu(cur->eth.vlan_id)); + break; + + case MLX4_NET_TRANS_RULE_ID_IPV4: + if (cur->ipv4.src_ip) + len += snprintf(buf + len, BUF_SIZE - len, + "src-ip = %pI4 ", + &cur->ipv4.src_ip); + if (cur->ipv4.dst_ip) + len += snprintf(buf + len, BUF_SIZE - len, + "dst-ip = %pI4 ", + &cur->ipv4.dst_ip); + break; + + case MLX4_NET_TRANS_RULE_ID_TCP: + case MLX4_NET_TRANS_RULE_ID_UDP: + if (cur->tcp_udp.src_port) + len += snprintf(buf + len, BUF_SIZE - len, + "src-port = %d ", + be16_to_cpu(cur->tcp_udp.src_port)); + if (cur->tcp_udp.dst_port) + len += snprintf(buf + len, BUF_SIZE - len, + "dst-port = %d ", + be16_to_cpu(cur->tcp_udp.dst_port)); + break; + + case MLX4_NET_TRANS_RULE_ID_IB: + len += snprintf(buf + len, BUF_SIZE - len, + "dst-gid = %pI6\n", cur->ib.dst_gid); + len += snprintf(buf + len, BUF_SIZE - len, + "dst-gid-mask = %pI6\n", + cur->ib.dst_gid_msk); + break; + + case MLX4_NET_TRANS_RULE_ID_VXLAN: + len += snprintf(buf + len, BUF_SIZE - len, + "VNID = %d ", be32_to_cpu(cur->vxlan.vni)); + break; + case MLX4_NET_TRANS_RULE_ID_IPV6: + break; + + default: + break; + } + } + len += snprintf(buf + len, BUF_SIZE - len, "\n"); + mlx4_err(dev, "%s", buf); + + if (len >= BUF_SIZE) + mlx4_err(dev, "Network rule error message was truncated, print buffer is too small\n"); +} + +int mlx4_flow_attach(struct mlx4_dev *dev, + struct mlx4_net_trans_rule *rule, u64 *reg_id) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_spec_list *cur; + u32 size = 0; + int ret; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + trans_rule_ctrl_to_hw(rule, mailbox->buf); + + size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); + + list_for_each_entry(cur, &rule->list, list) { + ret = parse_trans_rule(dev, cur, mailbox->buf + size); + if (ret < 0) { + mlx4_free_cmd_mailbox(dev, mailbox); + return ret; + } + size += ret; + } + + ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id); + if (ret == -ENOMEM) { + mlx4_err_rule(dev, + "mcg table is full. 
Fail to register network rule\n", + rule); + } else if (ret) { + if (ret == -ENXIO) { + if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) + mlx4_err_rule(dev, + "DMFS is not enabled, " + "failed to register network rule.\n", + rule); + else + mlx4_err_rule(dev, + "Rule exceeds the dmfs_high_rate_mode limitations, " + "failed to register network rule.\n", + rule); + + } else { + mlx4_err_rule(dev, "Fail to register network rule.\n", rule); + } + } + + mlx4_free_cmd_mailbox(dev, mailbox); + + return ret; +} +EXPORT_SYMBOL_GPL(mlx4_flow_attach); + +int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id) +{ + int err; + + err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id); + if (err) + mlx4_err(dev, "Fail to detach network rule. registration id = 0x%llx\n", + reg_id); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_flow_detach); + +int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr, + int port, int qpn, u16 prio, u64 *reg_id) +{ + int err; + struct mlx4_spec_list spec_eth_outer = { {NULL} }; + struct mlx4_spec_list spec_vxlan = { {NULL} }; + struct mlx4_spec_list spec_eth_inner = { {NULL} }; + + struct mlx4_net_trans_rule rule = { + .queue_mode = MLX4_NET_TRANS_Q_FIFO, + .exclusive = 0, + .allow_loopback = 1, + .promisc_mode = MLX4_FS_REGULAR, + }; + + __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); + + rule.port = port; + rule.qpn = qpn; + rule.priority = prio; + INIT_LIST_HEAD(&rule.list); + + spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH; + memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN); + memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN); + + spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */ + spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */ + + list_add_tail(&spec_eth_outer.list, &rule.list); + list_add_tail(&spec_vxlan.list, &rule.list); + list_add_tail(&spec_eth_inner.list, &rule.list); + + err = mlx4_flow_attach(dev, &rule, reg_id); + return err; +} +EXPORT_SYMBOL(mlx4_tunnel_steer_add); + +int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, + u32 max_range_qpn) +{ + int err; + u64 in_param; + + in_param = ((u64) min_range_qpn) << 32; + in_param |= ((u64) max_range_qpn) & 0xFFFFFFFF; + + err = mlx4_cmd(dev, in_param, 0, 0, + MLX4_FLOW_STEERING_IB_UC_QP_RANGE, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + + return err; +} +EXPORT_SYMBOL_GPL(mlx4_FLOW_STEERING_IB_UC_QP_RANGE); + +int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], + int block_mcast_loopback, enum mlx4_protocol prot, + enum mlx4_steer_type steer) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mgm *mgm; + u32 members_count; + int index, prev; + int link = 0; + int i; + int err; + u8 port = gid[5]; + u8 new_entry = 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + mgm = mailbox->buf; + + mutex_lock(&priv->mcg_table.mutex); + err = find_entry(dev, port, gid, prot, + mailbox, &prev, &index); + if (err) + goto out; + + if (index != -1) { + if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { + new_entry = 1; + memcpy(mgm->gid, gid, 16); + } + } else { + link = 1; + + index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap); + if (index == -1) { + mlx4_err(dev, "No AMGM entries left\n"); + err = -ENOMEM; + goto out; + } + index += dev->caps.num_mgms; + + new_entry = 1; + memset(mgm, 0, sizeof *mgm); + memcpy(mgm->gid, gid, 16); + } + + members_count = be32_to_cpu(mgm->members_count) & 0xffffff; + if 
(members_count == dev->caps.num_qp_per_mgm) { + mlx4_err(dev, "MGM at index %x is full\n", index); + err = -ENOMEM; + goto out; + } + + for (i = 0; i < members_count; ++i) + if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) { + mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn); + err = 0; + goto out; + } + + if (block_mcast_loopback) + mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) | + (1U << MGM_BLCK_LB_BIT)); + else + mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); + + mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30); + + err = mlx4_WRITE_ENTRY(dev, index, mailbox); + if (err) + goto out; + + if (!link) + goto out; + + err = mlx4_READ_ENTRY(dev, prev, mailbox); + if (err) + goto out; + + mgm->next_gid_index = cpu_to_be32(index << 6); + + err = mlx4_WRITE_ENTRY(dev, prev, mailbox); + if (err) + goto out; + +out: + if (prot == MLX4_PROT_ETH) { + /* manage the steering entry for promisc mode */ + if (new_entry) + new_steering_entry(dev, port, steer, index, qp->qpn); + else + existing_steering_entry(dev, port, steer, + index, qp->qpn); + } + if (err && link && index != -1) { + if (index < dev->caps.num_mgms) + mlx4_warn(dev, "Got AMGM index %d < %d\n", + index, dev->caps.num_mgms); + else + mlx4_bitmap_free(&priv->mcg_table.bitmap, + index - dev->caps.num_mgms, MLX4_USE_RR); + } + mutex_unlock(&priv->mcg_table.mutex); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], + enum mlx4_protocol prot, enum mlx4_steer_type steer) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mgm *mgm; + u32 members_count; + int prev, index; + int i, loc = -1; + int err; + u8 port = gid[5]; + bool removed_entry = false; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + mgm = mailbox->buf; + + mutex_lock(&priv->mcg_table.mutex); + + err = find_entry(dev, port, gid, prot, + mailbox, &prev, &index); + if (err) + goto out; + + if (index == -1) { + mlx4_err(dev, "MGID %pI6 not found\n", gid); + err = -EINVAL; + goto out; + } + + /* If this QP is also a promisc QP, it shouldn't be removed only if + * at least one none promisc QP is also attached to this MCG + */ + if (prot == MLX4_PROT_ETH && + check_duplicate_entry(dev, port, steer, index, qp->qpn) && + !promisc_steering_entry(dev, port, steer, index, qp->qpn, NULL)) + goto out; + + members_count = be32_to_cpu(mgm->members_count) & 0xffffff; + for (i = 0; i < members_count; ++i) + if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) { + loc = i; + break; + } + + if (loc == -1) { + mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn); + err = -EINVAL; + goto out; + } + + /* copy the last QP in this MGM over removed QP */ + mgm->qp[loc] = mgm->qp[members_count - 1]; + mgm->qp[members_count - 1] = 0; + mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30); + + if (prot == MLX4_PROT_ETH) + removed_entry = can_remove_steering_entry(dev, port, steer, + index, qp->qpn); + if (members_count && (prot != MLX4_PROT_ETH || !removed_entry)) { + err = mlx4_WRITE_ENTRY(dev, index, mailbox); + goto out; + } + + /* We are going to delete the entry, members count should be 0 */ + mgm->members_count = cpu_to_be32((u32) prot << 30); + + if (prev == -1) { + /* Remove entry from MGM */ + int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6; + if (amgm_index) { + err = mlx4_READ_ENTRY(dev, amgm_index, mailbox); + if (err) + 
goto out; + } else + memset(mgm->gid, 0, 16); + + err = mlx4_WRITE_ENTRY(dev, index, mailbox); + if (err) + goto out; + + if (amgm_index) { + if (amgm_index < dev->caps.num_mgms) + mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d\n", + index, amgm_index, dev->caps.num_mgms); + else + mlx4_bitmap_free(&priv->mcg_table.bitmap, + amgm_index - dev->caps.num_mgms, MLX4_USE_RR); + } + } else { + /* Remove entry from AMGM */ + int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; + err = mlx4_READ_ENTRY(dev, prev, mailbox); + if (err) + goto out; + + mgm->next_gid_index = cpu_to_be32(cur_next_index << 6); + + err = mlx4_WRITE_ENTRY(dev, prev, mailbox); + if (err) + goto out; + + if (index < dev->caps.num_mgms) + mlx4_warn(dev, "entry %d had next AMGM index %d < %d\n", + prev, index, dev->caps.num_mgms); + else + mlx4_bitmap_free(&priv->mcg_table.bitmap, + index - dev->caps.num_mgms, MLX4_USE_RR); + } + +out: + mutex_unlock(&priv->mcg_table.mutex); + + mlx4_free_cmd_mailbox(dev, mailbox); + if (err && dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) + /* In case device is under an error, return success as a closing command */ + err = 0; + return err; +} + +static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp, + u8 gid[16], u8 attach, u8 block_loopback, + enum mlx4_protocol prot) +{ + struct mlx4_cmd_mailbox *mailbox; + int err = 0; + int qpn; + + if (!mlx4_is_mfunc(dev)) + return -EBADF; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + memcpy(mailbox->buf, gid, 16); + qpn = qp->qpn; + qpn |= (prot << 28); + if (attach && block_loopback) + qpn |= (1 << 31); + + err = mlx4_cmd(dev, mailbox->dma, qpn, attach, + MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); + + mlx4_free_cmd_mailbox(dev, mailbox); + if (err && !attach && + dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) + err = 0; + return err; +} + +int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, + u8 gid[16], u8 port, + int block_mcast_loopback, + enum mlx4_protocol prot, u64 *reg_id) +{ + struct mlx4_spec_list spec = { {NULL} }; + __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); + + struct mlx4_net_trans_rule rule = { + .queue_mode = MLX4_NET_TRANS_Q_FIFO, + .exclusive = 0, + .promisc_mode = MLX4_FS_REGULAR, + .priority = MLX4_DOMAIN_NIC, + }; + + rule.allow_loopback = !block_mcast_loopback; + rule.port = port; + rule.qpn = qp->qpn; + INIT_LIST_HEAD(&rule.list); + + switch (prot) { + case MLX4_PROT_ETH: + spec.id = MLX4_NET_TRANS_RULE_ID_ETH; + memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN); + memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN); + break; + + case MLX4_PROT_IB_IPV6: + spec.id = MLX4_NET_TRANS_RULE_ID_IB; + memcpy(spec.ib.dst_gid, gid, 16); + memset(&spec.ib.dst_gid_msk, 0xff, 16); + break; + default: + return -EINVAL; + } + list_add_tail(&spec.list, &rule.list); + + return mlx4_flow_attach(dev, &rule, reg_id); +} + +int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], + u8 port, int block_mcast_loopback, + enum mlx4_protocol prot, u64 *reg_id) +{ + switch (dev->caps.steering_mode) { + case MLX4_STEERING_MODE_A0: + if (prot == MLX4_PROT_ETH) + return 0; + + case MLX4_STEERING_MODE_B0: + if (prot == MLX4_PROT_ETH) + gid[7] |= (MLX4_MC_STEER << 1); + + if (mlx4_is_mfunc(dev)) + return mlx4_QP_ATTACH(dev, qp, gid, 1, + block_mcast_loopback, prot); + return mlx4_qp_attach_common(dev, qp, gid, + block_mcast_loopback, prot, + MLX4_MC_STEER); + + case MLX4_STEERING_MODE_DEVICE_MANAGED: + 
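/* Device-managed flow steering: the attach is expressed as a DMFS
+ * rule (mlx4_trans_to_dmfs_attach -> mlx4_flow_attach), and the
+ * registration id returned in *reg_id must be passed back to
+ * mlx4_multicast_detach() so that the rule can be removed.
+ */
+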
return mlx4_trans_to_dmfs_attach(dev, qp, gid, port, + block_mcast_loopback, + prot, reg_id); + default: + return -EINVAL; + } +} +EXPORT_SYMBOL_GPL(mlx4_multicast_attach); + +int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], + enum mlx4_protocol prot, u64 reg_id) +{ + switch (dev->caps.steering_mode) { + case MLX4_STEERING_MODE_A0: + if (prot == MLX4_PROT_ETH) + return 0; + + case MLX4_STEERING_MODE_B0: + if (prot == MLX4_PROT_ETH) + gid[7] |= (MLX4_MC_STEER << 1); + + if (mlx4_is_mfunc(dev)) + return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); + + return mlx4_qp_detach_common(dev, qp, gid, prot, + MLX4_MC_STEER); + + case MLX4_STEERING_MODE_DEVICE_MANAGED: + return mlx4_flow_detach(dev, reg_id); + + default: + return -EINVAL; + } +} +EXPORT_SYMBOL_GPL(mlx4_multicast_detach); + +int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, + u32 qpn, enum mlx4_net_trans_promisc_mode mode) +{ + struct mlx4_net_trans_rule rule; + u64 *regid_p; + + switch (mode) { + case MLX4_FS_ALL_DEFAULT: + regid_p = &dev->regid_promisc_array[port]; + break; + case MLX4_FS_MC_DEFAULT: + regid_p = &dev->regid_allmulti_array[port]; + break; + default: + return -1; + } + + if (*regid_p != 0) + return -1; + + rule.promisc_mode = mode; + rule.port = port; + rule.qpn = qpn; + INIT_LIST_HEAD(&rule.list); + mlx4_err(dev, "going promisc on %x\n", port); + + return mlx4_flow_attach(dev, &rule, regid_p); +} +EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add); + +int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port, + enum mlx4_net_trans_promisc_mode mode) +{ + int ret; + u64 *regid_p; + + switch (mode) { + case MLX4_FS_ALL_DEFAULT: + regid_p = &dev->regid_promisc_array[port]; + break; + case MLX4_FS_MC_DEFAULT: + regid_p = &dev->regid_allmulti_array[port]; + break; + default: + return -1; + } + + if (*regid_p == 0) + return -1; + + ret = mlx4_flow_detach(dev, *regid_p); + if (ret == 0) + *regid_p = 0; + + return ret; +} +EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove); + +int mlx4_unicast_attach(struct mlx4_dev *dev, + struct mlx4_qp *qp, u8 gid[16], + int block_mcast_loopback, enum mlx4_protocol prot) +{ + if (prot == MLX4_PROT_ETH) + gid[7] |= (MLX4_UC_STEER << 1); + + if (mlx4_is_mfunc(dev)) + return mlx4_QP_ATTACH(dev, qp, gid, 1, + block_mcast_loopback, prot); + + return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback, + prot, MLX4_UC_STEER); +} +EXPORT_SYMBOL_GPL(mlx4_unicast_attach); + +int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, + u8 gid[16], enum mlx4_protocol prot) +{ + if (prot == MLX4_PROT_ETH) + gid[7] |= (MLX4_UC_STEER << 1); + + if (mlx4_is_mfunc(dev)) + return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); + + return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER); +} +EXPORT_SYMBOL_GPL(mlx4_unicast_detach); + +int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + u32 qpn = (u32) vhcr->in_param & 0xffffffff; + int port = mlx4_slave_convert_port(dev, slave, vhcr->in_param >> 62); + enum mlx4_steer_type steer = vhcr->in_modifier; + + if (port < 0) + return -EINVAL; + + /* Promiscuous unicast is not allowed in mfunc */ + if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER) + return 0; + + if (vhcr->op_modifier) + return add_promisc_qp(dev, port, steer, qpn); + else + return remove_promisc_qp(dev, port, steer, qpn); +} + +static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn, + enum mlx4_steer_type 
steer, u8 add, u8 port) +{ + return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add, + MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); +} + +int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) +{ + if (mlx4_is_mfunc(dev)) + return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port); + + return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn); +} +EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add); + +int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) +{ + if (mlx4_is_mfunc(dev)) + return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port); + + return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn); +} +EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove); + +int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) +{ + if (mlx4_is_mfunc(dev)) + return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port); + + return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn); +} +EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add); + +int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) +{ + if (mlx4_is_mfunc(dev)) + return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port); + + return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn); +} +EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove); + +int mlx4_init_mcg_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err; + + /* No need for mcg_table when fw managed the mcg table*/ + if (dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) + return 0; + err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms, + dev->caps.num_amgms - 1, 0, 0); + if (err) + return err; + + mutex_init(&priv->mcg_table.mutex); + + return 0; +} + +void mlx4_cleanup_mcg_table(struct mlx4_dev *dev) +{ + if (dev->caps.steering_mode != + MLX4_STEERING_MODE_DEVICE_MANAGED) + mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h new file mode 100644 index 000000000..502d3dd2c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -0,0 +1,1437 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_H +#define MLX4_H + +#include <linux/mutex.h> +#include <linux/radix-tree.h> +#include <linux/rbtree.h> +#include <linux/timer.h> +#include <linux/semaphore.h> +#include <linux/workqueue.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> + +#include <linux/mlx4/device.h> +#include <linux/mlx4/driver.h> +#include <linux/mlx4/doorbell.h> +#include <linux/mlx4/cmd.h> +#include "fw_qos.h" + +#define DRV_NAME "mlx4_core" +#define PFX DRV_NAME ": " +#define DRV_VERSION "2.2-1" +#define DRV_RELDATE "Feb, 2014" + +#define MLX4_FS_UDP_UC_EN (1 << 1) +#define MLX4_FS_TCP_UC_EN (1 << 2) +#define MLX4_FS_NUM_OF_L2_ADDR 8 +#define MLX4_FS_MGM_LOG_ENTRY_SIZE 7 +#define MLX4_FS_NUM_MCG (1 << 17) + +#define INIT_HCA_TPT_MW_ENABLE (1 << 7) + +enum { + MLX4_HCR_BASE = 0x80680, + MLX4_HCR_SIZE = 0x0001c, + MLX4_CLR_INT_SIZE = 0x00008, + MLX4_SLAVE_COMM_BASE = 0x0, + MLX4_COMM_PAGESIZE = 0x1000, + MLX4_CLOCK_SIZE = 0x00008, + MLX4_COMM_CHAN_CAPS = 0x8, + MLX4_COMM_CHAN_FLAGS = 0xc +}; + +enum { + MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE = 10, + MLX4_MIN_MGM_LOG_ENTRY_SIZE = 7, + MLX4_MAX_MGM_LOG_ENTRY_SIZE = 12, + MLX4_MAX_QP_PER_MGM = 4 * ((1 << MLX4_MAX_MGM_LOG_ENTRY_SIZE) / 16 - 2), + MLX4_MTT_ENTRY_PER_SEG = 8, +}; + +enum { + MLX4_NUM_PDS = 1 << 15 +}; + +enum { + MLX4_CMPT_TYPE_QP = 0, + MLX4_CMPT_TYPE_SRQ = 1, + MLX4_CMPT_TYPE_CQ = 2, + MLX4_CMPT_TYPE_EQ = 3, + MLX4_CMPT_NUM_TYPE +}; + +enum { + MLX4_CMPT_SHIFT = 24, + MLX4_NUM_CMPTS = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT +}; + +enum mlx4_mpt_state { + MLX4_MPT_DISABLED = 0, + MLX4_MPT_EN_HW, + MLX4_MPT_EN_SW +}; + +#define MLX4_COMM_TIME 10000 +#define MLX4_COMM_OFFLINE_TIME_OUT 30000 +#define MLX4_COMM_CMD_NA_OP 0x0 + + +enum { + MLX4_COMM_CMD_RESET, + MLX4_COMM_CMD_VHCR0, + MLX4_COMM_CMD_VHCR1, + MLX4_COMM_CMD_VHCR2, + MLX4_COMM_CMD_VHCR_EN, + MLX4_COMM_CMD_VHCR_POST, + MLX4_COMM_CMD_FLR = 254 +}; + +enum { + MLX4_VF_SMI_DISABLED, + MLX4_VF_SMI_ENABLED +}; + +/*The flag indicates that the slave should delay the RESET cmd*/ +#define MLX4_DELAY_RESET_SLAVE 0xbbbbbbb +/*indicates how many retries will be done if we are in the middle of FLR*/ +#define NUM_OF_RESET_RETRIES 10 +#define SLEEP_TIME_IN_RESET (2 * 1000) +enum mlx4_resource { + RES_QP, + RES_CQ, + RES_SRQ, + RES_XRCD, + RES_MPT, + RES_MTT, + RES_MAC, + RES_VLAN, + RES_EQ, + RES_COUNTER, + RES_FS_RULE, + MLX4_NUM_OF_RESOURCE_TYPE +}; + +enum mlx4_alloc_mode { + RES_OP_RESERVE, + RES_OP_RESERVE_AND_MAP, + RES_OP_MAP_ICM, +}; + +enum mlx4_res_tracker_free_type { + RES_TR_FREE_ALL, + RES_TR_FREE_SLAVES_ONLY, + RES_TR_FREE_STRUCTS_ONLY, +}; + +/* + *Virtual HCR structures. + * mlx4_vhcr is the sw representation, in machine endianness + * + * mlx4_vhcr_cmd is the formalized structure, the one that is passed + * to FW to go through communication channel. 
+ * It is big endian, and has the same structure as the physical HCR + * used by command interface + */ +struct mlx4_vhcr { + u64 in_param; + u64 out_param; + u32 in_modifier; + u32 errno; + u16 op; + u16 token; + u8 op_modifier; + u8 e_bit; +}; + +struct mlx4_vhcr_cmd { + __be64 in_param; + __be32 in_modifier; + u32 reserved1; + __be64 out_param; + __be16 token; + u16 reserved; + u8 status; + u8 flags; + __be16 opcode; +}; + +struct mlx4_cmd_info { + u16 opcode; + bool has_inbox; + bool has_outbox; + bool out_is_imm; + bool encode_slave_id; + int (*verify)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox); + int (*wrapper)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +}; + +#ifdef CONFIG_MLX4_DEBUG +extern int mlx4_debug_level; +#else /* CONFIG_MLX4_DEBUG */ +#define mlx4_debug_level (0) +#endif /* CONFIG_MLX4_DEBUG */ + +#define mlx4_dbg(mdev, format, ...) \ +do { \ + if (mlx4_debug_level) \ + dev_printk(KERN_DEBUG, \ + &(mdev)->persist->pdev->dev, format, \ + ##__VA_ARGS__); \ +} while (0) + +#define mlx4_err(mdev, format, ...) \ + dev_err(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__) +#define mlx4_info(mdev, format, ...) \ + dev_info(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__) +#define mlx4_warn(mdev, format, ...) \ + dev_warn(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__) + +extern int mlx4_log_num_mgm_entry_size; +extern int log_mtts_per_seg; +extern int mlx4_internal_err_reset; + +#define MLX4_MAX_NUM_SLAVES (min(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF, \ + MLX4_MFUNC_MAX)) +#define ALL_SLAVES 0xff + +struct mlx4_bitmap { + u32 last; + u32 top; + u32 max; + u32 reserved_top; + u32 mask; + u32 avail; + u32 effective_len; + spinlock_t lock; + unsigned long *table; +}; + +struct mlx4_buddy { + unsigned long **bits; + unsigned int *num_free; + u32 max_order; + spinlock_t lock; +}; + +struct mlx4_icm; + +struct mlx4_icm_table { + u64 virt; + int num_icm; + u32 num_obj; + int obj_size; + int lowmem; + int coherent; + struct mutex mutex; + struct mlx4_icm **icm; +}; + +#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28) +#define MLX4_MPT_FLAG_FREE (0x3UL << 28) +#define MLX4_MPT_FLAG_MIO (1 << 17) +#define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15) +#define MLX4_MPT_FLAG_PHYSICAL (1 << 9) +#define MLX4_MPT_FLAG_REGION (1 << 8) + +#define MLX4_MPT_PD_MASK (0x1FFFFUL) +#define MLX4_MPT_PD_VF_MASK (0xFE0000UL) +#define MLX4_MPT_PD_FLAG_FAST_REG (1 << 27) +#define MLX4_MPT_PD_FLAG_RAE (1 << 28) +#define MLX4_MPT_PD_FLAG_EN_INV (3 << 24) + +#define MLX4_MPT_QP_FLAG_BOUND_QP (1 << 7) + +#define MLX4_MPT_STATUS_SW 0xF0 +#define MLX4_MPT_STATUS_HW 0x00 + +#define MLX4_CQE_SIZE_MASK_STRIDE 0x3 +#define MLX4_EQE_SIZE_MASK_STRIDE 0x30 + +/* + * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. + */ +struct mlx4_mpt_entry { + __be32 flags; + __be32 qpn; + __be32 key; + __be32 pd_flags; + __be64 start; + __be64 length; + __be32 lkey; + __be32 win_cnt; + u8 reserved1[3]; + u8 mtt_rep; + __be64 mtt_addr; + __be32 mtt_sz; + __be32 entity_size; + __be32 first_byte_offset; +} __packed; + +/* + * Must be packed because start is 64 bits but only aligned to 32 bits. 
+ */ +struct mlx4_eq_context { + __be32 flags; + u16 reserved1[3]; + __be16 page_offset; + u8 log_eq_size; + u8 reserved2[4]; + u8 eq_period; + u8 reserved3; + u8 eq_max_count; + u8 reserved4[3]; + u8 intr; + u8 log_page_size; + u8 reserved5[2]; + u8 mtt_base_addr_h; + __be32 mtt_base_addr_l; + u32 reserved6[2]; + __be32 consumer_index; + __be32 producer_index; + u32 reserved7[4]; +}; + +struct mlx4_cq_context { + __be32 flags; + u16 reserved1[3]; + __be16 page_offset; + __be32 logsize_usrpage; + __be16 cq_period; + __be16 cq_max_count; + u8 reserved2[3]; + u8 comp_eqn; + u8 log_page_size; + u8 reserved3[2]; + u8 mtt_base_addr_h; + __be32 mtt_base_addr_l; + __be32 last_notified_index; + __be32 solicit_producer_index; + __be32 consumer_index; + __be32 producer_index; + u32 reserved4[2]; + __be64 db_rec_addr; +}; + +struct mlx4_srq_context { + __be32 state_logsize_srqn; + u8 logstride; + u8 reserved1; + __be16 xrcd; + __be32 pg_offset_cqn; + u32 reserved2; + u8 log_page_size; + u8 reserved3[2]; + u8 mtt_base_addr_h; + __be32 mtt_base_addr_l; + __be32 pd; + __be16 limit_watermark; + __be16 wqe_cnt; + u16 reserved4; + __be16 wqe_counter; + u32 reserved5; + __be64 db_rec_addr; +}; + +struct mlx4_eq_tasklet { + struct list_head list; + struct list_head process_list; + struct tasklet_struct task; + /* lock on completion tasklet list */ + spinlock_t lock; +}; + +struct mlx4_eq { + struct mlx4_dev *dev; + void __iomem *doorbell; + int eqn; + u32 cons_index; + u16 irq; + u16 have_irq; + int nent; + struct mlx4_buf_list *page_list; + struct mlx4_mtt mtt; + struct mlx4_eq_tasklet tasklet_ctx; +}; + +struct mlx4_slave_eqe { + u8 type; + u8 port; + u32 param; +}; + +struct mlx4_slave_event_eq_info { + int eqn; + u16 token; +}; + +struct mlx4_profile { + int num_qp; + int rdmarc_per_qp; + int num_srq; + int num_cq; + int num_mcg; + int num_mpt; + unsigned num_mtt; +}; + +struct mlx4_fw { + u64 clr_int_base; + u64 catas_offset; + u64 comm_base; + u64 clock_offset; + struct mlx4_icm *fw_icm; + struct mlx4_icm *aux_icm; + u32 catas_size; + u16 fw_pages; + u8 clr_int_bar; + u8 catas_bar; + u8 comm_bar; + u8 clock_bar; +}; + +struct mlx4_comm { + u32 slave_write; + u32 slave_read; +}; + +enum { + MLX4_MCAST_CONFIG = 0, + MLX4_MCAST_DISABLE = 1, + MLX4_MCAST_ENABLE = 2, +}; + +#define VLAN_FLTR_SIZE 128 + +struct mlx4_vlan_fltr { + __be32 entry[VLAN_FLTR_SIZE]; +}; + +struct mlx4_mcast_entry { + struct list_head list; + u64 addr; +}; + +struct mlx4_promisc_qp { + struct list_head list; + u32 qpn; +}; + +struct mlx4_steer_index { + struct list_head list; + unsigned int index; + struct list_head duplicates; +}; + +#define MLX4_EVENT_TYPES_NUM 64 + +struct mlx4_slave_state { + u8 comm_toggle; + u8 last_cmd; + u8 init_port_mask; + bool active; + bool old_vlan_api; + u8 function; + dma_addr_t vhcr_dma; + u16 mtu[MLX4_MAX_PORTS + 1]; + __be32 ib_cap_mask[MLX4_MAX_PORTS + 1]; + struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES]; + struct list_head mcast_filters[MLX4_MAX_PORTS + 1]; + struct mlx4_vlan_fltr *vlan_filter[MLX4_MAX_PORTS + 1]; + /* event type to eq number lookup */ + struct mlx4_slave_event_eq_info event_eq[MLX4_EVENT_TYPES_NUM]; + u16 eq_pi; + u16 eq_ci; + spinlock_t lock; + /*initialized via the kzalloc*/ + u8 is_slave_going_down; + u32 cookie; + enum slave_port_state port_state[MLX4_MAX_PORTS + 1]; +}; + +#define MLX4_VGT 4095 +#define NO_INDX (-1) + +struct mlx4_vport_state { + u64 mac; + u16 default_vlan; + u8 default_qos; + u32 tx_rate; + bool spoofchk; + u32 link_state; + u8 qos_vport; + __be64 guid; 
+}; + +struct mlx4_vf_admin_state { + struct mlx4_vport_state vport[MLX4_MAX_PORTS + 1]; + u8 enable_smi[MLX4_MAX_PORTS + 1]; +}; + +struct mlx4_vport_oper_state { + struct mlx4_vport_state state; + int mac_idx; + int vlan_idx; +}; + +struct mlx4_vf_oper_state { + struct mlx4_vport_oper_state vport[MLX4_MAX_PORTS + 1]; + u8 smi_enabled[MLX4_MAX_PORTS + 1]; +}; + +struct slave_list { + struct mutex mutex; + struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE]; +}; + +struct resource_allocator { + spinlock_t alloc_lock; /* protect quotas */ + union { + int res_reserved; + int res_port_rsvd[MLX4_MAX_PORTS]; + }; + union { + int res_free; + int res_port_free[MLX4_MAX_PORTS]; + }; + int *quota; + int *allocated; + int *guaranteed; +}; + +struct mlx4_resource_tracker { + spinlock_t lock; + /* tree for each resources */ + struct rb_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE]; + /* num_of_slave's lists, one per slave */ + struct slave_list *slave_list; + struct resource_allocator res_alloc[MLX4_NUM_OF_RESOURCE_TYPE]; +}; + +#define SLAVE_EVENT_EQ_SIZE 128 +struct mlx4_slave_event_eq { + u32 eqn; + u32 cons; + u32 prod; + spinlock_t event_lock; + struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE]; +}; + +struct mlx4_qos_manager { + int num_of_qos_vfs; + DECLARE_BITMAP(priority_bm, MLX4_NUM_UP); +}; + +struct mlx4_master_qp0_state { + int proxy_qp0_active; + int qp0_active; + int port_active; +}; + +struct mlx4_mfunc_master_ctx { + struct mlx4_slave_state *slave_state; + struct mlx4_vf_admin_state *vf_admin; + struct mlx4_vf_oper_state *vf_oper; + struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1]; + int init_port_ref[MLX4_MAX_PORTS + 1]; + u16 max_mtu[MLX4_MAX_PORTS + 1]; + int disable_mcast_ref[MLX4_MAX_PORTS + 1]; + struct mlx4_resource_tracker res_tracker; + struct workqueue_struct *comm_wq; + struct work_struct comm_work; + struct work_struct slave_event_work; + struct work_struct slave_flr_event_work; + spinlock_t slave_state_lock; + __be32 comm_arm_bit_vector[4]; + struct mlx4_eqe cmd_eqe; + struct mlx4_slave_event_eq slave_eq; + struct mutex gen_eqe_mutex[MLX4_MFUNC_MAX]; + struct mlx4_qos_manager qos_ctl[MLX4_MAX_PORTS + 1]; +}; + +struct mlx4_mfunc { + struct mlx4_comm __iomem *comm; + struct mlx4_vhcr_cmd *vhcr; + dma_addr_t vhcr_dma; + + struct mlx4_mfunc_master_ctx master; +}; + +#define MGM_QPN_MASK 0x00FFFFFF +#define MGM_BLCK_LB_BIT 30 + +struct mlx4_mgm { + __be32 next_gid_index; + __be32 members_count; + u32 reserved[2]; + u8 gid[16]; + __be32 qp[MLX4_MAX_QP_PER_MGM]; +}; + +struct mlx4_cmd { + struct pci_pool *pool; + void __iomem *hcr; + struct mutex slave_cmd_mutex; + struct semaphore poll_sem; + struct semaphore event_sem; + int max_cmds; + spinlock_t context_lock; + int free_head; + struct mlx4_cmd_context *context; + u16 token_mask; + u8 use_events; + u8 toggle; + u8 comm_toggle; + u8 initialized; +}; + +enum { + MLX4_VF_IMMED_VLAN_FLAG_VLAN = 1 << 0, + MLX4_VF_IMMED_VLAN_FLAG_QOS = 1 << 1, + MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE = 1 << 2, +}; +struct mlx4_vf_immed_vlan_work { + struct work_struct work; + struct mlx4_priv *priv; + int flags; + int slave; + int vlan_ix; + int orig_vlan_ix; + u8 port; + u8 qos; + u8 qos_vport; + u16 vlan_id; + u16 orig_vlan_id; +}; + + +struct mlx4_uar_table { + struct mlx4_bitmap bitmap; +}; + +struct mlx4_mr_table { + struct mlx4_bitmap mpt_bitmap; + struct mlx4_buddy mtt_buddy; + u64 mtt_base; + u64 mpt_base; + struct mlx4_icm_table mtt_table; + struct mlx4_icm_table dmpt_table; +}; + +struct mlx4_cq_table { + struct mlx4_bitmap bitmap; + 
spinlock_t lock; + struct radix_tree_root tree; + struct mlx4_icm_table table; + struct mlx4_icm_table cmpt_table; +}; + +struct mlx4_eq_table { + struct mlx4_bitmap bitmap; + char *irq_names; + void __iomem *clr_int; + void __iomem **uar_map; + u32 clr_mask; + struct mlx4_eq *eq; + struct mlx4_icm_table table; + struct mlx4_icm_table cmpt_table; + int have_irq; + u8 inta_pin; +}; + +struct mlx4_srq_table { + struct mlx4_bitmap bitmap; + spinlock_t lock; + struct radix_tree_root tree; + struct mlx4_icm_table table; + struct mlx4_icm_table cmpt_table; +}; + +enum mlx4_qp_table_zones { + MLX4_QP_TABLE_ZONE_GENERAL, + MLX4_QP_TABLE_ZONE_RSS, + MLX4_QP_TABLE_ZONE_RAW_ETH, + MLX4_QP_TABLE_ZONE_NUM +}; + +struct mlx4_qp_table { + struct mlx4_bitmap *bitmap_gen; + struct mlx4_zone_allocator *zones; + u32 zones_uids[MLX4_QP_TABLE_ZONE_NUM]; + u32 rdmarc_base; + int rdmarc_shift; + spinlock_t lock; + struct mlx4_icm_table qp_table; + struct mlx4_icm_table auxc_table; + struct mlx4_icm_table altc_table; + struct mlx4_icm_table rdmarc_table; + struct mlx4_icm_table cmpt_table; +}; + +struct mlx4_mcg_table { + struct mutex mutex; + struct mlx4_bitmap bitmap; + struct mlx4_icm_table table; +}; + +struct mlx4_catas_err { + u32 __iomem *map; + struct timer_list timer; + struct list_head list; +}; + +#define MLX4_MAX_MAC_NUM 128 +#define MLX4_MAC_TABLE_SIZE (MLX4_MAX_MAC_NUM << 3) + +struct mlx4_mac_table { + __be64 entries[MLX4_MAX_MAC_NUM]; + int refs[MLX4_MAX_MAC_NUM]; + struct mutex mutex; + int total; + int max; +}; + +#define MLX4_ROCE_GID_ENTRY_SIZE 16 + +struct mlx4_roce_gid_entry { + u8 raw[MLX4_ROCE_GID_ENTRY_SIZE]; +}; + +struct mlx4_roce_gid_table { + struct mlx4_roce_gid_entry roce_gids[MLX4_ROCE_MAX_GIDS]; + struct mutex mutex; +}; + +#define MLX4_MAX_VLAN_NUM 128 +#define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2) + +struct mlx4_vlan_table { + __be32 entries[MLX4_MAX_VLAN_NUM]; + int refs[MLX4_MAX_VLAN_NUM]; + struct mutex mutex; + int total; + int max; +}; + +#define SET_PORT_GEN_ALL_VALID 0x7 +#define SET_PORT_PROMISC_SHIFT 31 +#define SET_PORT_MC_PROMISC_SHIFT 30 + +enum { + MCAST_DIRECT_ONLY = 0, + MCAST_DIRECT = 1, + MCAST_DEFAULT = 2 +}; + + +struct mlx4_set_port_general_context { + u16 reserved1; + u8 v_ignore_fcs; + u8 flags; + u8 ignore_fcs; + u8 reserved2; + __be16 mtu; + u8 pptx; + u8 pfctx; + u16 reserved3; + u8 pprx; + u8 pfcrx; + u16 reserved4; +}; + +struct mlx4_set_port_rqp_calc_context { + __be32 base_qpn; + u8 rererved; + u8 n_mac; + u8 n_vlan; + u8 n_prio; + u8 reserved2[3]; + u8 mac_miss; + u8 intra_no_vlan; + u8 no_vlan; + u8 intra_vlan_miss; + u8 vlan_miss; + u8 reserved3[3]; + u8 no_vlan_prio; + __be32 promisc; + __be32 mcast; +}; + +struct mlx4_port_info { + struct mlx4_dev *dev; + int port; + char dev_name[16]; + struct device_attribute port_attr; + enum mlx4_port_type tmp_type; + char dev_mtu_name[16]; + struct device_attribute port_mtu_attr; + struct mlx4_mac_table mac_table; + struct mlx4_vlan_table vlan_table; + struct mlx4_roce_gid_table gid_table; + int base_qpn; +}; + +struct mlx4_sense { + struct mlx4_dev *dev; + u8 do_sense_port[MLX4_MAX_PORTS + 1]; + u8 sense_allowed[MLX4_MAX_PORTS + 1]; + struct delayed_work sense_poll; +}; + +struct mlx4_msix_ctl { + u64 pool_bm; + struct mutex pool_lock; +}; + +struct mlx4_steer { + struct list_head promisc_qps[MLX4_NUM_STEERS]; + struct list_head steer_entries[MLX4_NUM_STEERS]; +}; + +enum { + MLX4_PCI_DEV_IS_VF = 1 << 0, + MLX4_PCI_DEV_FORCE_SENSE_PORT = 1 << 1, +}; + +enum { + MLX4_NO_RR = 0, + MLX4_USE_RR = 1, 
+}; + +struct mlx4_priv { + struct mlx4_dev dev; + + struct list_head dev_list; + struct list_head ctx_list; + spinlock_t ctx_lock; + + int pci_dev_data; + int removed; + + struct list_head pgdir_list; + struct mutex pgdir_mutex; + + struct mlx4_fw fw; + struct mlx4_cmd cmd; + struct mlx4_mfunc mfunc; + + struct mlx4_bitmap pd_bitmap; + struct mlx4_bitmap xrcd_bitmap; + struct mlx4_uar_table uar_table; + struct mlx4_mr_table mr_table; + struct mlx4_cq_table cq_table; + struct mlx4_eq_table eq_table; + struct mlx4_srq_table srq_table; + struct mlx4_qp_table qp_table; + struct mlx4_mcg_table mcg_table; + struct mlx4_bitmap counters_bitmap; + + struct mlx4_catas_err catas_err; + + void __iomem *clr_base; + + struct mlx4_uar driver_uar; + void __iomem *kar; + struct mlx4_port_info port[MLX4_MAX_PORTS + 1]; + struct mlx4_sense sense; + struct mutex port_mutex; + struct mlx4_msix_ctl msix_ctl; + struct mlx4_steer *steer; + struct list_head bf_list; + struct mutex bf_mutex; + struct io_mapping *bf_mapping; + void __iomem *clock_mapping; + int reserved_mtts; + int fs_hash_mode; + u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS]; + struct mlx4_port_map v2p; /* cached port mapping configuration */ + struct mutex bond_mutex; /* for bond mode */ + __be64 slave_node_guids[MLX4_MFUNC_MAX]; + + atomic_t opreq_count; + struct work_struct opreq_task; +}; + +static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) +{ + return container_of(dev, struct mlx4_priv, dev); +} + +#define MLX4_SENSE_RANGE (HZ * 3) + +extern struct workqueue_struct *mlx4_wq; + +u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap); +void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr); +u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, + int align, u32 skip_mask); +void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt, + int use_rr); +u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap); +int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, + u32 reserved_bot, u32 resetrved_top); +void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap); + +int mlx4_reset(struct mlx4_dev *dev); + +int mlx4_alloc_eq_table(struct mlx4_dev *dev); +void mlx4_free_eq_table(struct mlx4_dev *dev); + +int mlx4_init_pd_table(struct mlx4_dev *dev); +int mlx4_init_xrcd_table(struct mlx4_dev *dev); +int mlx4_init_uar_table(struct mlx4_dev *dev); +int mlx4_init_mr_table(struct mlx4_dev *dev); +int mlx4_init_eq_table(struct mlx4_dev *dev); +int mlx4_init_cq_table(struct mlx4_dev *dev); +int mlx4_init_qp_table(struct mlx4_dev *dev); +int mlx4_init_srq_table(struct mlx4_dev *dev); +int mlx4_init_mcg_table(struct mlx4_dev *dev); + +void mlx4_cleanup_pd_table(struct mlx4_dev *dev); +void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev); +void mlx4_cleanup_uar_table(struct mlx4_dev *dev); +void mlx4_cleanup_mr_table(struct mlx4_dev *dev); +void mlx4_cleanup_eq_table(struct mlx4_dev *dev); +void mlx4_cleanup_cq_table(struct mlx4_dev *dev); +void mlx4_cleanup_qp_table(struct mlx4_dev *dev); +void mlx4_cleanup_srq_table(struct mlx4_dev *dev); +void mlx4_cleanup_mcg_table(struct mlx4_dev *dev); +int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp); +void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn); +int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn); +void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn); +int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn); +void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn); +int __mlx4_mpt_reserve(struct mlx4_dev 
*dev); +void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index); +int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp); +void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index); +u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order); +void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order); + +int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SYNC_TPT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, + int *base, u8 flags); +void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); +int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac); +void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac); +int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + int start_index, int npages, u64 *page_list); +int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx); +void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx); +int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn); +void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn); + +void mlx4_start_catas_poll(struct mlx4_dev *dev); +void mlx4_stop_catas_poll(struct mlx4_dev *dev); +int mlx4_catas_init(struct mlx4_dev *dev); +void mlx4_catas_end(struct mlx4_dev *dev); +int mlx4_restart_one(struct pci_dev *pdev); +int mlx4_register_device(struct mlx4_dev *dev); +void mlx4_unregister_device(struct mlx4_dev *dev); +void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, + unsigned long param); + +struct mlx4_dev_cap; +struct mlx4_init_hca_param; + +u64 mlx4_make_profile(struct mlx4_dev *dev, + struct mlx4_profile *request, + struct mlx4_dev_cap *dev_cap, + struct mlx4_init_hca_param *init_hca); +void mlx4_master_comm_channel(struct work_struct *work); +void mlx4_gen_slave_eqe(struct work_struct *work); +void mlx4_master_handle_slave_flr(struct work_struct *work); + +int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr 
*vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_COMM_INT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct 
mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_2ERR_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_RTS2SQD_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QUERY_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); + +int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe); + +enum { + MLX4_CMD_CLEANUP_STRUCT = 1UL << 0, + MLX4_CMD_CLEANUP_POOL = 1UL << 1, + MLX4_CMD_CLEANUP_HCR = 1UL << 2, + MLX4_CMD_CLEANUP_VHCR = 1UL << 3, + MLX4_CMD_CLEANUP_ALL = (MLX4_CMD_CLEANUP_VHCR << 1) - 1 +}; + +int mlx4_cmd_init(struct mlx4_dev *dev); +void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask); +int mlx4_multi_func_init(struct mlx4_dev *dev); +int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev); +void mlx4_multi_func_cleanup(struct mlx4_dev *dev); +void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param); +int mlx4_cmd_use_events(struct mlx4_dev *dev); +void mlx4_cmd_use_polling(struct mlx4_dev *dev); + +int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param, + u16 op, unsigned long timeout); + +void mlx4_cq_tasklet_cb(unsigned long data); +void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn); +void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type); + +void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type); + +void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type); + +void mlx4_enter_error_state(struct mlx4_dev_persistent *persist); + +int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, + enum mlx4_port_type *type); +void mlx4_do_sense_ports(struct mlx4_dev *dev, + enum mlx4_port_type *stype, + enum mlx4_port_type *defaults); +void mlx4_start_sense(struct mlx4_dev *dev); +void mlx4_stop_sense(struct mlx4_dev *dev); +void mlx4_sense_init(struct mlx4_dev *dev); +int mlx4_check_port_params(struct mlx4_dev *dev, + enum mlx4_port_type *port_type); +int mlx4_change_port_types(struct mlx4_dev *dev, + enum mlx4_port_type *port_types); + +void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); +void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); +void mlx4_init_roce_gid_table(struct mlx4_dev *dev, + struct mlx4_roce_gid_table *table); +void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan); +int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); + +int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz); +/* resource tracker functions*/ +int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, + enum mlx4_resource resource_type, + u64 resource_id, int *slave); 
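The MLX4_CMD_CLEANUP_* mask above relies on the flags being consecutive single bits, so MLX4_CMD_CLEANUP_ALL can be derived from the highest flag alone. A tiny userspace check of that arithmetic (the enum values are copied from the header; the main() harness is only for illustration, and the suggestion that error paths pass a partial mask to mlx4_cmd_cleanup() is an assumption, not something this header states):

#include <assert.h>
#include <stdio.h>

/* Values copied verbatim from the MLX4_CMD_CLEANUP_* enum above. */
enum {
        MLX4_CMD_CLEANUP_STRUCT = 1UL << 0,
        MLX4_CMD_CLEANUP_POOL   = 1UL << 1,
        MLX4_CMD_CLEANUP_HCR    = 1UL << 2,
        MLX4_CMD_CLEANUP_VHCR   = 1UL << 3,
        MLX4_CMD_CLEANUP_ALL    = (MLX4_CMD_CLEANUP_VHCR << 1) - 1
};

int main(void)
{
        /* (1UL << 4) - 1 == 0xf: every cleanup bit up to VHCR is set. */
        assert(MLX4_CMD_CLEANUP_ALL ==
               (MLX4_CMD_CLEANUP_STRUCT | MLX4_CMD_CLEANUP_POOL |
                MLX4_CMD_CLEANUP_HCR | MLX4_CMD_CLEANUP_VHCR));
        printf("MLX4_CMD_CLEANUP_ALL = 0x%x\n",
               (unsigned int)MLX4_CMD_CLEANUP_ALL);
        return 0;
}

The cleanup_mask parameter of mlx4_cmd_cleanup() presumably lets a full teardown pass MLX4_CMD_CLEANUP_ALL while partial error paths clear only the bits they actually set up.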
+void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id); +void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave); +int mlx4_init_resource_tracker(struct mlx4_dev *dev); + +void mlx4_free_resource_tracker(struct mlx4_dev *dev, + enum mlx4_res_tracker_free_type type); + +int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); + +int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port, + int *gid_tbl_len, int *pkey_tbl_len); + +int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); + +int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); + +int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], + enum mlx4_protocol prot, enum mlx4_steer_type steer); +int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], + int block_mcast_loopback, enum mlx4_protocol prot, + enum mlx4_steer_type steer); +int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, + u8 gid[16], u8 port, + int block_mcast_loopback, + enum mlx4_protocol prot, u64 *reg_id); +int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function, + int port, void *buf); +int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod, + struct mlx4_cmd_mailbox *outbox); +int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_PKEY_TABLE_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox 
*outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); + +int mlx4_get_mgm_entry_size(struct mlx4_dev *dev); +int mlx4_get_qp_per_mgm(struct mlx4_dev *dev); + +static inline void set_param_l(u64 *arg, u32 val) +{ + *arg = (*arg & 0xffffffff00000000ULL) | (u64) val; +} + +static inline void set_param_h(u64 *arg, u32 val) +{ + *arg = (*arg & 0xffffffff) | ((u64) val << 32); +} + +static inline u32 get_param_l(u64 *arg) +{ + return (u32) (*arg & 0xffffffff); +} + +static inline u32 get_param_h(u64 *arg) +{ + return (u32)(*arg >> 32); +} + +static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev) +{ + return &mlx4_priv(dev)->mfunc.master.res_tracker.lock; +} + +#define NOT_MASKED_PD_BITS 17 + +void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work); + +void mlx4_init_quotas(struct mlx4_dev *dev); + +int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port); +/* Returns the VF index of slave */ +int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave); +int mlx4_config_mad_demux(struct mlx4_dev *dev); +int mlx4_do_bond(struct mlx4_dev *dev, bool enable); + +enum mlx4_zone_flags { + MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO = 1UL << 0, + MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO = 1UL << 1, + MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO = 1UL << 2, + MLX4_ZONE_USE_RR = 1UL << 3, +}; + +enum mlx4_zone_alloc_flags { + /* No two objects could overlap between zones. UID + * could be left unused. If this flag is given and + * two overlapped zones are used, an object will be free'd + * from the smallest possible matching zone. + */ + MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP = 1UL << 0, +}; + +struct mlx4_zone_allocator; + +/* Create a new zone allocator */ +struct mlx4_zone_allocator *mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags); + +/* Attach a mlx4_bitmap <bitmap> of priority <priority> to the zone allocator + * <zone_alloc>. Allocating an object from this zone adds an offset <offset>. + * Similarly, when searching for an object to free, this offset it taken into + * account. The use_rr mlx4_ib parameter for allocating objects from this <bitmap> + * is given through the MLX4_ZONE_USE_RR flag in <flags>. + * When an allocation fails, <zone_alloc> tries to allocate from other zones + * according to the policy set by <flags>. <puid> is the unique identifier + * received to this zone. + */ +int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc, + struct mlx4_bitmap *bitmap, + u32 flags, + int priority, + int offset, + u32 *puid); + +/* Remove bitmap indicated by <uid> from <zone_alloc> */ +int mlx4_zone_remove_one(struct mlx4_zone_allocator *zone_alloc, u32 uid); + +/* Delete the zone allocator <zone_alloc. This function doesn't destroy + * the attached bitmaps. 
+ */ +void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc); + +/* Allocate <count> objects with align <align> and skip_mask <skip_mask> + * from the mlx4_bitmap whose uid is <uid>. The bitmap which we actually + * allocated from is returned in <puid>. If the allocation fails, a negative + * number is returned. Otherwise, the offset of the first object is returned. + */ +u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count, + int align, u32 skip_mask, u32 *puid); + +/* Free <count> objects, start from <obj> of the uid <uid> from zone_allocator + * <zones>. + */ +u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones, + u32 uid, u32 obj, u32 count); + +/* If <zones> was allocated with MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP, instead of + * specifying the uid when freeing an object, zone allocator could figure it by + * itself. Other parameters are similar to mlx4_zone_free. + */ +u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count); + +/* Returns a pointer to mlx4_bitmap that was attached to <zones> with <uid> */ +struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 uid); + +#endif /* MLX4_H */ diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h new file mode 100644 index 000000000..909fcf803 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -0,0 +1,887 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#ifndef _MLX4_EN_H_ +#define _MLX4_EN_H_ + +#include <linux/bitops.h> +#include <linux/compiler.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/netdevice.h> +#include <linux/if_vlan.h> +#include <linux/net_tstamp.h> +#ifdef CONFIG_MLX4_EN_DCB +#include <linux/dcbnl.h> +#endif +#include <linux/cpu_rmap.h> +#include <linux/ptp_clock_kernel.h> + +#include <linux/mlx4/device.h> +#include <linux/mlx4/qp.h> +#include <linux/mlx4/cq.h> +#include <linux/mlx4/srq.h> +#include <linux/mlx4/doorbell.h> +#include <linux/mlx4/cmd.h> + +#include "en_port.h" +#include "mlx4_stats.h" + +#define DRV_NAME "mlx4_en" +#define DRV_VERSION "2.2-1" +#define DRV_RELDATE "Feb 2014" + +#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) + +/* + * Device constants + */ + + +#define MLX4_EN_PAGE_SHIFT 12 +#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT) +#define DEF_RX_RINGS 16 +#define MAX_RX_RINGS 128 +#define MIN_RX_RINGS 4 +#define TXBB_SIZE 64 +#define HEADROOM (2048 / TXBB_SIZE + 1) +#define STAMP_STRIDE 64 +#define STAMP_DWORDS (STAMP_STRIDE / 4) +#define STAMP_SHIFT 31 +#define STAMP_VAL 0x7fffffff +#define STATS_DELAY (HZ / 4) +#define SERVICE_TASK_DELAY (HZ / 4) +#define MAX_NUM_OF_FS_RULES 256 + +#define MLX4_EN_FILTER_HASH_SHIFT 4 +#define MLX4_EN_FILTER_EXPIRY_QUOTA 60 + +/* Typical TSO descriptor with 16 gather entries is 352 bytes... */ +#define MAX_DESC_SIZE 512 +#define MAX_DESC_TXBBS (MAX_DESC_SIZE / TXBB_SIZE) + +/* + * OS related constants and tunables + */ + +#define MLX4_EN_PRIV_FLAGS_BLUEFLAME 1 + +#define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ) + +/* Use the maximum between 16384 and a single page */ +#define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(16384) + +#define MLX4_EN_ALLOC_PREFER_ORDER PAGE_ALLOC_COSTLY_ORDER + +/* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU + * and 4K allocations) */ +enum { + FRAG_SZ0 = 1536 - NET_IP_ALIGN, + FRAG_SZ1 = 4096, + FRAG_SZ2 = 4096, + FRAG_SZ3 = MLX4_EN_ALLOC_SIZE +}; +#define MLX4_EN_MAX_RX_FRAGS 4 + +/* Maximum ring sizes */ +#define MLX4_EN_MAX_TX_SIZE 8192 +#define MLX4_EN_MAX_RX_SIZE 8192 + +/* Minimum ring size for our page-allocation scheme to work */ +#define MLX4_EN_MIN_RX_SIZE (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES) +#define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE) + +#define MLX4_EN_SMALL_PKT_SIZE 64 +#define MLX4_EN_MIN_TX_RING_P_UP 1 +#define MLX4_EN_MAX_TX_RING_P_UP 32 +#define MLX4_EN_NUM_UP 8 +#define MLX4_EN_DEF_TX_RING_SIZE 512 +#define MLX4_EN_DEF_RX_RING_SIZE 1024 +#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \ + MLX4_EN_NUM_UP) + +#define MLX4_EN_DEFAULT_TX_WORK 256 + +/* Target number of packets to coalesce with interrupt moderation */ +#define MLX4_EN_RX_COAL_TARGET 44 +#define MLX4_EN_RX_COAL_TIME 0x10 + +#define MLX4_EN_TX_COAL_PKTS 16 +#define MLX4_EN_TX_COAL_TIME 0x10 + +#define MLX4_EN_RX_RATE_LOW 400000 +#define MLX4_EN_RX_COAL_TIME_LOW 0 +#define MLX4_EN_RX_RATE_HIGH 450000 +#define MLX4_EN_RX_COAL_TIME_HIGH 128 +#define MLX4_EN_RX_SIZE_THRESH 1024 +#define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH) +#define MLX4_EN_SAMPLE_INTERVAL 0 +#define MLX4_EN_AVG_PKT_SMALL 256 + +#define MLX4_EN_AUTO_CONF 0xffff + +#define MLX4_EN_DEF_RX_PAUSE 1 +#define MLX4_EN_DEF_TX_PAUSE 1 + +/* Interval between successive polls in the Tx routine when polling is used + instead of interrupts (in per-core Tx rings) - should be power of 2 */ +#define MLX4_EN_TX_POLL_MODER 16 +#define MLX4_EN_TX_POLL_TIMEOUT (HZ / 4) + +#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN) +#define 
HEADER_COPY_SIZE (128 - NET_IP_ALIGN) +#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN) + +#define MLX4_EN_MIN_MTU 46 +#define ETH_BCAST 0xffffffffffffULL + +#define MLX4_EN_LOOPBACK_RETRIES 5 +#define MLX4_EN_LOOPBACK_TIMEOUT 100 + +#ifdef MLX4_EN_PERF_STAT +/* Number of samples to 'average' */ +#define AVG_SIZE 128 +#define AVG_FACTOR 1024 + +#define INC_PERF_COUNTER(cnt) (++(cnt)) +#define ADD_PERF_COUNTER(cnt, add) ((cnt) += (add)) +#define AVG_PERF_COUNTER(cnt, sample) \ + ((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE) +#define GET_PERF_COUNTER(cnt) (cnt) +#define GET_AVG_PERF_COUNTER(cnt) ((cnt) / AVG_FACTOR) + +#else + +#define INC_PERF_COUNTER(cnt) do {} while (0) +#define ADD_PERF_COUNTER(cnt, add) do {} while (0) +#define AVG_PERF_COUNTER(cnt, sample) do {} while (0) +#define GET_PERF_COUNTER(cnt) (0) +#define GET_AVG_PERF_COUNTER(cnt) (0) +#endif /* MLX4_EN_PERF_STAT */ + +/* Constants for TX flow */ +enum { + MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */ + MAX_BF = 256, + MIN_PKT_LEN = 17, +}; + +/* + * Configurables + */ + +enum cq_type { + RX = 0, + TX = 1, +}; + + +/* + * Useful macros + */ +#define ROUNDUP_LOG2(x) ilog2(roundup_pow_of_two(x)) +#define XNOR(x, y) (!(x) == !(y)) + + +struct mlx4_en_tx_info { + struct sk_buff *skb; + dma_addr_t map0_dma; + u32 map0_byte_count; + u32 nr_txbb; + u32 nr_bytes; + u8 linear; + u8 data_offset; + u8 inl; + u8 ts_requested; + u8 nr_maps; +} ____cacheline_aligned_in_smp; + + +#define MLX4_EN_BIT_DESC_OWN 0x80000000 +#define CTRL_SIZE sizeof(struct mlx4_wqe_ctrl_seg) +#define MLX4_EN_MEMTYPE_PAD 0x100 +#define DS_SIZE sizeof(struct mlx4_wqe_data_seg) + + +struct mlx4_en_tx_desc { + struct mlx4_wqe_ctrl_seg ctrl; + union { + struct mlx4_wqe_data_seg data; /* at least one data segment */ + struct mlx4_wqe_lso_seg lso; + struct mlx4_wqe_inline_seg inl; + }; +}; + +#define MLX4_EN_USE_SRQ 0x01000000 + +#define MLX4_EN_CX3_LOW_ID 0x1000 +#define MLX4_EN_CX3_HIGH_ID 0x1005 + +struct mlx4_en_rx_alloc { + struct page *page; + dma_addr_t dma; + u32 page_offset; + u32 page_size; +}; + +struct mlx4_en_tx_ring { + /* cache line used and dirtied in tx completion + * (mlx4_en_free_tx_buf()) + */ + u32 last_nr_txbb; + u32 cons; + unsigned long wake_queue; + + /* cache line used and dirtied in mlx4_en_xmit() */ + u32 prod ____cacheline_aligned_in_smp; + unsigned long bytes; + unsigned long packets; + unsigned long tx_csum; + unsigned long tso_packets; + unsigned long xmit_more; + struct mlx4_bf bf; + unsigned long queue_stopped; + + /* Following part should be mostly read */ + cpumask_t affinity_mask; + struct mlx4_qp qp; + struct mlx4_hwq_resources wqres; + u32 size; /* number of TXBBs */ + u32 size_mask; + u16 stride; + u32 full_size; + u16 cqn; /* index of port CQ associated with this ring */ + u32 buf_size; + __be32 doorbell_qpn; + __be32 mr_key; + void *buf; + struct mlx4_en_tx_info *tx_info; + u8 *bounce_buf; + struct mlx4_qp_context context; + int qpn; + enum mlx4_qp_state qp_state; + u8 queue_index; + bool bf_enabled; + bool bf_alloced; + struct netdev_queue *tx_queue; + int hwtstamp_tx_type; +} ____cacheline_aligned_in_smp; + +struct mlx4_en_rx_desc { + /* actual number of entries depends on rx ring stride */ + struct mlx4_wqe_data_seg data[0]; +}; + +struct mlx4_en_rx_ring { + struct mlx4_hwq_resources wqres; + struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS]; + u32 size ; /* number of Rx descs*/ + u32 actual_size; + u32 size_mask; + u16 stride; + u16 log_stride; + u16 cqn; /* index of port CQ 
associated with this ring */ + u32 prod; + u32 cons; + u32 buf_size; + u8 fcs_del; + void *buf; + void *rx_info; + unsigned long bytes; + unsigned long packets; +#ifdef CONFIG_NET_RX_BUSY_POLL + unsigned long yields; + unsigned long misses; + unsigned long cleaned; +#endif + unsigned long csum_ok; + unsigned long csum_none; + unsigned long csum_complete; + int hwtstamp_rx_filter; + cpumask_var_t affinity_mask; +}; + +struct mlx4_en_cq { + struct mlx4_cq mcq; + struct mlx4_hwq_resources wqres; + int ring; + struct net_device *dev; + struct napi_struct napi; + int size; + int buf_size; + unsigned vector; + enum cq_type is_tx; + u16 moder_time; + u16 moder_cnt; + struct mlx4_cqe *buf; +#define MLX4_EN_OPCODE_ERROR 0x1e + +#ifdef CONFIG_NET_RX_BUSY_POLL + unsigned int state; +#define MLX4_EN_CQ_STATE_IDLE 0 +#define MLX4_EN_CQ_STATE_NAPI 1 /* NAPI owns this CQ */ +#define MLX4_EN_CQ_STATE_POLL 2 /* poll owns this CQ */ +#define MLX4_CQ_LOCKED (MLX4_EN_CQ_STATE_NAPI | MLX4_EN_CQ_STATE_POLL) +#define MLX4_EN_CQ_STATE_NAPI_YIELD 4 /* NAPI yielded this CQ */ +#define MLX4_EN_CQ_STATE_POLL_YIELD 8 /* poll yielded this CQ */ +#define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD) +#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD) + spinlock_t poll_lock; /* protects from LLS/napi conflicts */ +#endif /* CONFIG_NET_RX_BUSY_POLL */ + struct irq_desc *irq_desc; +}; + +struct mlx4_en_port_profile { + u32 flags; + u32 tx_ring_num; + u32 rx_ring_num; + u32 tx_ring_size; + u32 rx_ring_size; + u8 rx_pause; + u8 rx_ppp; + u8 tx_pause; + u8 tx_ppp; + int rss_rings; + int inline_thold; +}; + +struct mlx4_en_profile { + int udp_rss; + u8 rss_mask; + u32 active_ports; + u32 small_pkt_int; + u8 no_reset; + u8 num_tx_rings_p_up; + struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1]; +}; + +struct mlx4_en_dev { + struct mlx4_dev *dev; + struct pci_dev *pdev; + struct mutex state_lock; + struct net_device *pndev[MLX4_MAX_PORTS + 1]; + struct net_device *upper[MLX4_MAX_PORTS + 1]; + u32 port_cnt; + bool device_up; + struct mlx4_en_profile profile; + u32 LSO_support; + struct workqueue_struct *workqueue; + struct device *dma_device; + void __iomem *uar_map; + struct mlx4_uar priv_uar; + struct mlx4_mr mr; + u32 priv_pdn; + spinlock_t uar_lock; + u8 mac_removed[MLX4_MAX_PORTS + 1]; + rwlock_t clock_lock; + u32 nominal_c_mult; + struct cyclecounter cycles; + struct timecounter clock; + unsigned long last_overflow_check; + unsigned long overflow_period; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + struct notifier_block nb; +}; + + +struct mlx4_en_rss_map { + int base_qpn; + struct mlx4_qp qps[MAX_RX_RINGS]; + enum mlx4_qp_state state[MAX_RX_RINGS]; + struct mlx4_qp indir_qp; + enum mlx4_qp_state indir_state; +}; + +enum mlx4_en_port_flag { + MLX4_EN_PORT_ANC = 1<<0, /* Auto-negotiation complete */ + MLX4_EN_PORT_ANE = 1<<1, /* Auto-negotiation enabled */ +}; + +struct mlx4_en_port_state { + int link_state; + int link_speed; + int transceiver; + u32 flags; +}; + +enum mlx4_en_mclist_act { + MCLIST_NONE, + MCLIST_REM, + MCLIST_ADD, +}; + +struct mlx4_en_mc_list { + struct list_head list; + enum mlx4_en_mclist_act action; + u8 addr[ETH_ALEN]; + u64 reg_id; + u64 tunnel_reg_id; +}; + +struct mlx4_en_frag_info { + u16 frag_size; + u16 frag_prefix_size; + u16 frag_stride; +}; + +#ifdef CONFIG_MLX4_EN_DCB +/* Minimal TC BW - setting to 0 will block traffic */ +#define MLX4_EN_BW_MIN 1 +#define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */ + +#define 
MLX4_EN_TC_ETS 7 + +#endif + +struct ethtool_flow_id { + struct list_head list; + struct ethtool_rx_flow_spec flow_spec; + u64 id; +}; + +enum { + MLX4_EN_FLAG_PROMISC = (1 << 0), + MLX4_EN_FLAG_MC_PROMISC = (1 << 1), + /* whether we need to enable hardware loopback by putting dmac + * in Tx WQE + */ + MLX4_EN_FLAG_ENABLE_HW_LOOPBACK = (1 << 2), + /* whether we need to drop packets that hardware loopback-ed */ + MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3), + MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4), + MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP = (1 << 5), +}; + +#define PORT_BEACON_MAX_LIMIT (65535) +#define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE) +#define MLX4_EN_MAC_HASH_IDX 5 + +struct mlx4_en_stats_bitmap { + DECLARE_BITMAP(bitmap, NUM_ALL_STATS); + struct mutex mutex; /* for mutual access to stats bitmap */ +}; + +struct mlx4_en_priv { + struct mlx4_en_dev *mdev; + struct mlx4_en_port_profile *prof; + struct net_device *dev; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + struct net_device_stats stats; + struct net_device_stats ret_stats; + struct mlx4_en_port_state port_state; + spinlock_t stats_lock; + struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES]; + /* To allow rules removal while port is going down */ + struct list_head ethtool_list; + + unsigned long last_moder_packets[MAX_RX_RINGS]; + unsigned long last_moder_tx_packets; + unsigned long last_moder_bytes[MAX_RX_RINGS]; + unsigned long last_moder_jiffies; + int last_moder_time[MAX_RX_RINGS]; + u16 rx_usecs; + u16 rx_frames; + u16 tx_usecs; + u16 tx_frames; + u32 pkt_rate_low; + u16 rx_usecs_low; + u32 pkt_rate_high; + u16 rx_usecs_high; + u16 sample_interval; + u16 adaptive_rx_coal; + u32 msg_enable; + u32 loopback_ok; + u32 validate_loopback; + + struct mlx4_hwq_resources res; + int link_state; + int last_link_state; + bool port_up; + int port; + int registered; + int allocated; + int stride; + unsigned char current_mac[ETH_ALEN + 2]; + int mac_index; + unsigned max_mtu; + int base_qpn; + int cqe_factor; + int cqe_size; + + struct mlx4_en_rss_map rss_map; + __be32 ctrl_flags; + u32 flags; + u8 num_tx_rings_p_up; + u32 tx_work_limit; + u32 tx_ring_num; + u32 rx_ring_num; + u32 rx_skb_size; + struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS]; + u16 num_frags; + u16 log_rx_info; + + struct mlx4_en_tx_ring **tx_ring; + struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS]; + struct mlx4_en_cq **tx_cq; + struct mlx4_en_cq *rx_cq[MAX_RX_RINGS]; + struct mlx4_qp drop_qp; + struct work_struct rx_mode_task; + struct work_struct watchdog_task; + struct work_struct linkstate_task; + struct delayed_work stats_task; + struct delayed_work service_task; +#ifdef CONFIG_MLX4_EN_VXLAN + struct work_struct vxlan_add_task; + struct work_struct vxlan_del_task; +#endif + struct mlx4_en_perf_stats pstats; + struct mlx4_en_pkt_stats pkstats; + struct mlx4_en_flow_stats_rx rx_priority_flowstats[MLX4_NUM_PRIORITIES]; + struct mlx4_en_flow_stats_tx tx_priority_flowstats[MLX4_NUM_PRIORITIES]; + struct mlx4_en_flow_stats_rx rx_flowstats; + struct mlx4_en_flow_stats_tx tx_flowstats; + struct mlx4_en_port_stats port_stats; + struct mlx4_en_stats_bitmap stats_bitmap; + struct list_head mc_list; + struct list_head curr_list; + u64 broadcast_id; + struct mlx4_en_stat_out_mbox hw_stats; + int vids[128]; + bool wol; + struct device *ddev; + struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE]; + struct hwtstamp_config hwtstamp_config; + +#ifdef CONFIG_MLX4_EN_DCB + struct ieee_ets ets; + u16 maxrate[IEEE_8021QAZ_MAX_TCS]; + enum dcbnl_cndd_states 
cndd_state[IEEE_8021QAZ_MAX_TCS]; +#endif +#ifdef CONFIG_RFS_ACCEL + spinlock_t filters_lock; + int last_filter_id; + struct list_head filters; + struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT]; +#endif + u64 tunnel_reg_id; + __be16 vxlan_port; + + u32 pflags; + u8 rss_key[MLX4_EN_RSS_KEY_SIZE]; + u8 rss_hash_fn; +}; + +enum mlx4_en_wol { + MLX4_EN_WOL_MAGIC = (1ULL << 61), + MLX4_EN_WOL_ENABLED = (1ULL << 62), +}; + +struct mlx4_mac_entry { + struct hlist_node hlist; + unsigned char mac[ETH_ALEN + 2]; + u64 reg_id; + struct rcu_head rcu; +}; + +static inline struct mlx4_cqe *mlx4_en_get_cqe(void *buf, int idx, int cqe_sz) +{ + return buf + idx * cqe_sz; +} + +#ifdef CONFIG_NET_RX_BUSY_POLL +static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq) +{ + spin_lock_init(&cq->poll_lock); + cq->state = MLX4_EN_CQ_STATE_IDLE; +} + +/* called from the device poll rutine to get ownership of a cq */ +static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq) +{ + int rc = true; + spin_lock(&cq->poll_lock); + if (cq->state & MLX4_CQ_LOCKED) { + WARN_ON(cq->state & MLX4_EN_CQ_STATE_NAPI); + cq->state |= MLX4_EN_CQ_STATE_NAPI_YIELD; + rc = false; + } else + /* we don't care if someone yielded */ + cq->state = MLX4_EN_CQ_STATE_NAPI; + spin_unlock(&cq->poll_lock); + return rc; +} + +/* returns true is someone tried to get the cq while napi had it */ +static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq) +{ + int rc = false; + spin_lock(&cq->poll_lock); + WARN_ON(cq->state & (MLX4_EN_CQ_STATE_POLL | + MLX4_EN_CQ_STATE_NAPI_YIELD)); + + if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD) + rc = true; + cq->state = MLX4_EN_CQ_STATE_IDLE; + spin_unlock(&cq->poll_lock); + return rc; +} + +/* called from mlx4_en_low_latency_poll() */ +static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq) +{ + int rc = true; + spin_lock_bh(&cq->poll_lock); + if ((cq->state & MLX4_CQ_LOCKED)) { + struct net_device *dev = cq->dev; + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring]; + + cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD; + rc = false; + rx_ring->yields++; + } else + /* preserve yield marks */ + cq->state |= MLX4_EN_CQ_STATE_POLL; + spin_unlock_bh(&cq->poll_lock); + return rc; +} + +/* returns true if someone tried to get the cq while it was locked */ +static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq) +{ + int rc = false; + spin_lock_bh(&cq->poll_lock); + WARN_ON(cq->state & (MLX4_EN_CQ_STATE_NAPI)); + + if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD) + rc = true; + cq->state = MLX4_EN_CQ_STATE_IDLE; + spin_unlock_bh(&cq->poll_lock); + return rc; +} + +/* true if a socket is polling, even if it did not get the lock */ +static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq) +{ + WARN_ON(!(cq->state & MLX4_CQ_LOCKED)); + return cq->state & CQ_USER_PEND; +} +#else +static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq) +{ +} + +static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq) +{ + return true; +} + +static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq) +{ + return false; +} + +static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq) +{ + return false; +} + +static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq) +{ + return false; +} + +static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq) +{ + return false; +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63) + +void mlx4_en_update_loopback_state(struct net_device 
*dev, + netdev_features_t features); + +void mlx4_en_destroy_netdev(struct net_device *dev); +int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, + struct mlx4_en_port_profile *prof); + +int mlx4_en_start_port(struct net_device *dev); +void mlx4_en_stop_port(struct net_device *dev, int detach); + +void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev, + struct mlx4_en_stats_bitmap *stats_bitmap, + u8 rx_ppp, u8 rx_pause, + u8 tx_ppp, u8 tx_pause); + +void mlx4_en_free_resources(struct mlx4_en_priv *priv); +int mlx4_en_alloc_resources(struct mlx4_en_priv *priv); + +int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq, + int entries, int ring, enum cq_type mode, int node); +void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq); +int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, + int cq_idx); +void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); +int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); +int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); + +void mlx4_en_tx_irq(struct mlx4_cq *mcq); +u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback); +netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); + +int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring **pring, + u32 size, u16 stride, + int node, int queue_index); +void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring **pring); +int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int cq, int user_prio); +void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring); +void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev); +void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv); +int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring **pring, + u32 size, u16 stride, int node); +void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring **pring, + u32 size, u16 stride); +int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv); +void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring); +int mlx4_en_process_rx_cq(struct net_device *dev, + struct mlx4_en_cq *cq, + int budget); +int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget); +int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget); +void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, + int is_tx, int rss, int qpn, int cqn, int user_prio, + struct mlx4_qp_context *context); +void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event); +int mlx4_en_map_buffer(struct mlx4_buf *buf); +void mlx4_en_unmap_buffer(struct mlx4_buf *buf); + +void mlx4_en_calc_rx_buf(struct net_device *dev); +int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv); +void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv); +int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv); +void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv); +int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring); +void mlx4_en_rx_irq(struct mlx4_cq *mcq); + +int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode); +int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv); + +int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset); +int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port); 
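Taken together, the create/activate prototypes above imply a fixed pairing between a TX ring and its completion queue. The sketch below wires up a single TX ring in roughly that order; it is illustrative only (mlx4_en_start_port() and mlx4_en_alloc_resources() are the real callers), mlx4_en_demo_tx_ring_up() is a made-up name, and passing cq->mcq.cqn as the CQ number to mlx4_en_activate_tx_ring() is an assumption based on the struct mlx4_en_cq layout earlier in this header.

/*
 * Minimal sketch: bring up one TX ring and its CQ using the helpers
 * declared above. Defaults (MLX4_EN_DEF_TX_RING_SIZE, TXBB_SIZE) come
 * from this header; NUMA_NO_NODE and priority 0 are arbitrary choices
 * for the example, and real code sizes rings from the port profile.
 */
static int mlx4_en_demo_tx_ring_up(struct mlx4_en_priv *priv, int ring_idx)
{
        struct mlx4_en_cq *cq;
        struct mlx4_en_tx_ring *ring;
        int err;

        err = mlx4_en_create_cq(priv, &cq, MLX4_EN_DEF_TX_RING_SIZE,
                                ring_idx, TX, NUMA_NO_NODE);
        if (err)
                return err;

        err = mlx4_en_create_tx_ring(priv, &ring, MLX4_EN_DEF_TX_RING_SIZE,
                                     TXBB_SIZE, NUMA_NO_NODE, ring_idx);
        if (err)
                goto err_cq;

        err = mlx4_en_activate_cq(priv, cq, ring_idx);
        if (err)
                goto err_ring;

        /* Bind the TX ring to its completion queue by CQ number. */
        err = mlx4_en_activate_tx_ring(priv, ring, cq->mcq.cqn, 0);
        if (err)
                goto err_deactivate_cq;

        return 0;

err_deactivate_cq:
        mlx4_en_deactivate_cq(priv, cq);
err_ring:
        mlx4_en_destroy_tx_ring(priv, &ring);
err_cq:
        mlx4_en_destroy_cq(priv, &cq);
        return err;
}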
+ +#ifdef CONFIG_MLX4_EN_DCB +extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops; +extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops; +#endif + +int mlx4_en_setup_tc(struct net_device *dev, u8 up); + +#ifdef CONFIG_RFS_ACCEL +void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv); +#endif + +#define MLX4_EN_NUM_SELF_TEST 5 +void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf); +void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev); + +#define DEV_FEATURE_CHANGED(dev, new_features, feature) \ + ((dev->features & feature) ^ (new_features & feature)) + +int mlx4_en_reset_config(struct net_device *dev, + struct hwtstamp_config ts_config, + netdev_features_t new_features); +void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev, + struct mlx4_en_stats_bitmap *stats_bitmap, + u8 rx_ppp, u8 rx_pause, + u8 tx_ppp, u8 tx_pause); +int mlx4_en_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr); + +/* + * Functions for time stamping + */ +u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe); +void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev, + struct skb_shared_hwtstamps *hwts, + u64 timestamp); +void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev); +void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev); + +/* Globals + */ +extern const struct ethtool_ops mlx4_en_ethtool_ops; + + + +/* + * printk / logging functions + */ + +__printf(3, 4) +void en_print(const char *level, const struct mlx4_en_priv *priv, + const char *format, ...); + +#define en_dbg(mlevel, priv, format, ...) \ +do { \ + if (NETIF_MSG_##mlevel & (priv)->msg_enable) \ + en_print(KERN_DEBUG, priv, format, ##__VA_ARGS__); \ +} while (0) +#define en_warn(priv, format, ...) \ + en_print(KERN_WARNING, priv, format, ##__VA_ARGS__) +#define en_err(priv, format, ...) \ + en_print(KERN_ERR, priv, format, ##__VA_ARGS__) +#define en_info(priv, format, ...) \ + en_print(KERN_INFO, priv, format, ##__VA_ARGS__) + +#define mlx4_err(mdev, format, ...) \ + pr_err(DRV_NAME " %s: " format, \ + dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__) +#define mlx4_info(mdev, format, ...) \ + pr_info(DRV_NAME " %s: " format, \ + dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__) +#define mlx4_warn(mdev, format, ...) 
\ + pr_warn(DRV_NAME " %s: " format, \ + dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__) + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h new file mode 100644 index 000000000..00555832a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h @@ -0,0 +1,107 @@ +#ifndef _MLX4_STATS_ +#define _MLX4_STATS_ + +#ifdef MLX4_EN_PERF_STAT +#define NUM_PERF_STATS NUM_PERF_COUNTERS +#else +#define NUM_PERF_STATS 0 +#endif + +#define NUM_PRIORITIES 9 +#define NUM_PRIORITY_STATS 2 + +struct mlx4_en_pkt_stats { + unsigned long rx_multicast_packets; + unsigned long rx_broadcast_packets; + unsigned long rx_jabbers; + unsigned long rx_in_range_length_error; + unsigned long rx_out_range_length_error; + unsigned long tx_multicast_packets; + unsigned long tx_broadcast_packets; + unsigned long rx_prio[NUM_PRIORITIES][NUM_PRIORITY_STATS]; + unsigned long tx_prio[NUM_PRIORITIES][NUM_PRIORITY_STATS]; +#define NUM_PKT_STATS 43 +}; + +struct mlx4_en_port_stats { + unsigned long tso_packets; + unsigned long xmit_more; + unsigned long queue_stopped; + unsigned long wake_queue; + unsigned long tx_timeout; + unsigned long rx_alloc_failed; + unsigned long rx_chksum_good; + unsigned long rx_chksum_none; + unsigned long rx_chksum_complete; + unsigned long tx_chksum_offload; +#define NUM_PORT_STATS 10 +}; + +struct mlx4_en_perf_stats { + u32 tx_poll; + u64 tx_pktsz_avg; + u32 inflight_avg; + u16 tx_coal_avg; + u16 rx_coal_avg; + u32 napi_quota; +#define NUM_PERF_COUNTERS 6 +}; + +#define NUM_MAIN_STATS 21 + +#define MLX4_NUM_PRIORITIES 8 + +struct mlx4_en_flow_stats_rx { + u64 rx_pause; + u64 rx_pause_duration; + u64 rx_pause_transition; +#define NUM_FLOW_STATS_RX 3 +#define NUM_FLOW_PRIORITY_STATS_RX (NUM_FLOW_STATS_RX * \ + MLX4_NUM_PRIORITIES) +}; + +struct mlx4_en_flow_stats_tx { + u64 tx_pause; + u64 tx_pause_duration; + u64 tx_pause_transition; +#define NUM_FLOW_STATS_TX 3 +#define NUM_FLOW_PRIORITY_STATS_TX (NUM_FLOW_STATS_TX * \ + MLX4_NUM_PRIORITIES) +}; + +#define NUM_FLOW_STATS (NUM_FLOW_STATS_RX + NUM_FLOW_STATS_TX + \ + NUM_FLOW_PRIORITY_STATS_TX + \ + NUM_FLOW_PRIORITY_STATS_RX) + +struct mlx4_en_stat_out_flow_control_mbox { + /* Total number of PAUSE frames received from the far-end port */ + __be64 rx_pause; + /* Total number of microseconds that far-end port requested to pause + * transmission of packets + */ + __be64 rx_pause_duration; + /* Number of received transmission from XOFF state to XON state */ + __be64 rx_pause_transition; + /* Total number of PAUSE frames sent from the far-end port */ + __be64 tx_pause; + /* Total time in microseconds that transmission of packets has been + * paused + */ + __be64 tx_pause_duration; + /* Number of transmitter transitions from XOFF state to XON state */ + __be64 tx_pause_transition; + /* Reserverd */ + __be64 reserved[2]; +}; + +enum { + MLX4_DUMP_ETH_STATS_FLOW_CONTROL = 1 << 12 +}; + +#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + \ + NUM_FLOW_STATS + NUM_PERF_STATS) + +#define MLX4_FIND_NETDEV_STAT(n) (offsetof(struct net_device_stats, n) / \ + sizeof(((struct net_device_stats *)0)->n)) + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c new file mode 100644 index 000000000..78f51e103 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -0,0 +1,1158 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. 
All rights reserved. + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/errno.h> +#include <linux/export.h> +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/vmalloc.h> + +#include <linux/mlx4/cmd.h> + +#include "mlx4.h" +#include "icm.h" + +static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order) +{ + int o; + int m; + u32 seg; + + spin_lock(&buddy->lock); + + for (o = order; o <= buddy->max_order; ++o) + if (buddy->num_free[o]) { + m = 1 << (buddy->max_order - o); + seg = find_first_bit(buddy->bits[o], m); + if (seg < m) + goto found; + } + + spin_unlock(&buddy->lock); + return -1; + + found: + clear_bit(seg, buddy->bits[o]); + --buddy->num_free[o]; + + while (o > order) { + --o; + seg <<= 1; + set_bit(seg ^ 1, buddy->bits[o]); + ++buddy->num_free[o]; + } + + spin_unlock(&buddy->lock); + + seg <<= order; + + return seg; +} + +static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order) +{ + seg >>= order; + + spin_lock(&buddy->lock); + + while (test_bit(seg ^ 1, buddy->bits[order])) { + clear_bit(seg ^ 1, buddy->bits[order]); + --buddy->num_free[order]; + seg >>= 1; + ++order; + } + + set_bit(seg, buddy->bits[order]); + ++buddy->num_free[order]; + + spin_unlock(&buddy->lock); +} + +static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) +{ + int i, s; + + buddy->max_order = max_order; + spin_lock_init(&buddy->lock); + + buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *), + GFP_KERNEL); + buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free, + GFP_KERNEL); + if (!buddy->bits || !buddy->num_free) + goto err_out; + + for (i = 0; i <= buddy->max_order; ++i) { + s = BITS_TO_LONGS(1 << (buddy->max_order - i)); + buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN); + if (!buddy->bits[i]) { + buddy->bits[i] = vzalloc(s * sizeof(long)); + if (!buddy->bits[i]) + goto err_out_free; + } + } + + set_bit(0, buddy->bits[buddy->max_order]); + buddy->num_free[buddy->max_order] = 1; + + return 0; + +err_out_free: + for (i = 0; i <= buddy->max_order; ++i) + kvfree(buddy->bits[i]); + +err_out: + kfree(buddy->bits); + kfree(buddy->num_free); + + return -ENOMEM; +} 
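mlx4_buddy_alloc() above services a request of a given order by taking the smallest free block that is large enough and repeatedly splitting it, parking the unused buddy (seg ^ 1) at each lower order. The toy userspace program below reproduces only that bookkeeping for a three-order buddy (max_order = 2) so the split sequence is visible; it is a model for illustration, not driver code, and the per-order bitmaps are deliberately reduced to free counters.

#include <stdio.h>

int main(void)
{
        /* One free block of order 2 (covers segments 0..3), nothing else. */
        int num_free[3] = { 0, 0, 1 };
        int order = 0;                  /* caller asks for an order-0 block */
        int o = 2;                      /* lowest order with a free block   */
        unsigned int seg = 0;           /* index of that block at order o   */

        --num_free[o];                  /* claim the order-2 block */
        while (o > order) {
                --o;
                seg <<= 1;
                /* seg ^ 1 is the buddy left free at this smaller order */
                ++num_free[o];
                printf("split: buddy %u stays free at order %d\n", seg ^ 1, o);
        }
        seg <<= order;                  /* same unit conversion as the driver */
        printf("allocated seg %u; free blocks: order0=%d order1=%d order2=%d\n",
               seg, num_free[0], num_free[1], num_free[2]);
        return 0;
}

Freeing that order-0 block later would find its buddy (seg 1) free, merge up to order 1, find that buddy free as well, and restore the original order-2 block, which is exactly the test_bit(seg ^ 1) loop in mlx4_buddy_free() below.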
+ +static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy) +{ + int i; + + for (i = 0; i <= buddy->max_order; ++i) + kvfree(buddy->bits[i]); + + kfree(buddy->bits); + kfree(buddy->num_free); +} + +u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) +{ + struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; + u32 seg; + int seg_order; + u32 offset; + + seg_order = max_t(int, order - log_mtts_per_seg, 0); + + seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order); + if (seg == -1) + return -1; + + offset = seg * (1 << log_mtts_per_seg); + + if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset, + offset + (1 << order) - 1)) { + mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order); + return -1; + } + + return offset; +} + +static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) +{ + u64 in_param = 0; + u64 out_param; + int err; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, order); + err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT, + RES_OP_RESERVE_AND_MAP, + MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); + if (err) + return -1; + return get_param_l(&out_param); + } + return __mlx4_alloc_mtt_range(dev, order); +} + +int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, + struct mlx4_mtt *mtt) +{ + int i; + + if (!npages) { + mtt->order = -1; + mtt->page_shift = MLX4_ICM_PAGE_SHIFT; + return 0; + } else + mtt->page_shift = page_shift; + + for (mtt->order = 0, i = 1; i < npages; i <<= 1) + ++mtt->order; + + mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order); + if (mtt->offset == -1) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_mtt_init); + +void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) +{ + u32 first_seg; + int seg_order; + struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; + + seg_order = max_t(int, order - log_mtts_per_seg, 0); + first_seg = offset / (1 << log_mtts_per_seg); + + mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order); + mlx4_table_put_range(dev, &mr_table->mtt_table, offset, + offset + (1 << order) - 1); +} + +static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) +{ + u64 in_param = 0; + int err; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, offset); + set_param_h(&in_param, order); + err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP, + MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); + if (err) + mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n", + offset, order); + return; + } + __mlx4_free_mtt_range(dev, offset, order); +} + +void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt) +{ + if (mtt->order < 0) + return; + + mlx4_free_mtt_range(dev, mtt->offset, mtt->order); +} +EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup); + +u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt) +{ + return (u64) mtt->offset * dev->caps.mtt_entry_sz; +} +EXPORT_SYMBOL_GPL(mlx4_mtt_addr); + +static u32 hw_index_to_key(u32 ind) +{ + return (ind >> 24) | (ind << 8); +} + +static u32 key_to_hw_index(u32 key) +{ + return (key << 24) | (key >> 8); +} + +static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int mpt_index) +{ + return mlx4_cmd(dev, mailbox->dma, mpt_index, + 0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); +} + +static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int mpt_index) +{ + return mlx4_cmd_box(dev, 0, mailbox ? 
mailbox->dma : 0, mpt_index, + !mailbox, MLX4_CMD_HW2SW_MPT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); +} + +/* Must protect against concurrent access */ +int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, + struct mlx4_mpt_entry ***mpt_entry) +{ + int err; + int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1); + struct mlx4_cmd_mailbox *mailbox = NULL; + + if (mmr->enabled != MLX4_MPT_EN_HW) + return -EINVAL; + + err = mlx4_HW2SW_MPT(dev, NULL, key); + if (err) { + mlx4_warn(dev, "HW2SW_MPT failed (%d).", err); + mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n"); + return err; + } + + mmr->enabled = MLX4_MPT_EN_SW; + + if (!mlx4_is_mfunc(dev)) { + **mpt_entry = mlx4_table_find( + &mlx4_priv(dev)->mr_table.dmpt_table, + key, NULL); + } else { + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR_OR_NULL(mailbox)) + return PTR_ERR(mailbox); + + err = mlx4_cmd_box(dev, 0, mailbox->dma, key, + 0, MLX4_CMD_QUERY_MPT, + MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + if (err) + goto free_mailbox; + + *mpt_entry = (struct mlx4_mpt_entry **)&mailbox->buf; + } + + if (!(*mpt_entry) || !(**mpt_entry)) { + err = -ENOMEM; + goto free_mailbox; + } + + return 0; + +free_mailbox: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_mr_hw_get_mpt); + +int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, + struct mlx4_mpt_entry **mpt_entry) +{ + int err; + + if (!mlx4_is_mfunc(dev)) { + /* Make sure any changes to this entry are flushed */ + wmb(); + + *(u8 *)(*mpt_entry) = MLX4_MPT_STATUS_HW; + + /* Make sure the new status is written */ + wmb(); + + err = mlx4_SYNC_TPT(dev); + } else { + int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1); + + struct mlx4_cmd_mailbox *mailbox = + container_of((void *)mpt_entry, struct mlx4_cmd_mailbox, + buf); + + err = mlx4_SW2HW_MPT(dev, mailbox, key); + } + + if (!err) { + mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK; + mmr->enabled = MLX4_MPT_EN_HW; + } + return err; +} +EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt); + +void mlx4_mr_hw_put_mpt(struct mlx4_dev *dev, + struct mlx4_mpt_entry **mpt_entry) +{ + if (mlx4_is_mfunc(dev)) { + struct mlx4_cmd_mailbox *mailbox = + container_of((void *)mpt_entry, struct mlx4_cmd_mailbox, + buf); + mlx4_free_cmd_mailbox(dev, mailbox); + } +} +EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt); + +int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry, + u32 pdn) +{ + u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK; + /* The wrapper function will put the slave's id here */ + if (mlx4_is_mfunc(dev)) + pd_flags &= ~MLX4_MPT_PD_VF_MASK; + + mpt_entry->pd_flags = cpu_to_be32(pd_flags | + (pdn & MLX4_MPT_PD_MASK) + | MLX4_MPT_PD_FLAG_EN_INV); + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_pd); + +int mlx4_mr_hw_change_access(struct mlx4_dev *dev, + struct mlx4_mpt_entry *mpt_entry, + u32 access) +{ + u32 flags = (be32_to_cpu(mpt_entry->flags) & ~MLX4_PERM_MASK) | + (access & MLX4_PERM_MASK); + + mpt_entry->flags = cpu_to_be32(flags); + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_access); + +static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd, + u64 iova, u64 size, u32 access, int npages, + int page_shift, struct mlx4_mr *mr) +{ + mr->iova = iova; + mr->size = size; + mr->pd = pd; + mr->access = access; + mr->enabled = MLX4_MPT_DISABLED; + mr->key = hw_index_to_key(mridx); + + return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); +} + +static int mlx4_WRITE_MTT(struct 
mlx4_dev *dev, + struct mlx4_cmd_mailbox *mailbox, + int num_entries) +{ + return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); +} + +int __mlx4_mpt_reserve(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap); +} + +static int mlx4_mpt_reserve(struct mlx4_dev *dev) +{ + u64 out_param; + + if (mlx4_is_mfunc(dev)) { + if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE, + MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED)) + return -1; + return get_param_l(&out_param); + } + return __mlx4_mpt_reserve(dev); +} + +void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index, MLX4_NO_RR); +} + +static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index) +{ + u64 in_param = 0; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, index); + if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE, + MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED)) + mlx4_warn(dev, "Failed to release mr index:%d\n", + index); + return; + } + __mlx4_mpt_release(dev, index); +} + +int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp) +{ + struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; + + return mlx4_table_get(dev, &mr_table->dmpt_table, index, gfp); +} + +static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp) +{ + u64 param = 0; + + if (mlx4_is_mfunc(dev)) { + set_param_l(¶m, index); + return mlx4_cmd_imm(dev, param, ¶m, RES_MPT, RES_OP_MAP_ICM, + MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); + } + return __mlx4_mpt_alloc_icm(dev, index, gfp); +} + +void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index) +{ + struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; + + mlx4_table_put(dev, &mr_table->dmpt_table, index); +} + +static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index) +{ + u64 in_param = 0; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, index); + if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM, + MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED)) + mlx4_warn(dev, "Failed to free icm of mr index:%d\n", + index); + return; + } + return __mlx4_mpt_free_icm(dev, index); +} + +int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, + int npages, int page_shift, struct mlx4_mr *mr) +{ + u32 index; + int err; + + index = mlx4_mpt_reserve(dev); + if (index == -1) + return -ENOMEM; + + err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size, + access, npages, page_shift, mr); + if (err) + mlx4_mpt_release(dev, index); + + return err; +} +EXPORT_SYMBOL_GPL(mlx4_mr_alloc); + +static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr) +{ + int err; + + if (mr->enabled == MLX4_MPT_EN_HW) { + err = mlx4_HW2SW_MPT(dev, NULL, + key_to_hw_index(mr->key) & + (dev->caps.num_mpts - 1)); + if (err) { + mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n", + err); + return err; + } + + mr->enabled = MLX4_MPT_EN_SW; + } + mlx4_mtt_cleanup(dev, &mr->mtt); + + return 0; +} + +int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr) +{ + int ret; + + ret = mlx4_mr_free_reserved(dev, mr); + if (ret) + return ret; + if (mr->enabled) + mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key)); + mlx4_mpt_release(dev, key_to_hw_index(mr->key)); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_mr_free); + +void 
mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr) +{ + mlx4_mtt_cleanup(dev, &mr->mtt); + mr->mtt.order = -1; +} +EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup); + +int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, + u64 iova, u64 size, int npages, + int page_shift, struct mlx4_mpt_entry *mpt_entry) +{ + int err; + + err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); + if (err) + return err; + + mpt_entry->start = cpu_to_be64(iova); + mpt_entry->length = cpu_to_be64(size); + mpt_entry->entity_size = cpu_to_be32(page_shift); + mpt_entry->flags &= ~(cpu_to_be32(MLX4_MPT_FLAG_FREE | + MLX4_MPT_FLAG_SW_OWNS)); + if (mr->mtt.order < 0) { + mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); + mpt_entry->mtt_addr = 0; + } else { + mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev, + &mr->mtt)); + if (mr->mtt.page_shift == 0) + mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order); + } + if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) { + /* fast register MR in free state */ + mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE); + mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG | + MLX4_MPT_PD_FLAG_RAE); + } else { + mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS); + } + mr->enabled = MLX4_MPT_EN_SW; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_write); + +int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mpt_entry *mpt_entry; + int err; + + err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key), GFP_KERNEL); + if (err) + return err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); + goto err_table; + } + mpt_entry = mailbox->buf; + mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO | + MLX4_MPT_FLAG_REGION | + mr->access); + + mpt_entry->key = cpu_to_be32(key_to_hw_index(mr->key)); + mpt_entry->pd_flags = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV); + mpt_entry->start = cpu_to_be64(mr->iova); + mpt_entry->length = cpu_to_be64(mr->size); + mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift); + + if (mr->mtt.order < 0) { + mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); + mpt_entry->mtt_addr = 0; + } else { + mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev, + &mr->mtt)); + } + + if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) { + /* fast register MR in free state */ + mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE); + mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG | + MLX4_MPT_PD_FLAG_RAE); + mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order); + } else { + mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS); + } + + err = mlx4_SW2HW_MPT(dev, mailbox, + key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1)); + if (err) { + mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err); + goto err_cmd; + } + mr->enabled = MLX4_MPT_EN_HW; + + mlx4_free_cmd_mailbox(dev, mailbox); + + return 0; + +err_cmd: + mlx4_free_cmd_mailbox(dev, mailbox); + +err_table: + mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key)); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_mr_enable); + +static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + int start_index, int npages, u64 *page_list) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + __be64 *mtts; + dma_addr_t dma_handle; + int i; + + mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset + + start_index, &dma_handle); + + if (!mtts) + return -ENOMEM; + + dma_sync_single_for_cpu(&dev->persist->pdev->dev, dma_handle, + npages * sizeof (u64), 
DMA_TO_DEVICE); + + for (i = 0; i < npages; ++i) + mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); + + dma_sync_single_for_device(&dev->persist->pdev->dev, dma_handle, + npages * sizeof (u64), DMA_TO_DEVICE); + + return 0; +} + +int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + int start_index, int npages, u64 *page_list) +{ + int err = 0; + int chunk; + int mtts_per_page; + int max_mtts_first_page; + + /* compute how may mtts fit in the first page */ + mtts_per_page = PAGE_SIZE / sizeof(u64); + max_mtts_first_page = mtts_per_page - (mtt->offset + start_index) + % mtts_per_page; + + chunk = min_t(int, max_mtts_first_page, npages); + + while (npages > 0) { + err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list); + if (err) + return err; + npages -= chunk; + start_index += chunk; + page_list += chunk; + + chunk = min_t(int, mtts_per_page, npages); + } + return err; +} + +int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + int start_index, int npages, u64 *page_list) +{ + struct mlx4_cmd_mailbox *mailbox = NULL; + __be64 *inbox = NULL; + int chunk; + int err = 0; + int i; + + if (mtt->order < 0) + return -EINVAL; + + if (mlx4_is_mfunc(dev)) { + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + while (npages > 0) { + chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2, + npages); + inbox[0] = cpu_to_be64(mtt->offset + start_index); + inbox[1] = 0; + for (i = 0; i < chunk; ++i) + inbox[i + 2] = cpu_to_be64(page_list[i] | + MLX4_MTT_FLAG_PRESENT); + err = mlx4_WRITE_MTT(dev, mailbox, chunk); + if (err) { + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + } + + npages -= chunk; + start_index += chunk; + page_list += chunk; + } + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + } + + return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list); +} +EXPORT_SYMBOL_GPL(mlx4_write_mtt); + +int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + struct mlx4_buf *buf, gfp_t gfp) +{ + u64 *page_list; + int err; + int i; + + page_list = kmalloc(buf->npages * sizeof *page_list, + gfp); + if (!page_list) + return -ENOMEM; + + for (i = 0; i < buf->npages; ++i) + if (buf->nbufs == 1) + page_list[i] = buf->direct.map + (i << buf->page_shift); + else + page_list[i] = buf->page_list[i].map; + + err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list); + + kfree(page_list); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt); + +int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type, + struct mlx4_mw *mw) +{ + u32 index; + + if ((type == MLX4_MW_TYPE_1 && + !(dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)) || + (type == MLX4_MW_TYPE_2 && + !(dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN))) + return -ENOTSUPP; + + index = mlx4_mpt_reserve(dev); + if (index == -1) + return -ENOMEM; + + mw->key = hw_index_to_key(index); + mw->pd = pd; + mw->type = type; + mw->enabled = MLX4_MPT_DISABLED; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_mw_alloc); + +int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mpt_entry *mpt_entry; + int err; + + err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key), GFP_KERNEL); + if (err) + return err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); + goto err_table; + } + mpt_entry = mailbox->buf; + + /* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned + * off, thus creating a memory window and 
not a memory region. + */ + mpt_entry->key = cpu_to_be32(key_to_hw_index(mw->key)); + mpt_entry->pd_flags = cpu_to_be32(mw->pd); + if (mw->type == MLX4_MW_TYPE_2) { + mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE); + mpt_entry->qpn = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP); + mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_EN_INV); + } + + err = mlx4_SW2HW_MPT(dev, mailbox, + key_to_hw_index(mw->key) & + (dev->caps.num_mpts - 1)); + if (err) { + mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err); + goto err_cmd; + } + mw->enabled = MLX4_MPT_EN_HW; + + mlx4_free_cmd_mailbox(dev, mailbox); + + return 0; + +err_cmd: + mlx4_free_cmd_mailbox(dev, mailbox); + +err_table: + mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key)); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_mw_enable); + +void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw) +{ + int err; + + if (mw->enabled == MLX4_MPT_EN_HW) { + err = mlx4_HW2SW_MPT(dev, NULL, + key_to_hw_index(mw->key) & + (dev->caps.num_mpts - 1)); + if (err) + mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err); + + mw->enabled = MLX4_MPT_EN_SW; + } + if (mw->enabled) + mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key)); + mlx4_mpt_release(dev, key_to_hw_index(mw->key)); +} +EXPORT_SYMBOL_GPL(mlx4_mw_free); + +int mlx4_init_mr_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_mr_table *mr_table = &priv->mr_table; + int err; + + /* Nothing to do for slaves - all MR handling is forwarded + * to the master */ + if (mlx4_is_slave(dev)) + return 0; + + if (!is_power_of_2(dev->caps.num_mpts)) + return -EINVAL; + + err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts, + ~0, dev->caps.reserved_mrws, 0); + if (err) + return err; + + err = mlx4_buddy_init(&mr_table->mtt_buddy, + ilog2((u32)dev->caps.num_mtts / + (1 << log_mtts_per_seg))); + if (err) + goto err_buddy; + + if (dev->caps.reserved_mtts) { + priv->reserved_mtts = + mlx4_alloc_mtt_range(dev, + fls(dev->caps.reserved_mtts - 1)); + if (priv->reserved_mtts < 0) { + mlx4_warn(dev, "MTT table of order %u is too small\n", + mr_table->mtt_buddy.max_order); + err = -ENOMEM; + goto err_reserve_mtts; + } + } + + return 0; + +err_reserve_mtts: + mlx4_buddy_cleanup(&mr_table->mtt_buddy); + +err_buddy: + mlx4_bitmap_cleanup(&mr_table->mpt_bitmap); + + return err; +} + +void mlx4_cleanup_mr_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_mr_table *mr_table = &priv->mr_table; + + if (mlx4_is_slave(dev)) + return; + if (priv->reserved_mtts >= 0) + mlx4_free_mtt_range(dev, priv->reserved_mtts, + fls(dev->caps.reserved_mtts - 1)); + mlx4_buddy_cleanup(&mr_table->mtt_buddy); + mlx4_bitmap_cleanup(&mr_table->mpt_bitmap); +} + +static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list, + int npages, u64 iova) +{ + int i, page_mask; + + if (npages > fmr->max_pages) + return -EINVAL; + + page_mask = (1 << fmr->page_shift) - 1; + + /* We are getting page lists, so va must be page aligned. 
*/ + if (iova & page_mask) + return -EINVAL; + + /* Trust the user not to pass misaligned data in page_list */ + if (0) + for (i = 0; i < npages; ++i) { + if (page_list[i] & ~page_mask) + return -EINVAL; + } + + if (fmr->maps >= fmr->max_maps) + return -EINVAL; + + return 0; +} + +int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, + int npages, u64 iova, u32 *lkey, u32 *rkey) +{ + u32 key; + int i, err; + + err = mlx4_check_fmr(fmr, page_list, npages, iova); + if (err) + return err; + + ++fmr->maps; + + key = key_to_hw_index(fmr->mr.key); + key += dev->caps.num_mpts; + *lkey = *rkey = fmr->mr.key = hw_index_to_key(key); + + *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW; + + /* Make sure MPT status is visible before writing MTT entries */ + wmb(); + + dma_sync_single_for_cpu(&dev->persist->pdev->dev, fmr->dma_handle, + npages * sizeof(u64), DMA_TO_DEVICE); + + for (i = 0; i < npages; ++i) + fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); + + dma_sync_single_for_device(&dev->persist->pdev->dev, fmr->dma_handle, + npages * sizeof(u64), DMA_TO_DEVICE); + + fmr->mpt->key = cpu_to_be32(key); + fmr->mpt->lkey = cpu_to_be32(key); + fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift)); + fmr->mpt->start = cpu_to_be64(iova); + + /* Make MTT entries are visible before setting MPT status */ + wmb(); + + *(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW; + + /* Make sure MPT status is visible before consumer can use FMR */ + wmb(); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr); + +int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, + int max_maps, u8 page_shift, struct mlx4_fmr *fmr) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err = -ENOMEM; + + if (max_maps > dev->caps.max_fmr_maps) + return -EINVAL; + + if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32) + return -EINVAL; + + /* All MTTs must fit in the same page */ + if (max_pages * sizeof *fmr->mtts > PAGE_SIZE) + return -EINVAL; + + fmr->page_shift = page_shift; + fmr->max_pages = max_pages; + fmr->max_maps = max_maps; + fmr->maps = 0; + + err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages, + page_shift, &fmr->mr); + if (err) + return err; + + fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table, + fmr->mr.mtt.offset, + &fmr->dma_handle); + + if (!fmr->mtts) { + err = -ENOMEM; + goto err_free; + } + + return 0; + +err_free: + (void) mlx4_mr_free(dev, &fmr->mr); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_fmr_alloc); + +int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err; + + err = mlx4_mr_enable(dev, &fmr->mr); + if (err) + return err; + + fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table, + key_to_hw_index(fmr->mr.key), NULL); + if (!fmr->mpt) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_fmr_enable); + +void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, + u32 *lkey, u32 *rkey) +{ + struct mlx4_cmd_mailbox *mailbox; + int err; + + if (!fmr->maps) + return; + + fmr->maps = 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); + pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err); + return; + } + + err = mlx4_HW2SW_MPT(dev, NULL, + key_to_hw_index(fmr->mr.key) & + (dev->caps.num_mpts - 1)); + mlx4_free_cmd_mailbox(dev, mailbox); + if (err) { + pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err); + return; + } + fmr->mr.enabled = MLX4_MPT_EN_SW; +} +EXPORT_SYMBOL_GPL(mlx4_fmr_unmap); + +int 
mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr) +{ + int ret; + + if (fmr->maps) + return -EBUSY; + + ret = mlx4_mr_free(dev, &fmr->mr); + if (ret) + return ret; + fmr->mr.enabled = MLX4_MPT_DISABLED; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_fmr_free); + +int mlx4_SYNC_TPT(struct mlx4_dev *dev) +{ + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); +} +EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT); diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c new file mode 100644 index 000000000..609c59dc8 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/pd.c @@ -0,0 +1,287 @@ +/* + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/errno.h> +#include <linux/export.h> +#include <linux/io-mapping.h> + +#include <asm/page.h> + +#include "mlx4.h" +#include "icm.h" + +enum { + MLX4_NUM_RESERVED_UARS = 8 +}; + +int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + *pdn = mlx4_bitmap_alloc(&priv->pd_bitmap); + if (*pdn == -1) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_pd_alloc); + +void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn) +{ + mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn, MLX4_USE_RR); +} +EXPORT_SYMBOL_GPL(mlx4_pd_free); + +int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + *xrcdn = mlx4_bitmap_alloc(&priv->xrcd_bitmap); + if (*xrcdn == -1) + return -ENOMEM; + + return 0; +} + +int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn) +{ + u64 out_param; + int err; + + if (mlx4_is_mfunc(dev)) { + err = mlx4_cmd_imm(dev, 0, &out_param, + RES_XRCD, RES_OP_RESERVE, + MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) + return err; + + *xrcdn = get_param_l(&out_param); + return 0; + } + return __mlx4_xrcd_alloc(dev, xrcdn); +} +EXPORT_SYMBOL_GPL(mlx4_xrcd_alloc); + +void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) +{ + mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn, MLX4_USE_RR); +} + +void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) +{ + u64 in_param = 0; + int err; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, xrcdn); + err = mlx4_cmd(dev, in_param, RES_XRCD, + RES_OP_RESERVE, MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) + mlx4_warn(dev, "Failed to release xrcdn %d\n", xrcdn); + } else + __mlx4_xrcd_free(dev, xrcdn); +} +EXPORT_SYMBOL_GPL(mlx4_xrcd_free); + +int mlx4_init_pd_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds, + (1 << NOT_MASKED_PD_BITS) - 1, + dev->caps.reserved_pds, 0); +} + +void mlx4_cleanup_pd_table(struct mlx4_dev *dev) +{ + mlx4_bitmap_cleanup(&mlx4_priv(dev)->pd_bitmap); +} + +int mlx4_init_xrcd_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + return mlx4_bitmap_init(&priv->xrcd_bitmap, (1 << 16), + (1 << 16) - 1, dev->caps.reserved_xrcds + 1, 0); +} + +void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev) +{ + mlx4_bitmap_cleanup(&mlx4_priv(dev)->xrcd_bitmap); +} + +int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar) +{ + int offset; + + uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap); + if (uar->index == -1) + return -ENOMEM; + + if (mlx4_is_slave(dev)) + offset = uar->index % ((int)pci_resource_len(dev->persist->pdev, + 2) / + dev->caps.uar_page_size); + else + offset = uar->index; + uar->pfn = (pci_resource_start(dev->persist->pdev, 2) >> PAGE_SHIFT) + + offset; + uar->map = NULL; + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_uar_alloc); + +void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar) +{ + mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index, MLX4_USE_RR); +} +EXPORT_SYMBOL_GPL(mlx4_uar_free); + +int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_uar *uar; + int err = 0; + int idx; + + if (!priv->bf_mapping) + return -ENOMEM; + + mutex_lock(&priv->bf_mutex); + if (!list_empty(&priv->bf_list)) + uar = list_entry(priv->bf_list.next, struct mlx4_uar, bf_list); + else { + if (mlx4_bitmap_avail(&priv->uar_table.bitmap) < 
MLX4_NUM_RESERVED_UARS) { + err = -ENOMEM; + goto out; + } + uar = kmalloc_node(sizeof(*uar), GFP_KERNEL, node); + if (!uar) { + uar = kmalloc(sizeof(*uar), GFP_KERNEL); + if (!uar) { + err = -ENOMEM; + goto out; + } + } + err = mlx4_uar_alloc(dev, uar); + if (err) + goto free_kmalloc; + + uar->map = ioremap(uar->pfn << PAGE_SHIFT, PAGE_SIZE); + if (!uar->map) { + err = -ENOMEM; + goto free_uar; + } + + uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT); + if (!uar->bf_map) { + err = -ENOMEM; + goto unamp_uar; + } + uar->free_bf_bmap = 0; + list_add(&uar->bf_list, &priv->bf_list); + } + + idx = ffz(uar->free_bf_bmap); + uar->free_bf_bmap |= 1 << idx; + bf->uar = uar; + bf->offset = 0; + bf->buf_size = dev->caps.bf_reg_size / 2; + bf->reg = uar->bf_map + idx * dev->caps.bf_reg_size; + if (uar->free_bf_bmap == (1 << dev->caps.bf_regs_per_page) - 1) + list_del_init(&uar->bf_list); + + goto out; + +unamp_uar: + bf->uar = NULL; + iounmap(uar->map); + +free_uar: + mlx4_uar_free(dev, uar); + +free_kmalloc: + kfree(uar); + +out: + mutex_unlock(&priv->bf_mutex); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_bf_alloc); + +void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int idx; + + if (!bf->uar || !bf->uar->bf_map) + return; + + mutex_lock(&priv->bf_mutex); + idx = (bf->reg - bf->uar->bf_map) / dev->caps.bf_reg_size; + bf->uar->free_bf_bmap &= ~(1 << idx); + if (!bf->uar->free_bf_bmap) { + if (!list_empty(&bf->uar->bf_list)) + list_del(&bf->uar->bf_list); + + io_mapping_unmap(bf->uar->bf_map); + iounmap(bf->uar->map); + mlx4_uar_free(dev, bf->uar); + kfree(bf->uar); + } else if (list_empty(&bf->uar->bf_list)) + list_add(&bf->uar->bf_list, &priv->bf_list); + + mutex_unlock(&priv->bf_mutex); +} +EXPORT_SYMBOL_GPL(mlx4_bf_free); + +int mlx4_init_uar_table(struct mlx4_dev *dev) +{ + if (dev->caps.num_uars <= 128) { + mlx4_err(dev, "Only %d UAR pages (need more than 128)\n", + dev->caps.num_uars); + mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n"); + return -ENODEV; + } + + return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap, + dev->caps.num_uars, dev->caps.num_uars - 1, + dev->caps.reserved_uars, 0); +} + +void mlx4_cleanup_uar_table(struct mlx4_dev *dev) +{ + mlx4_bitmap_cleanup(&mlx4_priv(dev)->uar_table.bitmap); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c new file mode 100644 index 000000000..c2b21313d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -0,0 +1,1445 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/errno.h> +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <linux/export.h> + +#include <linux/mlx4/cmd.h> + +#include "mlx4.h" +#include "mlx4_stats.h" + +#define MLX4_MAC_VALID (1ull << 63) + +#define MLX4_VLAN_VALID (1u << 31) +#define MLX4_VLAN_MASK 0xfff + +#define MLX4_STATS_TRAFFIC_COUNTERS_MASK 0xfULL +#define MLX4_STATS_TRAFFIC_DROPS_MASK 0xc0ULL +#define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL +#define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL + +#define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2 +#define MLX4_IGNORE_FCS_MASK 0x1 + +void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table) +{ + int i; + + mutex_init(&table->mutex); + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + table->entries[i] = 0; + table->refs[i] = 0; + } + table->max = 1 << dev->caps.log_num_macs; + table->total = 0; +} + +void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table) +{ + int i; + + mutex_init(&table->mutex); + for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) { + table->entries[i] = 0; + table->refs[i] = 0; + } + table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR; + table->total = 0; +} + +void mlx4_init_roce_gid_table(struct mlx4_dev *dev, + struct mlx4_roce_gid_table *table) +{ + int i; + + mutex_init(&table->mutex); + for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) + memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE); +} + +static int validate_index(struct mlx4_dev *dev, + struct mlx4_mac_table *table, int index) +{ + int err = 0; + + if (index < 0 || index >= table->max || !table->entries[index]) { + mlx4_warn(dev, "No valid Mac entry for the given index\n"); + err = -EINVAL; + } + return err; +} + +static int find_index(struct mlx4_dev *dev, + struct mlx4_mac_table *table, u64 mac) +{ + int i; + + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + if (table->refs[i] && + (MLX4_MAC_MASK & mac) == + (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) + return i; + } + /* Mac not found */ + return -EINVAL; +} + +static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port, + __be64 *entries) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 in_mod; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE); + + in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port; + + err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx) +{ + struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; + struct mlx4_mac_table *table = &info->mac_table; + int i; + + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + if (!table->refs[i]) + continue; + + if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { + *idx = i; + return 0; + } + } + + return -ENOENT; +} +EXPORT_SYMBOL_GPL(mlx4_find_cached_mac); + +int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, 
u64 mac) +{ + struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; + struct mlx4_mac_table *table = &info->mac_table; + int i, err = 0; + int free = -1; + + mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n", + (unsigned long long) mac, port); + + mutex_lock(&table->mutex); + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + if (!table->refs[i]) { + if (free < 0) + free = i; + continue; + } + + if ((MLX4_MAC_MASK & mac) == + (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { + /* MAC already registered, increment ref count */ + err = i; + ++table->refs[i]; + goto out; + } + } + + mlx4_dbg(dev, "Free MAC index is %d\n", free); + + if (table->total == table->max) { + /* No free mac entries */ + err = -ENOSPC; + goto out; + } + + /* Register new MAC */ + table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID); + + err = mlx4_set_port_mac_table(dev, port, table->entries); + if (unlikely(err)) { + mlx4_err(dev, "Failed adding MAC: 0x%llx\n", + (unsigned long long) mac); + table->entries[free] = 0; + goto out; + } + table->refs[free] = 1; + err = free; + ++table->total; +out: + mutex_unlock(&table->mutex); + return err; +} +EXPORT_SYMBOL_GPL(__mlx4_register_mac); + +int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) +{ + u64 out_param = 0; + int err = -EINVAL; + + if (mlx4_is_mfunc(dev)) { + if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) { + err = mlx4_cmd_imm(dev, mac, &out_param, + ((u32) port) << 8 | (u32) RES_MAC, + RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + } + if (err && err == -EINVAL && mlx4_is_slave(dev)) { + /* retry using old REG_MAC format */ + set_param_l(&out_param, port); + err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, + RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (!err) + dev->flags |= MLX4_FLAG_OLD_REG_MAC; + } + if (err) + return err; + + return get_param_l(&out_param); + } + return __mlx4_register_mac(dev, port, mac); +} +EXPORT_SYMBOL_GPL(mlx4_register_mac); + +int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port) +{ + return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] + + (port - 1) * (1 << dev->caps.log_num_macs); +} +EXPORT_SYMBOL_GPL(mlx4_get_base_qpn); + +void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) +{ + struct mlx4_port_info *info; + struct mlx4_mac_table *table; + int index; + + if (port < 1 || port > dev->caps.num_ports) { + mlx4_warn(dev, "invalid port number (%d), aborting...\n", port); + return; + } + info = &mlx4_priv(dev)->port[port]; + table = &info->mac_table; + mutex_lock(&table->mutex); + index = find_index(dev, table, mac); + + if (validate_index(dev, table, index)) + goto out; + if (--table->refs[index]) { + mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n", + index); + goto out; + } + + table->entries[index] = 0; + mlx4_set_port_mac_table(dev, port, table->entries); + --table->total; +out: + mutex_unlock(&table->mutex); +} +EXPORT_SYMBOL_GPL(__mlx4_unregister_mac); + +void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) +{ + u64 out_param = 0; + + if (mlx4_is_mfunc(dev)) { + if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) { + (void) mlx4_cmd_imm(dev, mac, &out_param, + ((u32) port) << 8 | (u32) RES_MAC, + RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + } else { + /* use old unregister mac format */ + set_param_l(&out_param, port); + (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, + RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES, + 
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + } + return; + } + __mlx4_unregister_mac(dev, port, mac); + return; +} +EXPORT_SYMBOL_GPL(mlx4_unregister_mac); + +int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac) +{ + struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; + struct mlx4_mac_table *table = &info->mac_table; + int index = qpn - info->base_qpn; + int err = 0; + + /* CX1 doesn't support multi-functions */ + mutex_lock(&table->mutex); + + err = validate_index(dev, table, index); + if (err) + goto out; + + table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID); + + err = mlx4_set_port_mac_table(dev, port, table->entries); + if (unlikely(err)) { + mlx4_err(dev, "Failed adding MAC: 0x%llx\n", + (unsigned long long) new_mac); + table->entries[index] = 0; + } +out: + mutex_unlock(&table->mutex); + return err; +} +EXPORT_SYMBOL_GPL(__mlx4_replace_mac); + +static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, + __be32 *entries) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 in_mod; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE); + in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + + return err; +} + +int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx) +{ + struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; + int i; + + for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) { + if (table->refs[i] && + (vid == (MLX4_VLAN_MASK & + be32_to_cpu(table->entries[i])))) { + /* VLAN already registered, increase reference count */ + *idx = i; + return 0; + } + } + + return -ENOENT; +} +EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan); + +int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, + int *index) +{ + struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; + int i, err = 0; + int free = -1; + + mutex_lock(&table->mutex); + + if (table->total == table->max) { + /* No free vlan entries */ + err = -ENOSPC; + goto out; + } + + for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) { + if (free < 0 && (table->refs[i] == 0)) { + free = i; + continue; + } + + if (table->refs[i] && + (vlan == (MLX4_VLAN_MASK & + be32_to_cpu(table->entries[i])))) { + /* Vlan already registered, increase references count */ + *index = i; + ++table->refs[i]; + goto out; + } + } + + if (free < 0) { + err = -ENOMEM; + goto out; + } + + /* Register new VLAN */ + table->refs[free] = 1; + table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID); + + err = mlx4_set_port_vlan_table(dev, port, table->entries); + if (unlikely(err)) { + mlx4_warn(dev, "Failed adding vlan: %u\n", vlan); + table->refs[free] = 0; + table->entries[free] = 0; + goto out; + } + + *index = free; + ++table->total; +out: + mutex_unlock(&table->mutex); + return err; +} + +int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) +{ + u64 out_param = 0; + int err; + + if (vlan > 4095) + return -EINVAL; + + if (mlx4_is_mfunc(dev)) { + err = mlx4_cmd_imm(dev, vlan, &out_param, + ((u32) port) << 8 | (u32) RES_VLAN, + RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (!err) + *index = get_param_l(&out_param); + + return err; + } + return __mlx4_register_vlan(dev, port, vlan, index); +} 
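+/* Example usage (illustrative sketch only): a consumer such as the
+ * Ethernet netdev code would pair the register/unregister helpers
+ * around the lifetime of a VLAN filter, e.g.
+ *
+ *	int idx, err;
+ *
+ *	err = mlx4_register_vlan(dev, port, vid, &idx);
+ *	if (err)
+ *		return err;
+ *	...
+ *	mlx4_unregister_vlan(dev, port, vid);
+ *
+ * Entries are reference counted per port: registering an already
+ * present VID only bumps its refcount, and the hardware VLAN table is
+ * rewritten only when the last reference is dropped.
+ */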
+EXPORT_SYMBOL_GPL(mlx4_register_vlan); + +void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan) +{ + struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; + int index; + + mutex_lock(&table->mutex); + if (mlx4_find_cached_vlan(dev, port, vlan, &index)) { + mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan); + goto out; + } + + if (index < MLX4_VLAN_REGULAR) { + mlx4_warn(dev, "Trying to free special vlan index %d\n", index); + goto out; + } + + if (--table->refs[index]) { + mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n", + table->refs[index], index); + goto out; + } + table->entries[index] = 0; + mlx4_set_port_vlan_table(dev, port, table->entries); + --table->total; +out: + mutex_unlock(&table->mutex); +} + +void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan) +{ + u64 out_param = 0; + + if (mlx4_is_mfunc(dev)) { + (void) mlx4_cmd_imm(dev, vlan, &out_param, + ((u32) port) << 8 | (u32) RES_VLAN, + RES_OP_RESERVE_AND_MAP, + MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); + return; + } + __mlx4_unregister_vlan(dev, port, vlan); +} +EXPORT_SYMBOL_GPL(mlx4_unregister_vlan); + +int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps) +{ + struct mlx4_cmd_mailbox *inmailbox, *outmailbox; + u8 *inbuf, *outbuf; + int err; + + inmailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(inmailbox)) + return PTR_ERR(inmailbox); + + outmailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(outmailbox)) { + mlx4_free_cmd_mailbox(dev, inmailbox); + return PTR_ERR(outmailbox); + } + + inbuf = inmailbox->buf; + outbuf = outmailbox->buf; + inbuf[0] = 1; + inbuf[1] = 1; + inbuf[2] = 1; + inbuf[3] = 1; + *(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015); + *(__be32 *) (&inbuf[20]) = cpu_to_be32(port); + + err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3, + MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (!err) + *caps = *(__be32 *) (outbuf + 84); + mlx4_free_cmd_mailbox(dev, inmailbox); + mlx4_free_cmd_mailbox(dev, outmailbox); + return err; +} +static struct mlx4_roce_gid_entry zgid_entry; + +int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port) +{ + int vfs; + int slave_gid = slave; + unsigned i; + struct mlx4_slaves_pport slaves_pport; + struct mlx4_active_ports actv_ports; + unsigned max_port_p_one; + + if (slave == 0) + return MLX4_ROCE_PF_GIDS; + + /* Slave is a VF */ + slaves_pport = mlx4_phys_to_slaves_pport(dev, port); + actv_ports = mlx4_get_active_ports(dev, slave); + max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) + + bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1; + + for (i = 1; i < max_port_p_one; i++) { + struct mlx4_active_ports exclusive_ports; + struct mlx4_slaves_pport slaves_pport_actv; + bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); + set_bit(i - 1, exclusive_ports.ports); + if (i == port) + continue; + slaves_pport_actv = mlx4_phys_to_slaves_pport_actv( + dev, &exclusive_ports); + slave_gid -= bitmap_weight(slaves_pport_actv.slaves, + dev->persist->num_vfs + 1); + } + vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1; + if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs)) + return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1; + return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs; +} + +int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port) +{ + int gids; + unsigned i; + int slave_gid = slave; + int vfs; + + struct 
mlx4_slaves_pport slaves_pport; + struct mlx4_active_ports actv_ports; + unsigned max_port_p_one; + + if (slave == 0) + return 0; + + slaves_pport = mlx4_phys_to_slaves_pport(dev, port); + actv_ports = mlx4_get_active_ports(dev, slave); + max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) + + bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1; + + for (i = 1; i < max_port_p_one; i++) { + struct mlx4_active_ports exclusive_ports; + struct mlx4_slaves_pport slaves_pport_actv; + bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); + set_bit(i - 1, exclusive_ports.ports); + if (i == port) + continue; + slaves_pport_actv = mlx4_phys_to_slaves_pport_actv( + dev, &exclusive_ports); + slave_gid -= bitmap_weight(slaves_pport_actv.slaves, + dev->persist->num_vfs + 1); + } + gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS; + vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1; + if (slave_gid <= gids % vfs) + return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1); + + return MLX4_ROCE_PF_GIDS + (gids % vfs) + + ((gids / vfs) * (slave_gid - 1)); +} +EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix); + +static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave, + int port, struct mlx4_cmd_mailbox *mailbox) +{ + struct mlx4_roce_gid_entry *gid_entry_mbox; + struct mlx4_priv *priv = mlx4_priv(dev); + int num_gids, base, offset; + int i, err; + + num_gids = mlx4_get_slave_num_gids(dev, slave, port); + base = mlx4_get_base_gid_ix(dev, slave, port); + + memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE); + + mutex_lock(&(priv->port[port].gid_table.mutex)); + /* Zero-out gids belonging to that slave in the port GID table */ + for (i = 0, offset = base; i < num_gids; offset++, i++) + memcpy(priv->port[port].gid_table.roce_gids[offset].raw, + zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE); + + /* Now, copy roce port gids table to mailbox for passing to FW */ + gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf; + for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++) + memcpy(gid_entry_mbox->raw, + priv->port[port].gid_table.roce_gids[i].raw, + MLX4_ROCE_GID_ENTRY_SIZE); + + err = mlx4_cmd(dev, mailbox->dma, + ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8), + MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + mutex_unlock(&(priv->port[port].gid_table.mutex)); + return err; +} + + +void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave) +{ + struct mlx4_active_ports actv_ports; + struct mlx4_cmd_mailbox *mailbox; + int num_eth_ports, err; + int i; + + if (slave < 0 || slave > dev->persist->num_vfs) + return; + + actv_ports = mlx4_get_active_ports(dev, slave); + + for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) { + if (test_bit(i, actv_ports.ports)) { + if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH) + continue; + num_eth_ports++; + } + } + + if (!num_eth_ports) + return; + + /* have ETH ports. 
Alloc mailbox for SET_PORT command */ + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return; + + for (i = 0; i < dev->caps.num_ports; i++) { + if (test_bit(i, actv_ports.ports)) { + if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH) + continue; + err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox); + if (err) + mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n", + slave, i + 1, err); + } + } + + mlx4_free_cmd_mailbox(dev, mailbox); + return; +} + +static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, + u8 op_mod, struct mlx4_cmd_mailbox *inbox) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_port_info *port_info; + struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master; + struct mlx4_slave_state *slave_st = &master->slave_state[slave]; + struct mlx4_set_port_rqp_calc_context *qpn_context; + struct mlx4_set_port_general_context *gen_context; + struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1; + int reset_qkey_viols; + int port; + int is_eth; + int num_gids; + int base; + u32 in_modifier; + u32 promisc; + u16 mtu, prev_mtu; + int err; + int i, j; + int offset; + __be32 agg_cap_mask; + __be32 slave_cap_mask; + __be32 new_cap_mask; + + port = in_mod & 0xff; + in_modifier = in_mod >> 8; + is_eth = op_mod; + port_info = &priv->port[port]; + + /* Slaves cannot perform SET_PORT operations except changing MTU */ + if (is_eth) { + if (slave != dev->caps.function && + in_modifier != MLX4_SET_PORT_GENERAL && + in_modifier != MLX4_SET_PORT_GID_TABLE) { + mlx4_warn(dev, "denying SET_PORT for slave:%d\n", + slave); + return -EINVAL; + } + switch (in_modifier) { + case MLX4_SET_PORT_RQP_CALC: + qpn_context = inbox->buf; + qpn_context->base_qpn = + cpu_to_be32(port_info->base_qpn); + qpn_context->n_mac = 0x7; + promisc = be32_to_cpu(qpn_context->promisc) >> + SET_PORT_PROMISC_SHIFT; + qpn_context->promisc = cpu_to_be32( + promisc << SET_PORT_PROMISC_SHIFT | + port_info->base_qpn); + promisc = be32_to_cpu(qpn_context->mcast) >> + SET_PORT_MC_PROMISC_SHIFT; + qpn_context->mcast = cpu_to_be32( + promisc << SET_PORT_MC_PROMISC_SHIFT | + port_info->base_qpn); + break; + case MLX4_SET_PORT_GENERAL: + gen_context = inbox->buf; + /* Mtu is configured as the max MTU among all the + * the functions on the port. */ + mtu = be16_to_cpu(gen_context->mtu); + mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] + + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); + prev_mtu = slave_st->mtu[port]; + slave_st->mtu[port] = mtu; + if (mtu > master->max_mtu[port]) + master->max_mtu[port] = mtu; + if (mtu < prev_mtu && prev_mtu == + master->max_mtu[port]) { + slave_st->mtu[port] = mtu; + master->max_mtu[port] = mtu; + for (i = 0; i < dev->num_slaves; i++) { + master->max_mtu[port] = + max(master->max_mtu[port], + master->slave_state[i].mtu[port]); + } + } + + gen_context->mtu = cpu_to_be16(master->max_mtu[port]); + break; + case MLX4_SET_PORT_GID_TABLE: + /* change to MULTIPLE entries: number of guest's gids + * need a FOR-loop here over number of gids the guest has. + * 1. 
Check no duplicates in gids passed by slave + */ + num_gids = mlx4_get_slave_num_gids(dev, slave, port); + base = mlx4_get_base_gid_ix(dev, slave, port); + gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); + for (i = 0; i < num_gids; gid_entry_mbox++, i++) { + if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw, + sizeof(zgid_entry))) + continue; + gid_entry_mb1 = gid_entry_mbox + 1; + for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) { + if (!memcmp(gid_entry_mb1->raw, + zgid_entry.raw, sizeof(zgid_entry))) + continue; + if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw, + sizeof(gid_entry_mbox->raw))) { + /* found duplicate */ + return -EINVAL; + } + } + } + + /* 2. Check that do not have duplicates in OTHER + * entries in the port GID table + */ + + mutex_lock(&(priv->port[port].gid_table.mutex)); + for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { + if (i >= base && i < base + num_gids) + continue; /* don't compare to slave's current gids */ + gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i]; + if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry))) + continue; + gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); + for (j = 0; j < num_gids; gid_entry_mbox++, j++) { + if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw, + sizeof(zgid_entry))) + continue; + if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw, + sizeof(gid_entry_tbl->raw))) { + /* found duplicate */ + mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n", + slave, i); + mutex_unlock(&(priv->port[port].gid_table.mutex)); + return -EINVAL; + } + } + } + + /* insert slave GIDs with memcpy, starting at slave's base index */ + gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); + for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++) + memcpy(priv->port[port].gid_table.roce_gids[offset].raw, + gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE); + + /* Now, copy roce port gids table to current mailbox for passing to FW */ + gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); + for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++) + memcpy(gid_entry_mbox->raw, + priv->port[port].gid_table.roce_gids[i].raw, + MLX4_ROCE_GID_ENTRY_SIZE); + + err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + mutex_unlock(&(priv->port[port].gid_table.mutex)); + return err; + } + + return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + } + + /* Slaves are not allowed to SET_PORT beacon (LED) blink */ + if (op_mod == MLX4_SET_PORT_BEACON_OPCODE) { + mlx4_warn(dev, "denying SET_PORT Beacon slave:%d\n", slave); + return -EPERM; + } + + /* For IB, we only consider: + * - The capability mask, which is set to the aggregate of all + * slave function capabilities + * - The QKey violatin counter - reset according to each request. 
+ */ + + if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { + reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40; + new_cap_mask = ((__be32 *) inbox->buf)[2]; + } else { + reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1; + new_cap_mask = ((__be32 *) inbox->buf)[1]; + } + + /* slave may not set the IS_SM capability for the port */ + if (slave != mlx4_master_func_num(dev) && + (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM)) + return -EINVAL; + + /* No DEV_MGMT in multifunc mode */ + if (mlx4_is_mfunc(dev) && + (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP)) + return -EINVAL; + + agg_cap_mask = 0; + slave_cap_mask = + priv->mfunc.master.slave_state[slave].ib_cap_mask[port]; + priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask; + for (i = 0; i < dev->num_slaves; i++) + agg_cap_mask |= + priv->mfunc.master.slave_state[i].ib_cap_mask[port]; + + /* only clear mailbox for guests. Master may be setting + * MTU or PKEY table size + */ + if (slave != dev->caps.function) + memset(inbox->buf, 0, 256); + if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { + *(u8 *) inbox->buf |= !!reset_qkey_viols << 6; + ((__be32 *) inbox->buf)[2] = agg_cap_mask; + } else { + ((u8 *) inbox->buf)[3] |= !!reset_qkey_viols; + ((__be32 *) inbox->buf)[1] = agg_cap_mask; + } + + err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + if (err) + priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = + slave_cap_mask; + return err; +} + +int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int port = mlx4_slave_convert_port( + dev, slave, vhcr->in_modifier & 0xFF); + + if (port < 0) + return -EINVAL; + + vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) | + (port & 0xFF); + + return mlx4_common_set_port(dev, slave, vhcr->in_modifier, + vhcr->op_modifier, inbox); +} + +/* bit locations for set port command with zero op modifier */ +enum { + MLX4_SET_PORT_VL_CAP = 4, /* bits 7:4 */ + MLX4_SET_PORT_MTU_CAP = 12, /* bits 15:12 */ + MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20, + MLX4_CHANGE_PORT_VL_CAP = 21, + MLX4_CHANGE_PORT_MTU_CAP = 22, +}; + +int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz) +{ + struct mlx4_cmd_mailbox *mailbox; + int err, vl_cap, pkey_tbl_flag = 0; + + if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) + return 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; + + if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) { + pkey_tbl_flag = 1; + ((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz); + } + + /* IB VL CAP enum isn't used by the firmware, just numerical values */ + for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) { + ((__be32 *) mailbox->buf)[0] = cpu_to_be32( + (1 << MLX4_CHANGE_PORT_MTU_CAP) | + (1 << MLX4_CHANGE_PORT_VL_CAP) | + (pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) | + (dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) | + (vl_cap << MLX4_SET_PORT_VL_CAP)); + err = mlx4_cmd(dev, mailbox->dma, port, + MLX4_SET_PORT_IB_OPCODE, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); + if (err != -ENOMEM) + break; + } + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, + u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_general_context *context; 
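+	/* Note on the context programmed below: the global pause enable
+	 * (pptx/pprx, expected to be 0 or 1) is shifted into bit 7 of its
+	 * byte and is forced off whenever the matching per-priority flow
+	 * control mask (pfctx/pfcrx) is non-zero, i.e. global pause and
+	 * PFC are treated as mutually exclusive here.
+	 */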
+ int err; + u32 in_mod; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + context = mailbox->buf; + context->flags = SET_PORT_GEN_ALL_VALID; + context->mtu = cpu_to_be16(mtu); + context->pptx = (pptx * (!pfctx)) << 7; + context->pfctx = pfctx; + context->pprx = (pprx * (!pfcrx)) << 7; + context->pfcrx = pfcrx; + + in_mod = MLX4_SET_PORT_GENERAL << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_general); + +int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, + u8 promisc) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_rqp_calc_context *context; + int err; + u32 in_mod; + u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ? + MCAST_DIRECT : MCAST_DEFAULT; + + if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) + return 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + context = mailbox->buf; + context->base_qpn = cpu_to_be32(base_qpn); + context->n_mac = dev->caps.log_num_macs; + context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | + base_qpn); + context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT | + base_qpn); + context->intra_no_vlan = 0; + context->no_vlan = MLX4_NO_VLAN_IDX; + context->intra_vlan_miss = 0; + context->vlan_miss = MLX4_VLAN_MISS_IDX; + + in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc); + +int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_general_context *context; + u32 in_mod; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + context = mailbox->buf; + context->v_ignore_fcs |= MLX4_FLAG_V_IGNORE_FCS_MASK; + if (ignore_fcs_value) + context->ignore_fcs |= MLX4_IGNORE_FCS_MASK; + else + context->ignore_fcs &= ~MLX4_IGNORE_FCS_MASK; + + in_mod = MLX4_SET_PORT_GENERAL << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_fcs_check); + +enum { + VXLAN_ENABLE_MODIFY = 1 << 7, + VXLAN_STEERING_MODIFY = 1 << 6, + + VXLAN_ENABLE = 1 << 7, +}; + +struct mlx4_set_port_vxlan_context { + u32 reserved1; + u8 modify_flags; + u8 reserved2; + u8 enable_flags; + u8 steering; +}; + +int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable) +{ + int err; + u32 in_mod; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_vxlan_context *context; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + context = mailbox->buf; + memset(context, 0, sizeof(*context)); + + context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY; + if (enable) + context->enable_flags = VXLAN_ENABLE; + context->steering = steering; + + in_mod = MLX4_SET_PORT_VXLAN << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} 
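+/* Example usage (illustrative sketch only): the Ethernet driver's
+ * VXLAN offload path is expected to toggle this per physical port,
+ * e.g.
+ *
+ *	err = mlx4_SET_PORT_VXLAN(dev, port, VXLAN_STEER_BY_OUTER_MAC, 1);
+ *
+ * (VXLAN_STEER_BY_OUTER_MAC is assumed to be one of the steering modes
+ * defined in the mlx4 device header; it is not defined in this file.)
+ * Calling with enable == 0 leaves VXLAN_ENABLE clear while still
+ * setting VXLAN_ENABLE_MODIFY and VXLAN_STEERING_MODIFY, so the same
+ * helper also disables the offload.
+ */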
+EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN); + +int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time) +{ + int err; + struct mlx4_cmd_mailbox *mailbox; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + *((__be32 *)mailbox->buf) = cpu_to_be32(time); + + err = mlx4_cmd(dev, mailbox->dma, port, MLX4_SET_PORT_BEACON_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_BEACON); + +int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err = 0; + + return err; +} + +int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, + u64 mac, u64 clear, u8 mode) +{ + return mlx4_cmd(dev, (mac | (clear << 63)), port, mode, + MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); +} +EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR); + +int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err = 0; + + return err; +} + +int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, + u32 in_mod, struct mlx4_cmd_mailbox *outbox) +{ + return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0, + MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); +} + +int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + if (slave != dev->caps.function) + return 0; + return mlx4_common_dump_eth_stats(dev, slave, + vhcr->in_modifier, outbox); +} + +int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, + int *slave_id) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i, found_ix = -1; + int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS; + struct mlx4_slaves_pport slaves_pport; + unsigned num_vfs; + int slave_gid; + + if (!mlx4_is_mfunc(dev)) + return -EINVAL; + + slaves_pport = mlx4_phys_to_slaves_pport(dev, port); + num_vfs = bitmap_weight(slaves_pport.slaves, + dev->persist->num_vfs + 1) - 1; + + for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { + if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid, + MLX4_ROCE_GID_ENTRY_SIZE)) { + found_ix = i; + break; + } + } + + if (found_ix >= 0) { + /* Calculate a slave_gid which is the slave number in the gid + * table and not a globally unique slave number. 
+ */ + if (found_ix < MLX4_ROCE_PF_GIDS) + slave_gid = 0; + else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) * + (vf_gids / num_vfs + 1)) + slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) / + (vf_gids / num_vfs + 1)) + 1; + else + slave_gid = + ((found_ix - MLX4_ROCE_PF_GIDS - + ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) / + (vf_gids / num_vfs)) + vf_gids % num_vfs + 1; + + /* Calculate the globally unique slave id */ + if (slave_gid) { + struct mlx4_active_ports exclusive_ports; + struct mlx4_active_ports actv_ports; + struct mlx4_slaves_pport slaves_pport_actv; + unsigned max_port_p_one; + int num_vfs_before = 0; + int candidate_slave_gid; + + /* Calculate how many VFs are on the previous port, if exists */ + for (i = 1; i < port; i++) { + bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); + set_bit(i - 1, exclusive_ports.ports); + slaves_pport_actv = + mlx4_phys_to_slaves_pport_actv( + dev, &exclusive_ports); + num_vfs_before += bitmap_weight( + slaves_pport_actv.slaves, + dev->persist->num_vfs + 1); + } + + /* candidate_slave_gid isn't necessarily the correct slave, but + * it has the same number of ports and is assigned to the same + * ports as the real slave we're looking for. On dual port VF, + * slave_gid = [single port VFs on port <port>] + + * [offset of the current slave from the first dual port VF] + + * 1 (for the PF). + */ + candidate_slave_gid = slave_gid + num_vfs_before; + + actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid); + max_port_p_one = find_first_bit( + actv_ports.ports, dev->caps.num_ports) + + bitmap_weight(actv_ports.ports, + dev->caps.num_ports) + 1; + + /* Calculate the real slave number */ + for (i = 1; i < max_port_p_one; i++) { + if (i == port) + continue; + bitmap_zero(exclusive_ports.ports, + dev->caps.num_ports); + set_bit(i - 1, exclusive_ports.ports); + slaves_pport_actv = + mlx4_phys_to_slaves_pport_actv( + dev, &exclusive_ports); + slave_gid += bitmap_weight( + slaves_pport_actv.slaves, + dev->persist->num_vfs + 1); + } + } + *slave_id = slave_gid; + } + + return (found_ix >= 0) ? 
0 : -EINVAL; +} +EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid); + +int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id, + u8 *gid) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + if (!mlx4_is_master(dev)) + return -EINVAL; + + memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw, + MLX4_ROCE_GID_ENTRY_SIZE); + return 0; +} +EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave); + +/* Cable Module Info */ +#define MODULE_INFO_MAX_READ 48 + +#define I2C_ADDR_LOW 0x50 +#define I2C_ADDR_HIGH 0x51 +#define I2C_PAGE_SIZE 256 + +/* Module Info Data */ +struct mlx4_cable_info { + u8 i2c_addr; + u8 page_num; + __be16 dev_mem_address; + __be16 reserved1; + __be16 size; + __be32 reserved2[2]; + u8 data[MODULE_INFO_MAX_READ]; +}; + +enum cable_info_err { + CABLE_INF_INV_PORT = 0x1, + CABLE_INF_OP_NOSUP = 0x2, + CABLE_INF_NOT_CONN = 0x3, + CABLE_INF_NO_EEPRM = 0x4, + CABLE_INF_PAGE_ERR = 0x5, + CABLE_INF_INV_ADDR = 0x6, + CABLE_INF_I2C_ADDR = 0x7, + CABLE_INF_QSFP_VIO = 0x8, + CABLE_INF_I2C_BUSY = 0x9, +}; + +#define MAD_STATUS_2_CABLE_ERR(mad_status) ((mad_status >> 8) & 0xFF) + +static inline const char *cable_info_mad_err_str(u16 mad_status) +{ + u8 err = MAD_STATUS_2_CABLE_ERR(mad_status); + + switch (err) { + case CABLE_INF_INV_PORT: + return "invalid port selected"; + case CABLE_INF_OP_NOSUP: + return "operation not supported for this port (the port is of type CX4 or internal)"; + case CABLE_INF_NOT_CONN: + return "cable is not connected"; + case CABLE_INF_NO_EEPRM: + return "the connected cable has no EPROM (passive copper cable)"; + case CABLE_INF_PAGE_ERR: + return "page number is greater than 15"; + case CABLE_INF_INV_ADDR: + return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)"; + case CABLE_INF_I2C_ADDR: + return "invalid I2C slave address"; + case CABLE_INF_QSFP_VIO: + return "at least one cable violates the QSFP specification and ignores the modsel signal"; + case CABLE_INF_I2C_BUSY: + return "I2C bus is constantly busy"; + } + return "Unknown Error"; +} + +/** + * mlx4_get_module_info - Read cable module eeprom data + * @dev: mlx4_dev. + * @port: port number. + * @offset: byte offset in eeprom to start reading data from. + * @size: num of bytes to read. + * @data: output buffer to put the requested data into. + * + * Reads cable module eeprom data, puts the outcome data into + * data pointer paramer. + * Returns num of read bytes on success or a negative error + * code. 
+ */ +int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, + u16 offset, u16 size, u8 *data) +{ + struct mlx4_cmd_mailbox *inbox, *outbox; + struct mlx4_mad_ifc *inmad, *outmad; + struct mlx4_cable_info *cable_info; + u16 i2c_addr; + int ret; + + if (size > MODULE_INFO_MAX_READ) + size = MODULE_INFO_MAX_READ; + + inbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(inbox)) + return PTR_ERR(inbox); + + outbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(outbox)) { + mlx4_free_cmd_mailbox(dev, inbox); + return PTR_ERR(outbox); + } + + inmad = (struct mlx4_mad_ifc *)(inbox->buf); + outmad = (struct mlx4_mad_ifc *)(outbox->buf); + + inmad->method = 0x1; /* Get */ + inmad->class_version = 0x1; + inmad->mgmt_class = 0x1; + inmad->base_version = 0x1; + inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */ + + if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE) + /* Cross pages reads are not allowed + * read until offset 256 in low page + */ + size -= offset + size - I2C_PAGE_SIZE; + + i2c_addr = I2C_ADDR_LOW; + if (offset >= I2C_PAGE_SIZE) { + /* Reset offset to high page */ + i2c_addr = I2C_ADDR_HIGH; + offset -= I2C_PAGE_SIZE; + } + + cable_info = (struct mlx4_cable_info *)inmad->data; + cable_info->dev_mem_address = cpu_to_be16(offset); + cable_info->page_num = 0; + cable_info->i2c_addr = i2c_addr; + cable_info->size = cpu_to_be16(size); + + ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3, + MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (ret) + goto out; + + if (be16_to_cpu(outmad->status)) { + /* Mad returned with bad status */ + ret = be16_to_cpu(outmad->status); + mlx4_warn(dev, + "MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n", + 0xFF60, port, i2c_addr, offset, size, + ret, cable_info_mad_err_str(ret)); + + if (i2c_addr == I2C_ADDR_HIGH && + MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR) + /* Some SFP cables do not support i2c slave + * address 0x51 (high page), abort silently. + */ + ret = 0; + else + ret = -ret; + goto out; + } + cable_info = (struct mlx4_cable_info *)outmad->data; + memcpy(data, cable_info->data, size); + ret = size; +out: + mlx4_free_cmd_mailbox(dev, inbox); + mlx4_free_cmd_mailbox(dev, outbox); + return ret; +} +EXPORT_SYMBOL(mlx4_get_module_info); diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c new file mode 100644 index 000000000..2bf437aaf --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/profile.c @@ -0,0 +1,273 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/slab.h> + +#include "mlx4.h" +#include "fw.h" + +enum { + MLX4_RES_QP, + MLX4_RES_RDMARC, + MLX4_RES_ALTC, + MLX4_RES_AUXC, + MLX4_RES_SRQ, + MLX4_RES_CQ, + MLX4_RES_EQ, + MLX4_RES_DMPT, + MLX4_RES_CMPT, + MLX4_RES_MTT, + MLX4_RES_MCG, + MLX4_RES_NUM +}; + +static const char *res_name[] = { + [MLX4_RES_QP] = "QP", + [MLX4_RES_RDMARC] = "RDMARC", + [MLX4_RES_ALTC] = "ALTC", + [MLX4_RES_AUXC] = "AUXC", + [MLX4_RES_SRQ] = "SRQ", + [MLX4_RES_CQ] = "CQ", + [MLX4_RES_EQ] = "EQ", + [MLX4_RES_DMPT] = "DMPT", + [MLX4_RES_CMPT] = "CMPT", + [MLX4_RES_MTT] = "MTT", + [MLX4_RES_MCG] = "MCG", +}; + +u64 mlx4_make_profile(struct mlx4_dev *dev, + struct mlx4_profile *request, + struct mlx4_dev_cap *dev_cap, + struct mlx4_init_hca_param *init_hca) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource { + u64 size; + u64 start; + int type; + u32 num; + int log_num; + }; + + u64 total_size = 0; + struct mlx4_resource *profile; + struct mlx4_resource tmp; + struct sysinfo si; + int i, j; + + profile = kcalloc(MLX4_RES_NUM, sizeof(*profile), GFP_KERNEL); + if (!profile) + return -ENOMEM; + + /* + * We want to scale the number of MTTs with the size of the + * system memory, since it makes sense to register a lot of + * memory on a system with a lot of memory. As a heuristic, + * make sure we have enough MTTs to cover twice the system + * memory (with PAGE_SIZE entries). + * + * This number has to be a power of two and fit into 32 bits + * due to device limitations, so cap this at 2^31 as well. + * That limits us to 8TB of memory registration per HCA with + * 4KB pages, which is probably OK for the next few months. + */ + si_meminfo(&si); + request->num_mtt = + roundup_pow_of_two(max_t(unsigned, request->num_mtt, + min(1UL << (31 - log_mtts_per_seg), + si.totalram >> (log_mtts_per_seg - 1)))); + + profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz; + profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz; + profile[MLX4_RES_ALTC].size = dev_cap->altc_entry_sz; + profile[MLX4_RES_AUXC].size = dev_cap->aux_entry_sz; + profile[MLX4_RES_SRQ].size = dev_cap->srq_entry_sz; + profile[MLX4_RES_CQ].size = dev_cap->cqc_entry_sz; + profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz; + profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz; + profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz; + profile[MLX4_RES_MTT].size = dev_cap->mtt_entry_sz; + profile[MLX4_RES_MCG].size = mlx4_get_mgm_entry_size(dev); + + profile[MLX4_RES_QP].num = request->num_qp; + profile[MLX4_RES_RDMARC].num = request->num_qp * request->rdmarc_per_qp; + profile[MLX4_RES_ALTC].num = request->num_qp; + profile[MLX4_RES_AUXC].num = request->num_qp; + profile[MLX4_RES_SRQ].num = request->num_srq; + profile[MLX4_RES_CQ].num = request->num_cq; + profile[MLX4_RES_EQ].num = mlx4_is_mfunc(dev) ? 
dev->phys_caps.num_phys_eqs : + min_t(unsigned, dev_cap->max_eqs, MAX_MSIX); + profile[MLX4_RES_DMPT].num = request->num_mpt; + profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; + profile[MLX4_RES_MTT].num = request->num_mtt * (1 << log_mtts_per_seg); + profile[MLX4_RES_MCG].num = request->num_mcg; + + for (i = 0; i < MLX4_RES_NUM; ++i) { + profile[i].type = i; + profile[i].num = roundup_pow_of_two(profile[i].num); + profile[i].log_num = ilog2(profile[i].num); + profile[i].size *= profile[i].num; + profile[i].size = max(profile[i].size, (u64) PAGE_SIZE); + } + + /* + * Sort the resources in decreasing order of size. Since they + * all have sizes that are powers of 2, we'll be able to keep + * resources aligned to their size and pack them without gaps + * using the sorted order. + */ + for (i = MLX4_RES_NUM; i > 0; --i) + for (j = 1; j < i; ++j) { + if (profile[j].size > profile[j - 1].size) { + tmp = profile[j]; + profile[j] = profile[j - 1]; + profile[j - 1] = tmp; + } + } + + for (i = 0; i < MLX4_RES_NUM; ++i) { + if (profile[i].size) { + profile[i].start = total_size; + total_size += profile[i].size; + } + + if (total_size > dev_cap->max_icm_sz) { + mlx4_err(dev, "Profile requires 0x%llx bytes; won't fit in 0x%llx bytes of context memory\n", + (unsigned long long) total_size, + (unsigned long long) dev_cap->max_icm_sz); + kfree(profile); + return -ENOMEM; + } + + if (profile[i].size) + mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, size 0x%10llx\n", + i, res_name[profile[i].type], + profile[i].log_num, + (unsigned long long) profile[i].start, + (unsigned long long) profile[i].size); + } + + mlx4_dbg(dev, "HCA context memory: reserving %d KB\n", + (int) (total_size >> 10)); + + for (i = 0; i < MLX4_RES_NUM; ++i) { + switch (profile[i].type) { + case MLX4_RES_QP: + dev->caps.num_qps = profile[i].num; + init_hca->qpc_base = profile[i].start; + init_hca->log_num_qps = profile[i].log_num; + break; + case MLX4_RES_RDMARC: + for (priv->qp_table.rdmarc_shift = 0; + request->num_qp << priv->qp_table.rdmarc_shift < profile[i].num; + ++priv->qp_table.rdmarc_shift) + ; /* nothing */ + dev->caps.max_qp_dest_rdma = 1 << priv->qp_table.rdmarc_shift; + priv->qp_table.rdmarc_base = (u32) profile[i].start; + init_hca->rdmarc_base = profile[i].start; + init_hca->log_rd_per_qp = priv->qp_table.rdmarc_shift; + break; + case MLX4_RES_ALTC: + init_hca->altc_base = profile[i].start; + break; + case MLX4_RES_AUXC: + init_hca->auxc_base = profile[i].start; + break; + case MLX4_RES_SRQ: + dev->caps.num_srqs = profile[i].num; + init_hca->srqc_base = profile[i].start; + init_hca->log_num_srqs = profile[i].log_num; + break; + case MLX4_RES_CQ: + dev->caps.num_cqs = profile[i].num; + init_hca->cqc_base = profile[i].start; + init_hca->log_num_cqs = profile[i].log_num; + break; + case MLX4_RES_EQ: + if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { + init_hca->log_num_eqs = 0x1f; + init_hca->eqc_base = profile[i].start; + init_hca->num_sys_eqs = dev_cap->num_sys_eqs; + } else { + dev->caps.num_eqs = roundup_pow_of_two( + min_t(unsigned, + dev_cap->max_eqs, + MAX_MSIX)); + init_hca->eqc_base = profile[i].start; + init_hca->log_num_eqs = ilog2(dev->caps.num_eqs); + } + break; + case MLX4_RES_DMPT: + dev->caps.num_mpts = profile[i].num; + priv->mr_table.mpt_base = profile[i].start; + init_hca->dmpt_base = profile[i].start; + init_hca->log_mpt_sz = profile[i].log_num; + break; + case MLX4_RES_CMPT: + init_hca->cmpt_base = profile[i].start; + break; + case MLX4_RES_MTT: + dev->caps.num_mtts = profile[i].num; + 
priv->mr_table.mtt_base = profile[i].start; + init_hca->mtt_base = profile[i].start; + break; + case MLX4_RES_MCG: + init_hca->mc_base = profile[i].start; + init_hca->log_mc_entry_sz = + ilog2(mlx4_get_mgm_entry_size(dev)); + init_hca->log_mc_table_sz = profile[i].log_num; + if (dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) { + dev->caps.num_mgms = profile[i].num; + } else { + init_hca->log_mc_hash_sz = + profile[i].log_num - 1; + dev->caps.num_mgms = profile[i].num >> 1; + dev->caps.num_amgms = profile[i].num >> 1; + } + break; + default: + break; + } + } + + /* + * PDs don't take any HCA memory, but we assign them as part + * of the HCA profile anyway. + */ + dev->caps.num_pds = MLX4_NUM_PDS; + + kfree(profile); + return total_size; +} diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c new file mode 100644 index 000000000..b75214a80 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -0,0 +1,909 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/gfp.h> +#include <linux/export.h> + +#include <linux/mlx4/cmd.h> +#include <linux/mlx4/qp.h> + +#include "mlx4.h" +#include "icm.h" + +/* QP to support BF should have bits 6,7 cleared */ +#define MLX4_BF_QP_SKIP_MASK 0xc0 +#define MLX4_MAX_BF_QP_RANGE 0x40 + +void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type) +{ + struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; + struct mlx4_qp *qp; + + spin_lock(&qp_table->lock); + + qp = __mlx4_qp_lookup(dev, qpn); + if (qp) + atomic_inc(&qp->refcount); + + spin_unlock(&qp_table->lock); + + if (!qp) { + mlx4_dbg(dev, "Async event for none existent QP %08x\n", qpn); + return; + } + + qp->event(qp, event_type); + + if (atomic_dec_and_test(&qp->refcount)) + complete(&qp->free); +} + +/* used for INIT/CLOSE port logic */ +static int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, int *real_qp0, int *proxy_qp0) +{ + /* this procedure is called after we already know we are on the master */ + /* qp0 is either the proxy qp0, or the real qp0 */ + u32 pf_proxy_offset = dev->phys_caps.base_proxy_sqpn + 8 * mlx4_master_func_num(dev); + *proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1; + + *real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn && + qp->qpn <= dev->phys_caps.base_sqpn + 1; + + return *real_qp0 || *proxy_qp0; +} + +static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, + struct mlx4_qp_context *context, + enum mlx4_qp_optpar optpar, + int sqd_event, struct mlx4_qp *qp, int native) +{ + static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = { + [MLX4_QP_STATE_RST] = { + [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, + [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, + [MLX4_QP_STATE_INIT] = MLX4_CMD_RST2INIT_QP, + }, + [MLX4_QP_STATE_INIT] = { + [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, + [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, + [MLX4_QP_STATE_INIT] = MLX4_CMD_INIT2INIT_QP, + [MLX4_QP_STATE_RTR] = MLX4_CMD_INIT2RTR_QP, + }, + [MLX4_QP_STATE_RTR] = { + [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, + [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, + [MLX4_QP_STATE_RTS] = MLX4_CMD_RTR2RTS_QP, + }, + [MLX4_QP_STATE_RTS] = { + [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, + [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, + [MLX4_QP_STATE_RTS] = MLX4_CMD_RTS2RTS_QP, + [MLX4_QP_STATE_SQD] = MLX4_CMD_RTS2SQD_QP, + }, + [MLX4_QP_STATE_SQD] = { + [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, + [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, + [MLX4_QP_STATE_RTS] = MLX4_CMD_SQD2RTS_QP, + [MLX4_QP_STATE_SQD] = MLX4_CMD_SQD2SQD_QP, + }, + [MLX4_QP_STATE_SQER] = { + [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, + [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, + [MLX4_QP_STATE_RTS] = MLX4_CMD_SQERR2RTS_QP, + }, + [MLX4_QP_STATE_ERR] = { + [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, + [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, + } + }; + + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cmd_mailbox *mailbox; + int ret = 0; + int real_qp0 = 0; + int proxy_qp0 = 0; + u8 port; + + if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE || + !op[cur_state][new_state]) + return -EINVAL; + + if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) { + ret = mlx4_cmd(dev, 0, qp->qpn, 2, + MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native); + if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR && + cur_state != MLX4_QP_STATE_RST && + is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) { + port = (qp->qpn & 1) + 1; + if (proxy_qp0) + priv->mfunc.master.qp0_state[port].proxy_qp0_active 
= 0; + else + priv->mfunc.master.qp0_state[port].qp0_active = 0; + } + return ret; + } + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) { + u64 mtt_addr = mlx4_mtt_addr(dev, mtt); + context->mtt_base_addr_h = mtt_addr >> 32; + context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); + context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; + } + + *(__be32 *) mailbox->buf = cpu_to_be32(optpar); + memcpy(mailbox->buf + 8, context, sizeof *context); + + ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn = + cpu_to_be32(qp->qpn); + + ret = mlx4_cmd(dev, mailbox->dma, + qp->qpn | (!!sqd_event << 31), + new_state == MLX4_QP_STATE_RST ? 2 : 0, + op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native); + + if (mlx4_is_master(dev) && is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) { + port = (qp->qpn & 1) + 1; + if (cur_state != MLX4_QP_STATE_ERR && + cur_state != MLX4_QP_STATE_RST && + new_state == MLX4_QP_STATE_ERR) { + if (proxy_qp0) + priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0; + else + priv->mfunc.master.qp0_state[port].qp0_active = 0; + } else if (new_state == MLX4_QP_STATE_RTR) { + if (proxy_qp0) + priv->mfunc.master.qp0_state[port].proxy_qp0_active = 1; + else + priv->mfunc.master.qp0_state[port].qp0_active = 1; + } + } + + mlx4_free_cmd_mailbox(dev, mailbox); + return ret; +} + +int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, + struct mlx4_qp_context *context, + enum mlx4_qp_optpar optpar, + int sqd_event, struct mlx4_qp *qp) +{ + return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context, + optpar, sqd_event, qp, 0); +} +EXPORT_SYMBOL_GPL(mlx4_qp_modify); + +int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, + int *base, u8 flags) +{ + u32 uid; + int bf_qp = !!(flags & (u8)MLX4_RESERVE_ETH_BF_QP); + + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_qp_table *qp_table = &priv->qp_table; + + if (cnt > MLX4_MAX_BF_QP_RANGE && bf_qp) + return -ENOMEM; + + uid = MLX4_QP_TABLE_ZONE_GENERAL; + if (flags & (u8)MLX4_RESERVE_A0_QP) { + if (bf_qp) + uid = MLX4_QP_TABLE_ZONE_RAW_ETH; + else + uid = MLX4_QP_TABLE_ZONE_RSS; + } + + *base = mlx4_zone_alloc_entries(qp_table->zones, uid, cnt, align, + bf_qp ? 
MLX4_BF_QP_SKIP_MASK : 0, NULL); + if (*base == -1) + return -ENOMEM; + + return 0; +} + +int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, + int *base, u8 flags) +{ + u64 in_param = 0; + u64 out_param; + int err; + + /* Turn off all unsupported QP allocation flags */ + flags &= dev->caps.alloc_res_qp_mask; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, (((u32)flags) << 24) | (u32)cnt); + set_param_h(&in_param, align); + err = mlx4_cmd_imm(dev, in_param, &out_param, + RES_QP, RES_OP_RESERVE, + MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) + return err; + + *base = get_param_l(&out_param); + return 0; + } + return __mlx4_qp_reserve_range(dev, cnt, align, base, flags); +} +EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range); + +void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_qp_table *qp_table = &priv->qp_table; + + if (mlx4_is_qp_reserved(dev, (u32) base_qpn)) + return; + mlx4_zone_free_entries_unique(qp_table->zones, base_qpn, cnt); +} + +void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) +{ + u64 in_param = 0; + int err; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, base_qpn); + set_param_h(&in_param, cnt); + err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE, + MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) { + mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n", + base_qpn, cnt); + } + } else + __mlx4_qp_release_range(dev, base_qpn, cnt); +} +EXPORT_SYMBOL_GPL(mlx4_qp_release_range); + +int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_qp_table *qp_table = &priv->qp_table; + int err; + + err = mlx4_table_get(dev, &qp_table->qp_table, qpn, gfp); + if (err) + goto err_out; + + err = mlx4_table_get(dev, &qp_table->auxc_table, qpn, gfp); + if (err) + goto err_put_qp; + + err = mlx4_table_get(dev, &qp_table->altc_table, qpn, gfp); + if (err) + goto err_put_auxc; + + err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn, gfp); + if (err) + goto err_put_altc; + + err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn, gfp); + if (err) + goto err_put_rdmarc; + + return 0; + +err_put_rdmarc: + mlx4_table_put(dev, &qp_table->rdmarc_table, qpn); + +err_put_altc: + mlx4_table_put(dev, &qp_table->altc_table, qpn); + +err_put_auxc: + mlx4_table_put(dev, &qp_table->auxc_table, qpn); + +err_put_qp: + mlx4_table_put(dev, &qp_table->qp_table, qpn); + +err_out: + return err; +} + +static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp) +{ + u64 param = 0; + + if (mlx4_is_mfunc(dev)) { + set_param_l(¶m, qpn); + return mlx4_cmd_imm(dev, param, ¶m, RES_QP, RES_OP_MAP_ICM, + MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); + } + return __mlx4_qp_alloc_icm(dev, qpn, gfp); +} + +void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_qp_table *qp_table = &priv->qp_table; + + mlx4_table_put(dev, &qp_table->cmpt_table, qpn); + mlx4_table_put(dev, &qp_table->rdmarc_table, qpn); + mlx4_table_put(dev, &qp_table->altc_table, qpn); + mlx4_table_put(dev, &qp_table->auxc_table, qpn); + mlx4_table_put(dev, &qp_table->qp_table, qpn); +} + +static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) +{ + u64 in_param = 0; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, qpn); + if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM, + MLX4_CMD_FREE_RES, 
MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED)) + mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn); + } else + __mlx4_qp_free_icm(dev, qpn); +} + +int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_qp_table *qp_table = &priv->qp_table; + int err; + + if (!qpn) + return -EINVAL; + + qp->qpn = qpn; + + err = mlx4_qp_alloc_icm(dev, qpn, gfp); + if (err) + return err; + + spin_lock_irq(&qp_table->lock); + err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & + (dev->caps.num_qps - 1), qp); + spin_unlock_irq(&qp_table->lock); + if (err) + goto err_icm; + + atomic_set(&qp->refcount, 1); + init_completion(&qp->free); + + return 0; + +err_icm: + mlx4_qp_free_icm(dev, qpn); + return err; +} + +EXPORT_SYMBOL_GPL(mlx4_qp_alloc); + +int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, + enum mlx4_update_qp_attr attr, + struct mlx4_update_qp_params *params) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_update_qp_context *cmd; + u64 pri_addr_path_mask = 0; + u64 qp_mask = 0; + int err = 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + cmd = (struct mlx4_update_qp_context *)mailbox->buf; + + if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS)) + return -EINVAL; + + if (attr & MLX4_UPDATE_QP_SMAC) { + pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX; + cmd->qp_context.pri_path.grh_mylmc = params->smac_index; + } + + if (attr & MLX4_UPDATE_QP_VSD) { + qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD; + if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE) + cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN); + } + + if (attr & MLX4_UPDATE_QP_RATE_LIMIT) { + qp_mask |= 1ULL << MLX4_UPD_QP_MASK_RATE_LIMIT; + cmd->qp_context.rate_limit_params = cpu_to_be16((params->rate_unit << 14) | params->rate_val); + } + + if (attr & MLX4_UPDATE_QP_QOS_VPORT) { + qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP; + cmd->qp_context.qos_vport = params->qos_vport; + } + + cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask); + cmd->qp_mask = cpu_to_be64(qp_mask); + + err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0, + MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_update_qp); + +void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) +{ + struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; + unsigned long flags; + + spin_lock_irqsave(&qp_table->lock, flags); + radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1)); + spin_unlock_irqrestore(&qp_table->lock, flags); +} +EXPORT_SYMBOL_GPL(mlx4_qp_remove); + +void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp) +{ + if (atomic_dec_and_test(&qp->refcount)) + complete(&qp->free); + wait_for_completion(&qp->free); + + mlx4_qp_free_icm(dev, qp->qpn); +} +EXPORT_SYMBOL_GPL(mlx4_qp_free); + +static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn) +{ + return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); +} + +#define MLX4_QP_TABLE_RSS_ETH_PRIORITY 2 +#define MLX4_QP_TABLE_RAW_ETH_PRIORITY 1 +#define MLX4_QP_TABLE_RAW_ETH_SIZE 256 + +static int mlx4_create_zones(struct mlx4_dev *dev, + u32 reserved_bottom_general, + u32 reserved_top_general, + u32 reserved_bottom_rss, + u32 start_offset_rss, + u32 max_table_offset) +{ + struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; + struct mlx4_bitmap 
(*bitmap)[MLX4_QP_TABLE_ZONE_NUM] = NULL; + int bitmap_initialized = 0; + u32 last_offset; + int k; + int err; + + qp_table->zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP); + + if (NULL == qp_table->zones) + return -ENOMEM; + + bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL); + + if (NULL == bitmap) { + err = -ENOMEM; + goto free_zone; + } + + err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_GENERAL, dev->caps.num_qps, + (1 << 23) - 1, reserved_bottom_general, + reserved_top_general); + + if (err) + goto free_bitmap; + + ++bitmap_initialized; + + err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_GENERAL, + MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO | + MLX4_ZONE_USE_RR, 0, + 0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_GENERAL); + + if (err) + goto free_bitmap; + + err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_RSS, + reserved_bottom_rss, + reserved_bottom_rss - 1, + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], + reserved_bottom_rss - start_offset_rss); + + if (err) + goto free_bitmap; + + ++bitmap_initialized; + + err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_RSS, + MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO | + MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO | + MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RSS_ETH_PRIORITY, + 0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_RSS); + + if (err) + goto free_bitmap; + + last_offset = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; + /* We have a single zone for the A0 steering QPs area of the FW. This area + * needs to be split into subareas. One set of subareas is for RSS QPs + * (in which qp number bits 6 and/or 7 are set); the other set of subareas + * is for RAW_ETH QPs, which require that both bits 6 and 7 are zero. + * Currently, the values returned by the FW (A0 steering area starting qp number + * and A0 steering area size) are such that there are only two subareas -- one + * for RSS and one for RAW_ETH. + */ + for (k = MLX4_QP_TABLE_ZONE_RSS + 1; k < sizeof(*bitmap)/sizeof((*bitmap)[0]); + k++) { + int size; + u32 offset = start_offset_rss; + u32 bf_mask; + u32 requested_size; + + /* Assuming MLX4_BF_QP_SKIP_MASK is consecutive ones, this calculates + * a mask of all LSB bits set until (and not including) the first + * set bit of MLX4_BF_QP_SKIP_MASK. For example, if MLX4_BF_QP_SKIP_MASK + * is 0xc0, bf_mask will be 0x3f. 
+ */ + bf_mask = (MLX4_BF_QP_SKIP_MASK & ~(MLX4_BF_QP_SKIP_MASK - 1)) - 1; + requested_size = min((u32)MLX4_QP_TABLE_RAW_ETH_SIZE, bf_mask + 1); + + if (((last_offset & MLX4_BF_QP_SKIP_MASK) && + ((int)(max_table_offset - last_offset)) >= + roundup_pow_of_two(MLX4_BF_QP_SKIP_MASK)) || + (!(last_offset & MLX4_BF_QP_SKIP_MASK) && + !((last_offset + requested_size - 1) & + MLX4_BF_QP_SKIP_MASK))) + size = requested_size; + else { + u32 candidate_offset = + (last_offset | MLX4_BF_QP_SKIP_MASK | bf_mask) + 1; + + if (last_offset & MLX4_BF_QP_SKIP_MASK) + last_offset = candidate_offset; + + /* From this point, the BF bits are 0 */ + + if (last_offset > max_table_offset) { + /* need to skip */ + size = -1; + } else { + size = min3(max_table_offset - last_offset, + bf_mask - (last_offset & bf_mask), + requested_size); + if (size < requested_size) { + int candidate_size; + + candidate_size = min3( + max_table_offset - candidate_offset, + bf_mask - (last_offset & bf_mask), + requested_size); + + /* We will not take this path if last_offset was + * already set above to candidate_offset + */ + if (candidate_size > size) { + last_offset = candidate_offset; + size = candidate_size; + } + } + } + } + + if (size > 0) { + /* mlx4_bitmap_alloc_range will find a contiguous range of "size" + * QPs in which both bits 6 and 7 are zero, because we pass it the + * MLX4_BF_SKIP_MASK). + */ + offset = mlx4_bitmap_alloc_range( + *bitmap + MLX4_QP_TABLE_ZONE_RSS, + size, 1, + MLX4_BF_QP_SKIP_MASK); + + if (offset == (u32)-1) { + err = -ENOMEM; + break; + } + + last_offset = offset + size; + + err = mlx4_bitmap_init(*bitmap + k, roundup_pow_of_two(size), + roundup_pow_of_two(size) - 1, 0, + roundup_pow_of_two(size) - size); + } else { + /* Add an empty bitmap, we'll allocate from different zones (since + * at least one is reserved) + */ + err = mlx4_bitmap_init(*bitmap + k, 1, + MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0, + 0); + mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0); + } + + if (err) + break; + + ++bitmap_initialized; + + err = mlx4_zone_add_one(qp_table->zones, *bitmap + k, + MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO | + MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO | + MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RAW_ETH_PRIORITY, + offset, qp_table->zones_uids + k); + + if (err) + break; + } + + if (err) + goto free_bitmap; + + qp_table->bitmap_gen = *bitmap; + + return err; + +free_bitmap: + for (k = 0; k < bitmap_initialized; k++) + mlx4_bitmap_cleanup(*bitmap + k); + kfree(bitmap); +free_zone: + mlx4_zone_allocator_destroy(qp_table->zones); + return err; +} + +static void mlx4_cleanup_qp_zones(struct mlx4_dev *dev) +{ + struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; + + if (qp_table->zones) { + int i; + + for (i = 0; + i < sizeof(qp_table->zones_uids)/sizeof(qp_table->zones_uids[0]); + i++) { + struct mlx4_bitmap *bitmap = + mlx4_zone_get_bitmap(qp_table->zones, + qp_table->zones_uids[i]); + + mlx4_zone_remove_one(qp_table->zones, qp_table->zones_uids[i]); + if (NULL == bitmap) + continue; + + mlx4_bitmap_cleanup(bitmap); + } + mlx4_zone_allocator_destroy(qp_table->zones); + kfree(qp_table->bitmap_gen); + qp_table->bitmap_gen = NULL; + qp_table->zones = NULL; + } +} + +int mlx4_init_qp_table(struct mlx4_dev *dev) +{ + struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; + int err; + int reserved_from_top = 0; + int reserved_from_bot; + int k; + int fixed_reserved_from_bot_rv = 0; + int bottom_reserved_for_rss_bitmap; + u32 max_table_offset = dev->caps.dmfs_high_rate_qpn_base + + dev->caps.dmfs_high_rate_qpn_range; + + 
spin_lock_init(&qp_table->lock); + INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC); + if (mlx4_is_slave(dev)) + return 0; + + /* We reserve 2 extra QPs per port for the special QPs. The + * block of special QPs must be aligned to a multiple of 8, so + * round up. + * + * We also reserve the MSB of the 24-bit QP number to indicate + * that a QP is an XRC QP. + */ + for (k = 0; k <= MLX4_QP_REGION_BOTTOM; k++) + fixed_reserved_from_bot_rv += dev->caps.reserved_qps_cnt[k]; + + if (fixed_reserved_from_bot_rv < max_table_offset) + fixed_reserved_from_bot_rv = max_table_offset; + + /* We reserve at least 1 extra for bitmaps that we don't have enough space for*/ + bottom_reserved_for_rss_bitmap = + roundup_pow_of_two(fixed_reserved_from_bot_rv + 1); + dev->phys_caps.base_sqpn = ALIGN(bottom_reserved_for_rss_bitmap, 8); + + { + int sort[MLX4_NUM_QP_REGION]; + int i, j, tmp; + int last_base = dev->caps.num_qps; + + for (i = 1; i < MLX4_NUM_QP_REGION; ++i) + sort[i] = i; + + for (i = MLX4_NUM_QP_REGION; i > MLX4_QP_REGION_BOTTOM; --i) { + for (j = MLX4_QP_REGION_BOTTOM + 2; j < i; ++j) { + if (dev->caps.reserved_qps_cnt[sort[j]] > + dev->caps.reserved_qps_cnt[sort[j - 1]]) { + tmp = sort[j]; + sort[j] = sort[j - 1]; + sort[j - 1] = tmp; + } + } + } + + for (i = MLX4_QP_REGION_BOTTOM + 1; i < MLX4_NUM_QP_REGION; ++i) { + last_base -= dev->caps.reserved_qps_cnt[sort[i]]; + dev->caps.reserved_qps_base[sort[i]] = last_base; + reserved_from_top += + dev->caps.reserved_qps_cnt[sort[i]]; + } + } + + /* Reserve 8 real SQPs in both native and SRIOV modes. + * In addition, in SRIOV mode, reserve 8 proxy SQPs per function + * (for all PFs and VFs), and 8 corresponding tunnel QPs. + * Each proxy SQP works opposite its own tunnel QP. + * + * The QPs are arranged as follows: + * a. 8 real SQPs + * b. All the proxy SQPs (8 per function) + * c. 
All the tunnel QPs (8 per function) + */ + reserved_from_bot = mlx4_num_reserved_sqps(dev); + if (reserved_from_bot + reserved_from_top > dev->caps.num_qps) { + mlx4_err(dev, "Number of reserved QPs is higher than number of QPs\n"); + return -EINVAL; + } + + err = mlx4_create_zones(dev, reserved_from_bot, reserved_from_bot, + bottom_reserved_for_rss_bitmap, + fixed_reserved_from_bot_rv, + max_table_offset); + + if (err) + return err; + + if (mlx4_is_mfunc(dev)) { + /* for PPF use */ + dev->phys_caps.base_proxy_sqpn = dev->phys_caps.base_sqpn + 8; + dev->phys_caps.base_tunnel_sqpn = dev->phys_caps.base_sqpn + 8 + 8 * MLX4_MFUNC_MAX; + + /* In mfunc, calculate proxy and tunnel qp offsets for the PF here, + * since the PF does not call mlx4_slave_caps */ + dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); + dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); + dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); + dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); + + if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy || + !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) { + err = -ENOMEM; + goto err_mem; + } + + for (k = 0; k < dev->caps.num_ports; k++) { + dev->caps.qp0_proxy[k] = dev->phys_caps.base_proxy_sqpn + + 8 * mlx4_master_func_num(dev) + k; + dev->caps.qp0_tunnel[k] = dev->caps.qp0_proxy[k] + 8 * MLX4_MFUNC_MAX; + dev->caps.qp1_proxy[k] = dev->phys_caps.base_proxy_sqpn + + 8 * mlx4_master_func_num(dev) + MLX4_MAX_PORTS + k; + dev->caps.qp1_tunnel[k] = dev->caps.qp1_proxy[k] + 8 * MLX4_MFUNC_MAX; + } + } + + + err = mlx4_CONF_SPECIAL_QP(dev, dev->phys_caps.base_sqpn); + if (err) + goto err_mem; + + return err; + +err_mem: + kfree(dev->caps.qp0_tunnel); + kfree(dev->caps.qp0_proxy); + kfree(dev->caps.qp1_tunnel); + kfree(dev->caps.qp1_proxy); + dev->caps.qp0_tunnel = dev->caps.qp0_proxy = + dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL; + mlx4_cleanup_qp_zones(dev); + return err; +} + +void mlx4_cleanup_qp_table(struct mlx4_dev *dev) +{ + if (mlx4_is_slave(dev)) + return; + + mlx4_CONF_SPECIAL_QP(dev, 0); + + mlx4_cleanup_qp_zones(dev); +} + +int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, + struct mlx4_qp_context *context) +{ + struct mlx4_cmd_mailbox *mailbox; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0, + MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); + if (!err) + memcpy(context, mailbox->buf + 8, sizeof *context); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_qp_query); + +int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + struct mlx4_qp_context *context, + struct mlx4_qp *qp, enum mlx4_qp_state *qp_state) +{ + int err; + int i; + enum mlx4_qp_state states[] = { + MLX4_QP_STATE_RST, + MLX4_QP_STATE_INIT, + MLX4_QP_STATE_RTR, + MLX4_QP_STATE_RTS + }; + + for (i = 0; i < ARRAY_SIZE(states) - 1; i++) { + context->flags &= cpu_to_be32(~(0xf << 28)); + context->flags |= cpu_to_be32(states[i + 1] << 28); + if (states[i + 1] != MLX4_QP_STATE_RTR) + context->params2 &= ~MLX4_QP_BIT_FPP; + err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1], + context, 0, 0, qp); + if (err) { + mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n", + states[i + 1], err); + return err; + } + + *qp_state = states[i + 1]; + } + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_qp_to_ready); diff --git 
a/drivers/net/ethernet/mellanox/mlx4/reset.c b/drivers/net/ethernet/mellanox/mlx4/reset.c new file mode 100644 index 000000000..0076d8858 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/reset.c @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/errno.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/jiffies.h> + +#include "mlx4.h" + +int mlx4_reset(struct mlx4_dev *dev) +{ + void __iomem *reset; + u32 *hca_header = NULL; + int pcie_cap; + u16 devctl; + u16 linkctl; + u16 vendor; + unsigned long end; + u32 sem; + int i; + int err = 0; + +#define MLX4_RESET_BASE 0xf0000 +#define MLX4_RESET_SIZE 0x400 +#define MLX4_SEM_OFFSET 0x3fc +#define MLX4_RESET_OFFSET 0x10 +#define MLX4_RESET_VALUE swab32(1) + +#define MLX4_SEM_TIMEOUT_JIFFIES (10 * HZ) +#define MLX4_RESET_TIMEOUT_JIFFIES (2 * HZ) + + /* + * Reset the chip. This is somewhat ugly because we have to + * save off the PCI header before reset and then restore it + * after the chip reboots. We skip config space offsets 22 + * and 23 since those have a special meaning. + */ + + /* Do we need to save off the full 4K PCI Express header?? 
*/ + hca_header = kmalloc(256, GFP_KERNEL); + if (!hca_header) { + err = -ENOMEM; + mlx4_err(dev, "Couldn't allocate memory to save HCA PCI header, aborting\n"); + goto out; + } + + pcie_cap = pci_pcie_cap(dev->persist->pdev); + + for (i = 0; i < 64; ++i) { + if (i == 22 || i == 23) + continue; + if (pci_read_config_dword(dev->persist->pdev, i * 4, + hca_header + i)) { + err = -ENODEV; + mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n"); + goto out; + } + } + + reset = ioremap(pci_resource_start(dev->persist->pdev, 0) + + MLX4_RESET_BASE, + MLX4_RESET_SIZE); + if (!reset) { + err = -ENOMEM; + mlx4_err(dev, "Couldn't map HCA reset register, aborting\n"); + goto out; + } + + /* grab HW semaphore to lock out flash updates */ + end = jiffies + MLX4_SEM_TIMEOUT_JIFFIES; + do { + sem = readl(reset + MLX4_SEM_OFFSET); + if (!sem) + break; + + msleep(1); + } while (time_before(jiffies, end)); + + if (sem) { + mlx4_err(dev, "Failed to obtain HW semaphore, aborting\n"); + err = -EAGAIN; + iounmap(reset); + goto out; + } + + /* actually hit reset */ + writel(MLX4_RESET_VALUE, reset + MLX4_RESET_OFFSET); + iounmap(reset); + + /* Docs say to wait one second before accessing device */ + msleep(1000); + + end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES; + do { + if (!pci_read_config_word(dev->persist->pdev, PCI_VENDOR_ID, + &vendor) && vendor != 0xffff) + break; + + msleep(1); + } while (time_before(jiffies, end)); + + if (vendor == 0xffff) { + err = -ENODEV; + mlx4_err(dev, "PCI device did not come back after reset, aborting\n"); + goto out; + } + + /* Now restore the PCI headers */ + if (pcie_cap) { + devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4]; + if (pcie_capability_write_word(dev->persist->pdev, + PCI_EXP_DEVCTL, + devctl)) { + err = -ENODEV; + mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n"); + goto out; + } + linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4]; + if (pcie_capability_write_word(dev->persist->pdev, + PCI_EXP_LNKCTL, + linkctl)) { + err = -ENODEV; + mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n"); + goto out; + } + } + + for (i = 0; i < 16; ++i) { + if (i * 4 == PCI_COMMAND) + continue; + + if (pci_write_config_dword(dev->persist->pdev, i * 4, + hca_header[i])) { + err = -ENODEV; + mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n", + i); + goto out; + } + } + + if (pci_write_config_dword(dev->persist->pdev, PCI_COMMAND, + hca_header[PCI_COMMAND / 4])) { + err = -ENODEV; + mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n"); + goto out; + } + +out: + kfree(hca_header); + + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c new file mode 100644 index 000000000..bafe2180c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -0,0 +1,4957 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. + * All rights reserved. + * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/mlx4/cmd.h> +#include <linux/mlx4/qp.h> +#include <linux/if_ether.h> +#include <linux/etherdevice.h> + +#include "mlx4.h" +#include "fw.h" + +#define MLX4_MAC_VALID (1ull << 63) + +struct mac_res { + struct list_head list; + u64 mac; + int ref_count; + u8 smac_index; + u8 port; +}; + +struct vlan_res { + struct list_head list; + u16 vlan; + int ref_count; + int vlan_index; + u8 port; +}; + +struct res_common { + struct list_head list; + struct rb_node node; + u64 res_id; + int owner; + int state; + int from_state; + int to_state; + int removing; +}; + +enum { + RES_ANY_BUSY = 1 +}; + +struct res_gid { + struct list_head list; + u8 gid[16]; + enum mlx4_protocol prot; + enum mlx4_steer_type steer; + u64 reg_id; +}; + +enum res_qp_states { + RES_QP_BUSY = RES_ANY_BUSY, + + /* QP number was allocated */ + RES_QP_RESERVED, + + /* ICM memory for QP context was mapped */ + RES_QP_MAPPED, + + /* QP is in hw ownership */ + RES_QP_HW +}; + +struct res_qp { + struct res_common com; + struct res_mtt *mtt; + struct res_cq *rcq; + struct res_cq *scq; + struct res_srq *srq; + struct list_head mcg_list; + spinlock_t mcg_spl; + int local_qpn; + atomic_t ref_count; + u32 qpc_flags; + /* saved qp params before VST enforcement in order to restore on VGT */ + u8 sched_queue; + __be32 param3; + u8 vlan_control; + u8 fvl_rx; + u8 pri_path_fl; + u8 vlan_index; + u8 feup; +}; + +enum res_mtt_states { + RES_MTT_BUSY = RES_ANY_BUSY, + RES_MTT_ALLOCATED, +}; + +static inline const char *mtt_states_str(enum res_mtt_states state) +{ + switch (state) { + case RES_MTT_BUSY: return "RES_MTT_BUSY"; + case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED"; + default: return "Unknown"; + } +} + +struct res_mtt { + struct res_common com; + int order; + atomic_t ref_count; +}; + +enum res_mpt_states { + RES_MPT_BUSY = RES_ANY_BUSY, + RES_MPT_RESERVED, + RES_MPT_MAPPED, + RES_MPT_HW, +}; + +struct res_mpt { + struct res_common com; + struct res_mtt *mtt; + int key; +}; + +enum res_eq_states { + RES_EQ_BUSY = RES_ANY_BUSY, + RES_EQ_RESERVED, + RES_EQ_HW, +}; + +struct res_eq { + struct res_common com; + struct res_mtt *mtt; +}; + +enum 
res_cq_states { + RES_CQ_BUSY = RES_ANY_BUSY, + RES_CQ_ALLOCATED, + RES_CQ_HW, +}; + +struct res_cq { + struct res_common com; + struct res_mtt *mtt; + atomic_t ref_count; +}; + +enum res_srq_states { + RES_SRQ_BUSY = RES_ANY_BUSY, + RES_SRQ_ALLOCATED, + RES_SRQ_HW, +}; + +struct res_srq { + struct res_common com; + struct res_mtt *mtt; + struct res_cq *cq; + atomic_t ref_count; +}; + +enum res_counter_states { + RES_COUNTER_BUSY = RES_ANY_BUSY, + RES_COUNTER_ALLOCATED, +}; + +struct res_counter { + struct res_common com; + int port; +}; + +enum res_xrcdn_states { + RES_XRCD_BUSY = RES_ANY_BUSY, + RES_XRCD_ALLOCATED, +}; + +struct res_xrcdn { + struct res_common com; + int port; +}; + +enum res_fs_rule_states { + RES_FS_RULE_BUSY = RES_ANY_BUSY, + RES_FS_RULE_ALLOCATED, +}; + +struct res_fs_rule { + struct res_common com; + int qpn; +}; + +static void *res_tracker_lookup(struct rb_root *root, u64 res_id) +{ + struct rb_node *node = root->rb_node; + + while (node) { + struct res_common *res = container_of(node, struct res_common, + node); + + if (res_id < res->res_id) + node = node->rb_left; + else if (res_id > res->res_id) + node = node->rb_right; + else + return res; + } + return NULL; +} + +static int res_tracker_insert(struct rb_root *root, struct res_common *res) +{ + struct rb_node **new = &(root->rb_node), *parent = NULL; + + /* Figure out where to put new node */ + while (*new) { + struct res_common *this = container_of(*new, struct res_common, + node); + + parent = *new; + if (res->res_id < this->res_id) + new = &((*new)->rb_left); + else if (res->res_id > this->res_id) + new = &((*new)->rb_right); + else + return -EEXIST; + } + + /* Add new node and rebalance tree. */ + rb_link_node(&res->node, parent, new); + rb_insert_color(&res->node, root); + + return 0; +} + +enum qp_transition { + QP_TRANS_INIT2RTR, + QP_TRANS_RTR2RTS, + QP_TRANS_RTS2RTS, + QP_TRANS_SQERR2RTS, + QP_TRANS_SQD2SQD, + QP_TRANS_SQD2RTS +}; + +/* For Debug uses */ +static const char *resource_str(enum mlx4_resource rt) +{ + switch (rt) { + case RES_QP: return "RES_QP"; + case RES_CQ: return "RES_CQ"; + case RES_SRQ: return "RES_SRQ"; + case RES_MPT: return "RES_MPT"; + case RES_MTT: return "RES_MTT"; + case RES_MAC: return "RES_MAC"; + case RES_VLAN: return "RES_VLAN"; + case RES_EQ: return "RES_EQ"; + case RES_COUNTER: return "RES_COUNTER"; + case RES_FS_RULE: return "RES_FS_RULE"; + case RES_XRCD: return "RES_XRCD"; + default: return "Unknown resource type !!!"; + }; +} + +static void rem_slave_vlans(struct mlx4_dev *dev, int slave); +static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave, + enum mlx4_resource res_type, int count, + int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct resource_allocator *res_alloc = + &priv->mfunc.master.res_tracker.res_alloc[res_type]; + int err = -EINVAL; + int allocated, free, reserved, guaranteed, from_free; + int from_rsvd; + + if (slave > dev->persist->num_vfs) + return -EINVAL; + + spin_lock(&res_alloc->alloc_lock); + allocated = (port > 0) ? + res_alloc->allocated[(port - 1) * + (dev->persist->num_vfs + 1) + slave] : + res_alloc->allocated[slave]; + free = (port > 0) ? res_alloc->res_port_free[port - 1] : + res_alloc->res_free; + reserved = (port > 0) ? 
res_alloc->res_port_rsvd[port - 1] : + res_alloc->res_reserved; + guaranteed = res_alloc->guaranteed[slave]; + + if (allocated + count > res_alloc->quota[slave]) { + mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n", + slave, port, resource_str(res_type), count, + allocated, res_alloc->quota[slave]); + goto out; + } + + if (allocated + count <= guaranteed) { + err = 0; + from_rsvd = count; + } else { + /* portion may need to be obtained from free area */ + if (guaranteed - allocated > 0) + from_free = count - (guaranteed - allocated); + else + from_free = count; + + from_rsvd = count - from_free; + + if (free - from_free >= reserved) + err = 0; + else + mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n", + slave, port, resource_str(res_type), free, + from_free, reserved); + } + + if (!err) { + /* grant the request */ + if (port > 0) { + res_alloc->allocated[(port - 1) * + (dev->persist->num_vfs + 1) + slave] += count; + res_alloc->res_port_free[port - 1] -= count; + res_alloc->res_port_rsvd[port - 1] -= from_rsvd; + } else { + res_alloc->allocated[slave] += count; + res_alloc->res_free -= count; + res_alloc->res_reserved -= from_rsvd; + } + } + +out: + spin_unlock(&res_alloc->alloc_lock); + return err; +} + +static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave, + enum mlx4_resource res_type, int count, + int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct resource_allocator *res_alloc = + &priv->mfunc.master.res_tracker.res_alloc[res_type]; + int allocated, guaranteed, from_rsvd; + + if (slave > dev->persist->num_vfs) + return; + + spin_lock(&res_alloc->alloc_lock); + + allocated = (port > 0) ? + res_alloc->allocated[(port - 1) * + (dev->persist->num_vfs + 1) + slave] : + res_alloc->allocated[slave]; + guaranteed = res_alloc->guaranteed[slave]; + + if (allocated - count >= guaranteed) { + from_rsvd = 0; + } else { + /* portion may need to be returned to reserved area */ + if (allocated - guaranteed > 0) + from_rsvd = count - (allocated - guaranteed); + else + from_rsvd = count; + } + + if (port > 0) { + res_alloc->allocated[(port - 1) * + (dev->persist->num_vfs + 1) + slave] -= count; + res_alloc->res_port_free[port - 1] += count; + res_alloc->res_port_rsvd[port - 1] += from_rsvd; + } else { + res_alloc->allocated[slave] -= count; + res_alloc->res_free += count; + res_alloc->res_reserved += from_rsvd; + } + + spin_unlock(&res_alloc->alloc_lock); + return; +} + +static inline void initialize_res_quotas(struct mlx4_dev *dev, + struct resource_allocator *res_alloc, + enum mlx4_resource res_type, + int vf, int num_instances) +{ + res_alloc->guaranteed[vf] = num_instances / + (2 * (dev->persist->num_vfs + 1)); + res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf]; + if (vf == mlx4_master_func_num(dev)) { + res_alloc->res_free = num_instances; + if (res_type == RES_MTT) { + /* reserved mtts will be taken out of the PF allocation */ + res_alloc->res_free += dev->caps.reserved_mtts; + res_alloc->guaranteed[vf] += dev->caps.reserved_mtts; + res_alloc->quota[vf] += dev->caps.reserved_mtts; + } + } +} + +void mlx4_init_quotas(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int pf; + + /* quotas for VFs are initialized in mlx4_slave_cap */ + if (mlx4_is_slave(dev)) + return; + + if (!mlx4_is_mfunc(dev)) { + dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps - + mlx4_num_reserved_sqps(dev); + dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs; 
+ dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs; + dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts; + dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws; + return; + } + + pf = mlx4_master_func_num(dev); + dev->quotas.qp = + priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf]; + dev->quotas.cq = + priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf]; + dev->quotas.srq = + priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf]; + dev->quotas.mtt = + priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf]; + dev->quotas.mpt = + priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf]; +} +int mlx4_init_resource_tracker(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i, j; + int t; + + priv->mfunc.master.res_tracker.slave_list = + kzalloc(dev->num_slaves * sizeof(struct slave_list), + GFP_KERNEL); + if (!priv->mfunc.master.res_tracker.slave_list) + return -ENOMEM; + + for (i = 0 ; i < dev->num_slaves; i++) { + for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t) + INIT_LIST_HEAD(&priv->mfunc.master.res_tracker. + slave_list[i].res_list[t]); + mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex); + } + + mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n", + dev->num_slaves); + for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) + priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT; + + for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { + struct resource_allocator *res_alloc = + &priv->mfunc.master.res_tracker.res_alloc[i]; + res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) * + sizeof(int), GFP_KERNEL); + res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) * + sizeof(int), GFP_KERNEL); + if (i == RES_MAC || i == RES_VLAN) + res_alloc->allocated = kzalloc(MLX4_MAX_PORTS * + (dev->persist->num_vfs + + 1) * + sizeof(int), GFP_KERNEL); + else + res_alloc->allocated = kzalloc((dev->persist-> + num_vfs + 1) * + sizeof(int), GFP_KERNEL); + + if (!res_alloc->quota || !res_alloc->guaranteed || + !res_alloc->allocated) + goto no_mem_err; + + spin_lock_init(&res_alloc->alloc_lock); + for (t = 0; t < dev->persist->num_vfs + 1; t++) { + struct mlx4_active_ports actv_ports = + mlx4_get_active_ports(dev, t); + switch (i) { + case RES_QP: + initialize_res_quotas(dev, res_alloc, RES_QP, + t, dev->caps.num_qps - + dev->caps.reserved_qps - + mlx4_num_reserved_sqps(dev)); + break; + case RES_CQ: + initialize_res_quotas(dev, res_alloc, RES_CQ, + t, dev->caps.num_cqs - + dev->caps.reserved_cqs); + break; + case RES_SRQ: + initialize_res_quotas(dev, res_alloc, RES_SRQ, + t, dev->caps.num_srqs - + dev->caps.reserved_srqs); + break; + case RES_MPT: + initialize_res_quotas(dev, res_alloc, RES_MPT, + t, dev->caps.num_mpts - + dev->caps.reserved_mrws); + break; + case RES_MTT: + initialize_res_quotas(dev, res_alloc, RES_MTT, + t, dev->caps.num_mtts - + dev->caps.reserved_mtts); + break; + case RES_MAC: + if (t == mlx4_master_func_num(dev)) { + int max_vfs_pport = 0; + /* Calculate the max vfs per port for */ + /* both ports. 
*/ + for (j = 0; j < dev->caps.num_ports; + j++) { + struct mlx4_slaves_pport slaves_pport = + mlx4_phys_to_slaves_pport(dev, j + 1); + unsigned current_slaves = + bitmap_weight(slaves_pport.slaves, + dev->caps.num_ports) - 1; + if (max_vfs_pport < current_slaves) + max_vfs_pport = + current_slaves; + } + res_alloc->quota[t] = + MLX4_MAX_MAC_NUM - + 2 * max_vfs_pport; + res_alloc->guaranteed[t] = 2; + for (j = 0; j < MLX4_MAX_PORTS; j++) + res_alloc->res_port_free[j] = + MLX4_MAX_MAC_NUM; + } else { + res_alloc->quota[t] = MLX4_MAX_MAC_NUM; + res_alloc->guaranteed[t] = 2; + } + break; + case RES_VLAN: + if (t == mlx4_master_func_num(dev)) { + res_alloc->quota[t] = MLX4_MAX_VLAN_NUM; + res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2; + for (j = 0; j < MLX4_MAX_PORTS; j++) + res_alloc->res_port_free[j] = + res_alloc->quota[t]; + } else { + res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2; + res_alloc->guaranteed[t] = 0; + } + break; + case RES_COUNTER: + res_alloc->quota[t] = dev->caps.max_counters; + res_alloc->guaranteed[t] = 0; + if (t == mlx4_master_func_num(dev)) + res_alloc->res_free = res_alloc->quota[t]; + break; + default: + break; + } + if (i == RES_MAC || i == RES_VLAN) { + for (j = 0; j < dev->caps.num_ports; j++) + if (test_bit(j, actv_ports.ports)) + res_alloc->res_port_rsvd[j] += + res_alloc->guaranteed[t]; + } else { + res_alloc->res_reserved += res_alloc->guaranteed[t]; + } + } + } + spin_lock_init(&priv->mfunc.master.res_tracker.lock); + return 0; + +no_mem_err: + for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { + kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated); + priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL; + kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed); + priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL; + kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota); + priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL; + } + return -ENOMEM; +} + +void mlx4_free_resource_tracker(struct mlx4_dev *dev, + enum mlx4_res_tracker_free_type type) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i; + + if (priv->mfunc.master.res_tracker.slave_list) { + if (type != RES_TR_FREE_STRUCTS_ONLY) { + for (i = 0; i < dev->num_slaves; i++) { + if (type == RES_TR_FREE_ALL || + dev->caps.function != i) + mlx4_delete_all_resources_for_slave(dev, i); + } + /* free master's vlans */ + i = dev->caps.function; + mlx4_reset_roce_gids(dev, i); + mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex); + rem_slave_vlans(dev, i); + mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex); + } + + if (type != RES_TR_FREE_SLAVES_ONLY) { + for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { + kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated); + priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL; + kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed); + priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL; + kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota); + priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL; + } + kfree(priv->mfunc.master.res_tracker.slave_list); + priv->mfunc.master.res_tracker.slave_list = NULL; + } + } +} + +static void update_pkey_index(struct mlx4_dev *dev, int slave, + struct mlx4_cmd_mailbox *inbox) +{ + u8 sched = *(u8 *)(inbox->buf + 64); + u8 orig_index = *(u8 *)(inbox->buf + 35); + u8 new_index; + struct mlx4_priv *priv = mlx4_priv(dev); + int port; + + port = (sched >> 6 & 1) + 1; + + new_index = priv->virt2phys_pkey[slave][port - 
1][orig_index]; + *(u8 *)(inbox->buf + 35) = new_index; +} + +static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox, + u8 slave) +{ + struct mlx4_qp_context *qp_ctx = inbox->buf + 8; + enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf); + u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; + int port; + + if (MLX4_QP_ST_UD == ts) { + port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; + if (mlx4_is_eth(dev, port)) + qp_ctx->pri_path.mgid_index = + mlx4_get_base_gid_ix(dev, slave, port) | 0x80; + else + qp_ctx->pri_path.mgid_index = slave | 0x80; + + } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) { + if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) { + port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; + if (mlx4_is_eth(dev, port)) { + qp_ctx->pri_path.mgid_index += + mlx4_get_base_gid_ix(dev, slave, port); + qp_ctx->pri_path.mgid_index &= 0x7f; + } else { + qp_ctx->pri_path.mgid_index = slave & 0x7F; + } + } + if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { + port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1; + if (mlx4_is_eth(dev, port)) { + qp_ctx->alt_path.mgid_index += + mlx4_get_base_gid_ix(dev, slave, port); + qp_ctx->alt_path.mgid_index &= 0x7f; + } else { + qp_ctx->alt_path.mgid_index = slave & 0x7F; + } + } + } +} + +static int update_vport_qp_param(struct mlx4_dev *dev, + struct mlx4_cmd_mailbox *inbox, + u8 slave, u32 qpn) +{ + struct mlx4_qp_context *qpc = inbox->buf + 8; + struct mlx4_vport_oper_state *vp_oper; + struct mlx4_priv *priv; + u32 qp_type; + int port, err = 0; + + port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; + priv = mlx4_priv(dev); + vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff; + + if (MLX4_VGT != vp_oper->state.default_vlan) { + /* the reserved QPs (special, proxy, tunnel) + * do not operate over vlans + */ + if (mlx4_is_qp_reserved(dev, qpn)) + return 0; + + /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */ + if (qp_type == MLX4_QP_ST_UD || + (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) { + if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) { + *(__be32 *)inbox->buf = + cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) | + MLX4_QP_OPTPAR_VLAN_STRIPPING); + qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); + } else { + struct mlx4_update_qp_params params = {.flags = 0}; + + err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, ¶ms); + if (err) + goto out; + } + } + + if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE && + dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) { + qpc->pri_path.vlan_control = + MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; + } else if (0 != vp_oper->state.default_vlan) { + qpc->pri_path.vlan_control = + MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; + } else { /* priority tagged */ + qpc->pri_path.vlan_control = + MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; + } + + qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN; + qpc->pri_path.vlan_index = vp_oper->vlan_idx; + qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN; + qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN; + qpc->pri_path.sched_queue &= 0xC7; + qpc->pri_path.sched_queue |= 
(vp_oper->state.default_qos) << 3; + qpc->qos_vport = vp_oper->state.qos_vport; + } + if (vp_oper->state.spoofchk) { + qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC; + qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx; + } +out: + return err; +} + +static int mpt_mask(struct mlx4_dev *dev) +{ + return dev->caps.num_mpts - 1; +} + +static void *find_res(struct mlx4_dev *dev, u64 res_id, + enum mlx4_resource type) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type], + res_id); +} + +static int get_res(struct mlx4_dev *dev, int slave, u64 res_id, + enum mlx4_resource type, + void *res) +{ + struct res_common *r; + int err = 0; + + spin_lock_irq(mlx4_tlock(dev)); + r = find_res(dev, res_id, type); + if (!r) { + err = -ENONET; + goto exit; + } + + if (r->state == RES_ANY_BUSY) { + err = -EBUSY; + goto exit; + } + + if (r->owner != slave) { + err = -EPERM; + goto exit; + } + + r->from_state = r->state; + r->state = RES_ANY_BUSY; + + if (res) + *((struct res_common **)res) = r; + +exit: + spin_unlock_irq(mlx4_tlock(dev)); + return err; +} + +int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, + enum mlx4_resource type, + u64 res_id, int *slave) +{ + + struct res_common *r; + int err = -ENOENT; + int id = res_id; + + if (type == RES_QP) + id &= 0x7fffff; + spin_lock(mlx4_tlock(dev)); + + r = find_res(dev, id, type); + if (r) { + *slave = r->owner; + err = 0; + } + spin_unlock(mlx4_tlock(dev)); + + return err; +} + +static void put_res(struct mlx4_dev *dev, int slave, u64 res_id, + enum mlx4_resource type) +{ + struct res_common *r; + + spin_lock_irq(mlx4_tlock(dev)); + r = find_res(dev, res_id, type); + if (r) + r->state = r->from_state; + spin_unlock_irq(mlx4_tlock(dev)); +} + +static struct res_common *alloc_qp_tr(int id) +{ + struct res_qp *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->com.state = RES_QP_RESERVED; + ret->local_qpn = id; + INIT_LIST_HEAD(&ret->mcg_list); + spin_lock_init(&ret->mcg_spl); + atomic_set(&ret->ref_count, 0); + + return &ret->com; +} + +static struct res_common *alloc_mtt_tr(int id, int order) +{ + struct res_mtt *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->order = order; + ret->com.state = RES_MTT_ALLOCATED; + atomic_set(&ret->ref_count, 0); + + return &ret->com; +} + +static struct res_common *alloc_mpt_tr(int id, int key) +{ + struct res_mpt *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->com.state = RES_MPT_RESERVED; + ret->key = key; + + return &ret->com; +} + +static struct res_common *alloc_eq_tr(int id) +{ + struct res_eq *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->com.state = RES_EQ_RESERVED; + + return &ret->com; +} + +static struct res_common *alloc_cq_tr(int id) +{ + struct res_cq *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->com.state = RES_CQ_ALLOCATED; + atomic_set(&ret->ref_count, 0); + + return &ret->com; +} + +static struct res_common *alloc_srq_tr(int id) +{ + struct res_srq *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->com.state = RES_SRQ_ALLOCATED; + atomic_set(&ret->ref_count, 0); + + return &ret->com; +} + +static struct res_common *alloc_counter_tr(int id) +{ + 
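+ /* Counter tracker entries start life in RES_COUNTER_ALLOCATED; as with + * the other alloc_*_tr() helpers above, the owning slave is stamped by + * alloc_tr() once the allocation succeeds. + */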
struct res_counter *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->com.state = RES_COUNTER_ALLOCATED; + + return &ret->com; +} + +static struct res_common *alloc_xrcdn_tr(int id) +{ + struct res_xrcdn *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->com.state = RES_XRCD_ALLOCATED; + + return &ret->com; +} + +static struct res_common *alloc_fs_rule_tr(u64 id, int qpn) +{ + struct res_fs_rule *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->com.state = RES_FS_RULE_ALLOCATED; + ret->qpn = qpn; + return &ret->com; +} + +static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave, + int extra) +{ + struct res_common *ret; + + switch (type) { + case RES_QP: + ret = alloc_qp_tr(id); + break; + case RES_MPT: + ret = alloc_mpt_tr(id, extra); + break; + case RES_MTT: + ret = alloc_mtt_tr(id, extra); + break; + case RES_EQ: + ret = alloc_eq_tr(id); + break; + case RES_CQ: + ret = alloc_cq_tr(id); + break; + case RES_SRQ: + ret = alloc_srq_tr(id); + break; + case RES_MAC: + pr_err("implementation missing\n"); + return NULL; + case RES_COUNTER: + ret = alloc_counter_tr(id); + break; + case RES_XRCD: + ret = alloc_xrcdn_tr(id); + break; + case RES_FS_RULE: + ret = alloc_fs_rule_tr(id, extra); + break; + default: + return NULL; + } + if (ret) + ret->owner = slave; + + return ret; +} + +static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count, + enum mlx4_resource type, int extra) +{ + int i; + int err; + struct mlx4_priv *priv = mlx4_priv(dev); + struct res_common **res_arr; + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct rb_root *root = &tracker->res_tree[type]; + + res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL); + if (!res_arr) + return -ENOMEM; + + for (i = 0; i < count; ++i) { + res_arr[i] = alloc_tr(base + i, type, slave, extra); + if (!res_arr[i]) { + for (--i; i >= 0; --i) + kfree(res_arr[i]); + + kfree(res_arr); + return -ENOMEM; + } + } + + spin_lock_irq(mlx4_tlock(dev)); + for (i = 0; i < count; ++i) { + if (find_res(dev, base + i, type)) { + err = -EEXIST; + goto undo; + } + err = res_tracker_insert(root, res_arr[i]); + if (err) + goto undo; + list_add_tail(&res_arr[i]->list, + &tracker->slave_list[slave].res_list[type]); + } + spin_unlock_irq(mlx4_tlock(dev)); + kfree(res_arr); + + return 0; + +undo: + for (--i; i >= base; --i) + rb_erase(&res_arr[i]->node, root); + + spin_unlock_irq(mlx4_tlock(dev)); + + for (i = 0; i < count; ++i) + kfree(res_arr[i]); + + kfree(res_arr); + + return err; +} + +static int remove_qp_ok(struct res_qp *res) +{ + if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) || + !list_empty(&res->mcg_list)) { + pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n", + res->com.state, atomic_read(&res->ref_count)); + return -EBUSY; + } else if (res->com.state != RES_QP_RESERVED) { + return -EPERM; + } + + return 0; +} + +static int remove_mtt_ok(struct res_mtt *res, int order) +{ + if (res->com.state == RES_MTT_BUSY || + atomic_read(&res->ref_count)) { + pr_devel("%s-%d: state %s, ref_count %d\n", + __func__, __LINE__, + mtt_states_str(res->com.state), + atomic_read(&res->ref_count)); + return -EBUSY; + } else if (res->com.state != RES_MTT_ALLOCATED) + return -EPERM; + else if (res->order != order) + return -EINVAL; + + return 0; +} + +static int remove_mpt_ok(struct res_mpt *res) 
+{ + if (res->com.state == RES_MPT_BUSY) + return -EBUSY; + else if (res->com.state != RES_MPT_RESERVED) + return -EPERM; + + return 0; +} + +static int remove_eq_ok(struct res_eq *res) +{ + if (res->com.state == RES_MPT_BUSY) + return -EBUSY; + else if (res->com.state != RES_MPT_RESERVED) + return -EPERM; + + return 0; +} + +static int remove_counter_ok(struct res_counter *res) +{ + if (res->com.state == RES_COUNTER_BUSY) + return -EBUSY; + else if (res->com.state != RES_COUNTER_ALLOCATED) + return -EPERM; + + return 0; +} + +static int remove_xrcdn_ok(struct res_xrcdn *res) +{ + if (res->com.state == RES_XRCD_BUSY) + return -EBUSY; + else if (res->com.state != RES_XRCD_ALLOCATED) + return -EPERM; + + return 0; +} + +static int remove_fs_rule_ok(struct res_fs_rule *res) +{ + if (res->com.state == RES_FS_RULE_BUSY) + return -EBUSY; + else if (res->com.state != RES_FS_RULE_ALLOCATED) + return -EPERM; + + return 0; +} + +static int remove_cq_ok(struct res_cq *res) +{ + if (res->com.state == RES_CQ_BUSY) + return -EBUSY; + else if (res->com.state != RES_CQ_ALLOCATED) + return -EPERM; + + return 0; +} + +static int remove_srq_ok(struct res_srq *res) +{ + if (res->com.state == RES_SRQ_BUSY) + return -EBUSY; + else if (res->com.state != RES_SRQ_ALLOCATED) + return -EPERM; + + return 0; +} + +static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra) +{ + switch (type) { + case RES_QP: + return remove_qp_ok((struct res_qp *)res); + case RES_CQ: + return remove_cq_ok((struct res_cq *)res); + case RES_SRQ: + return remove_srq_ok((struct res_srq *)res); + case RES_MPT: + return remove_mpt_ok((struct res_mpt *)res); + case RES_MTT: + return remove_mtt_ok((struct res_mtt *)res, extra); + case RES_MAC: + return -ENOSYS; + case RES_EQ: + return remove_eq_ok((struct res_eq *)res); + case RES_COUNTER: + return remove_counter_ok((struct res_counter *)res); + case RES_XRCD: + return remove_xrcdn_ok((struct res_xrcdn *)res); + case RES_FS_RULE: + return remove_fs_rule_ok((struct res_fs_rule *)res); + default: + return -EINVAL; + } +} + +static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count, + enum mlx4_resource type, int extra) +{ + u64 i; + int err; + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_common *r; + + spin_lock_irq(mlx4_tlock(dev)); + for (i = base; i < base + count; ++i) { + r = res_tracker_lookup(&tracker->res_tree[type], i); + if (!r) { + err = -ENOENT; + goto out; + } + if (r->owner != slave) { + err = -EPERM; + goto out; + } + err = remove_ok(r, type, extra); + if (err) + goto out; + } + + for (i = base; i < base + count; ++i) { + r = res_tracker_lookup(&tracker->res_tree[type], i); + rb_erase(&r->node, &tracker->res_tree[type]); + list_del(&r->list); + kfree(r); + } + err = 0; + +out: + spin_unlock_irq(mlx4_tlock(dev)); + + return err; +} + +static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn, + enum res_qp_states state, struct res_qp **qp, + int alloc) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_qp *r; + int err = 0; + + spin_lock_irq(mlx4_tlock(dev)); + r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn); + if (!r) + err = -ENOENT; + else if (r->com.owner != slave) + err = -EPERM; + else { + switch (state) { + case RES_QP_BUSY: + mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n", + __func__, r->com.res_id); + err = -EBUSY; + break; + + case 
RES_QP_RESERVED: + if (r->com.state == RES_QP_MAPPED && !alloc) + break; + + mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id); + err = -EINVAL; + break; + + case RES_QP_MAPPED: + if ((r->com.state == RES_QP_RESERVED && alloc) || + r->com.state == RES_QP_HW) + break; + else { + mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", + r->com.res_id); + err = -EINVAL; + } + + break; + + case RES_QP_HW: + if (r->com.state != RES_QP_MAPPED) + err = -EINVAL; + break; + default: + err = -EINVAL; + } + + if (!err) { + r->com.from_state = r->com.state; + r->com.to_state = state; + r->com.state = RES_QP_BUSY; + if (qp) + *qp = r; + } + } + + spin_unlock_irq(mlx4_tlock(dev)); + + return err; +} + +static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index, + enum res_mpt_states state, struct res_mpt **mpt) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_mpt *r; + int err = 0; + + spin_lock_irq(mlx4_tlock(dev)); + r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index); + if (!r) + err = -ENOENT; + else if (r->com.owner != slave) + err = -EPERM; + else { + switch (state) { + case RES_MPT_BUSY: + err = -EINVAL; + break; + + case RES_MPT_RESERVED: + if (r->com.state != RES_MPT_MAPPED) + err = -EINVAL; + break; + + case RES_MPT_MAPPED: + if (r->com.state != RES_MPT_RESERVED && + r->com.state != RES_MPT_HW) + err = -EINVAL; + break; + + case RES_MPT_HW: + if (r->com.state != RES_MPT_MAPPED) + err = -EINVAL; + break; + default: + err = -EINVAL; + } + + if (!err) { + r->com.from_state = r->com.state; + r->com.to_state = state; + r->com.state = RES_MPT_BUSY; + if (mpt) + *mpt = r; + } + } + + spin_unlock_irq(mlx4_tlock(dev)); + + return err; +} + +static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, + enum res_eq_states state, struct res_eq **eq) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_eq *r; + int err = 0; + + spin_lock_irq(mlx4_tlock(dev)); + r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index); + if (!r) + err = -ENOENT; + else if (r->com.owner != slave) + err = -EPERM; + else { + switch (state) { + case RES_EQ_BUSY: + err = -EINVAL; + break; + + case RES_EQ_RESERVED: + if (r->com.state != RES_EQ_HW) + err = -EINVAL; + break; + + case RES_EQ_HW: + if (r->com.state != RES_EQ_RESERVED) + err = -EINVAL; + break; + + default: + err = -EINVAL; + } + + if (!err) { + r->com.from_state = r->com.state; + r->com.to_state = state; + r->com.state = RES_EQ_BUSY; + if (eq) + *eq = r; + } + } + + spin_unlock_irq(mlx4_tlock(dev)); + + return err; +} + +static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn, + enum res_cq_states state, struct res_cq **cq) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_cq *r; + int err; + + spin_lock_irq(mlx4_tlock(dev)); + r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn); + if (!r) { + err = -ENOENT; + } else if (r->com.owner != slave) { + err = -EPERM; + } else if (state == RES_CQ_ALLOCATED) { + if (r->com.state != RES_CQ_HW) + err = -EINVAL; + else if (atomic_read(&r->ref_count)) + err = -EBUSY; + else + err = 0; + } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) { + err = -EINVAL; + } else { + err = 0; + } + + if (!err) { + r->com.from_state = r->com.state; + r->com.to_state = state; + r->com.state = RES_CQ_BUSY; + if (cq) + *cq = r; + } + + 
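+ /* On success the CQ is left in the transient RES_CQ_BUSY state; the + * caller must later commit the transition with res_end_move() or undo + * it with res_abort_move(), as the SW2HW_CQ wrapper below does. + */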
spin_unlock_irq(mlx4_tlock(dev)); + + return err; +} + +static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, + enum res_srq_states state, struct res_srq **srq) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_srq *r; + int err = 0; + + spin_lock_irq(mlx4_tlock(dev)); + r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index); + if (!r) { + err = -ENOENT; + } else if (r->com.owner != slave) { + err = -EPERM; + } else if (state == RES_SRQ_ALLOCATED) { + if (r->com.state != RES_SRQ_HW) + err = -EINVAL; + else if (atomic_read(&r->ref_count)) + err = -EBUSY; + } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) { + err = -EINVAL; + } + + if (!err) { + r->com.from_state = r->com.state; + r->com.to_state = state; + r->com.state = RES_SRQ_BUSY; + if (srq) + *srq = r; + } + + spin_unlock_irq(mlx4_tlock(dev)); + + return err; +} + +static void res_abort_move(struct mlx4_dev *dev, int slave, + enum mlx4_resource type, int id) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_common *r; + + spin_lock_irq(mlx4_tlock(dev)); + r = res_tracker_lookup(&tracker->res_tree[type], id); + if (r && (r->owner == slave)) + r->state = r->from_state; + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void res_end_move(struct mlx4_dev *dev, int slave, + enum mlx4_resource type, int id) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_common *r; + + spin_lock_irq(mlx4_tlock(dev)); + r = res_tracker_lookup(&tracker->res_tree[type], id); + if (r && (r->owner == slave)) + r->state = r->to_state; + spin_unlock_irq(mlx4_tlock(dev)); +} + +static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn) +{ + return mlx4_is_qp_reserved(dev, qpn) && + (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn)); +} + +static int fw_reserved(struct mlx4_dev *dev, int qpn) +{ + return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; +} + +static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int err; + int count; + int align; + int base; + int qpn; + u8 flags; + + switch (op) { + case RES_OP_RESERVE: + count = get_param_l(&in_param) & 0xffffff; + /* Turn off all unsupported QP allocation flags that the + * slave tries to set. 
+ */ + flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask; + align = get_param_h(&in_param); + err = mlx4_grant_resource(dev, slave, RES_QP, count, 0); + if (err) + return err; + + err = __mlx4_qp_reserve_range(dev, count, align, &base, flags); + if (err) { + mlx4_release_resource(dev, slave, RES_QP, count, 0); + return err; + } + + err = add_res_range(dev, slave, base, count, RES_QP, 0); + if (err) { + mlx4_release_resource(dev, slave, RES_QP, count, 0); + __mlx4_qp_release_range(dev, base, count); + return err; + } + set_param_l(out_param, base); + break; + case RES_OP_MAP_ICM: + qpn = get_param_l(&in_param) & 0x7fffff; + if (valid_reserved(dev, slave, qpn)) { + err = add_res_range(dev, slave, qpn, 1, RES_QP, 0); + if (err) + return err; + } + + err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, + NULL, 1); + if (err) + return err; + + if (!fw_reserved(dev, qpn)) { + err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL); + if (err) { + res_abort_move(dev, slave, RES_QP, qpn); + return err; + } + } + + res_end_move(dev, slave, RES_QP, qpn); + break; + + default: + err = -EINVAL; + break; + } + return err; +} + +static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int err = -EINVAL; + int base; + int order; + + if (op != RES_OP_RESERVE_AND_MAP) + return err; + + order = get_param_l(&in_param); + + err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0); + if (err) + return err; + + base = __mlx4_alloc_mtt_range(dev, order); + if (base == -1) { + mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0); + return -ENOMEM; + } + + err = add_res_range(dev, slave, base, 1, RES_MTT, order); + if (err) { + mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0); + __mlx4_free_mtt_range(dev, base, order); + } else { + set_param_l(out_param, base); + } + + return err; +} + +static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int err = -EINVAL; + int index; + int id; + struct res_mpt *mpt; + + switch (op) { + case RES_OP_RESERVE: + err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0); + if (err) + break; + + index = __mlx4_mpt_reserve(dev); + if (index == -1) { + mlx4_release_resource(dev, slave, RES_MPT, 1, 0); + break; + } + id = index & mpt_mask(dev); + + err = add_res_range(dev, slave, id, 1, RES_MPT, index); + if (err) { + mlx4_release_resource(dev, slave, RES_MPT, 1, 0); + __mlx4_mpt_release(dev, index); + break; + } + set_param_l(out_param, index); + break; + case RES_OP_MAP_ICM: + index = get_param_l(&in_param); + id = index & mpt_mask(dev); + err = mr_res_start_move_to(dev, slave, id, + RES_MPT_MAPPED, &mpt); + if (err) + return err; + + err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL); + if (err) { + res_abort_move(dev, slave, RES_MPT, id); + return err; + } + + res_end_move(dev, slave, RES_MPT, id); + break; + } + return err; +} + +static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int cqn; + int err; + + switch (op) { + case RES_OP_RESERVE_AND_MAP: + err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0); + if (err) + break; + + err = __mlx4_cq_alloc_icm(dev, &cqn); + if (err) { + mlx4_release_resource(dev, slave, RES_CQ, 1, 0); + break; + } + + err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0); + if (err) { + mlx4_release_resource(dev, slave, RES_CQ, 1, 0); + __mlx4_cq_free_icm(dev, cqn); + break; + } + + set_param_l(out_param, cqn); + break; + + default: + err = -EINVAL; + } + + 
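+ /* The failure paths above undo their own work (quota release and ICM + * free where applicable), so the error is simply propagated here. + */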
return err; +} + +static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int srqn; + int err; + + switch (op) { + case RES_OP_RESERVE_AND_MAP: + err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0); + if (err) + break; + + err = __mlx4_srq_alloc_icm(dev, &srqn); + if (err) { + mlx4_release_resource(dev, slave, RES_SRQ, 1, 0); + break; + } + + err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0); + if (err) { + mlx4_release_resource(dev, slave, RES_SRQ, 1, 0); + __mlx4_srq_free_icm(dev, srqn); + break; + } + + set_param_l(out_param, srqn); + break; + + default: + err = -EINVAL; + } + + return err; +} + +static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port, + u8 smac_index, u64 *mac) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *mac_list = + &tracker->slave_list[slave].res_list[RES_MAC]; + struct mac_res *res, *tmp; + + list_for_each_entry_safe(res, tmp, mac_list, list) { + if (res->smac_index == smac_index && res->port == (u8) port) { + *mac = res->mac; + return 0; + } + } + return -ENOENT; +} + +static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *mac_list = + &tracker->slave_list[slave].res_list[RES_MAC]; + struct mac_res *res, *tmp; + + list_for_each_entry_safe(res, tmp, mac_list, list) { + if (res->mac == mac && res->port == (u8) port) { + /* mac found. update ref count */ + ++res->ref_count; + return 0; + } + } + + if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port)) + return -EINVAL; + res = kzalloc(sizeof *res, GFP_KERNEL); + if (!res) { + mlx4_release_resource(dev, slave, RES_MAC, 1, port); + return -ENOMEM; + } + res->mac = mac; + res->port = (u8) port; + res->smac_index = smac_index; + res->ref_count = 1; + list_add_tail(&res->list, + &tracker->slave_list[slave].res_list[RES_MAC]); + return 0; +} + +static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac, + int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *mac_list = + &tracker->slave_list[slave].res_list[RES_MAC]; + struct mac_res *res, *tmp; + + list_for_each_entry_safe(res, tmp, mac_list, list) { + if (res->mac == mac && res->port == (u8) port) { + if (!--res->ref_count) { + list_del(&res->list); + mlx4_release_resource(dev, slave, RES_MAC, 1, port); + kfree(res); + } + break; + } + } +} + +static void rem_slave_macs(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *mac_list = + &tracker->slave_list[slave].res_list[RES_MAC]; + struct mac_res *res, *tmp; + int i; + + list_for_each_entry_safe(res, tmp, mac_list, list) { + list_del(&res->list); + /* dereference the mac the num times the slave referenced it */ + for (i = 0; i < res->ref_count; i++) + __mlx4_unregister_mac(dev, res->port, res->mac); + mlx4_release_resource(dev, slave, RES_MAC, 1, res->port); + kfree(res); + } +} + +static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param, int in_port) +{ + int err = -EINVAL; + int port; + u64 mac; + u8 smac_index; + + if (op != RES_OP_RESERVE_AND_MAP) + return err; + + port = !in_port ? 
get_param_l(out_param) : in_port; + port = mlx4_slave_convert_port( + dev, slave, port); + + if (port < 0) + return -EINVAL; + mac = in_param; + + err = __mlx4_register_mac(dev, port, mac); + if (err >= 0) { + smac_index = err; + set_param_l(out_param, err); + err = 0; + } + + if (!err) { + err = mac_add_to_slave(dev, slave, mac, port, smac_index); + if (err) + __mlx4_unregister_mac(dev, port, mac); + } + return err; +} + +static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan, + int port, int vlan_index) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *vlan_list = + &tracker->slave_list[slave].res_list[RES_VLAN]; + struct vlan_res *res, *tmp; + + list_for_each_entry_safe(res, tmp, vlan_list, list) { + if (res->vlan == vlan && res->port == (u8) port) { + /* vlan found. update ref count */ + ++res->ref_count; + return 0; + } + } + + if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port)) + return -EINVAL; + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (!res) { + mlx4_release_resource(dev, slave, RES_VLAN, 1, port); + return -ENOMEM; + } + res->vlan = vlan; + res->port = (u8) port; + res->vlan_index = vlan_index; + res->ref_count = 1; + list_add_tail(&res->list, + &tracker->slave_list[slave].res_list[RES_VLAN]); + return 0; +} + + +static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan, + int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *vlan_list = + &tracker->slave_list[slave].res_list[RES_VLAN]; + struct vlan_res *res, *tmp; + + list_for_each_entry_safe(res, tmp, vlan_list, list) { + if (res->vlan == vlan && res->port == (u8) port) { + if (!--res->ref_count) { + list_del(&res->list); + mlx4_release_resource(dev, slave, RES_VLAN, + 1, port); + kfree(res); + } + break; + } + } +} + +static void rem_slave_vlans(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *vlan_list = + &tracker->slave_list[slave].res_list[RES_VLAN]; + struct vlan_res *res, *tmp; + int i; + + list_for_each_entry_safe(res, tmp, vlan_list, list) { + list_del(&res->list); + /* dereference the vlan the num times the slave referenced it */ + for (i = 0; i < res->ref_count; i++) + __mlx4_unregister_vlan(dev, res->port, res->vlan); + mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port); + kfree(res); + } +} + +static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param, int in_port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; + int err; + u16 vlan; + int vlan_index; + int port; + + port = !in_port ? get_param_l(out_param) : in_port; + + if (!port || op != RES_OP_RESERVE_AND_MAP) + return -EINVAL; + + port = mlx4_slave_convert_port( + dev, slave, port); + + if (port < 0) + return -EINVAL; + /* upstream kernels had NOP for reg/unreg vlan. Continue this. 
*/ + if (!in_port && port > 0 && port <= dev->caps.num_ports) { + slave_state[slave].old_vlan_api = true; + return 0; + } + + vlan = (u16) in_param; + + err = __mlx4_register_vlan(dev, port, vlan, &vlan_index); + if (!err) { + set_param_l(out_param, (u32) vlan_index); + err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index); + if (err) + __mlx4_unregister_vlan(dev, port, vlan); + } + return err; +} + +static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + u32 index; + int err; + + if (op != RES_OP_RESERVE) + return -EINVAL; + + err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0); + if (err) + return err; + + err = __mlx4_counter_alloc(dev, &index); + if (err) { + mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); + return err; + } + + err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0); + if (err) { + __mlx4_counter_free(dev, index); + mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); + } else { + set_param_l(out_param, index); + } + + return err; +} + +static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + u32 xrcdn; + int err; + + if (op != RES_OP_RESERVE) + return -EINVAL; + + err = __mlx4_xrcd_alloc(dev, &xrcdn); + if (err) + return err; + + err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0); + if (err) + __mlx4_xrcd_free(dev, xrcdn); + else + set_param_l(out_param, xrcdn); + + return err; +} + +int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int alop = vhcr->op_modifier; + + switch (vhcr->in_modifier & 0xFF) { + case RES_QP: + err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_MTT: + err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_MPT: + err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_CQ: + err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_SRQ: + err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_MAC: + err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param, + (vhcr->in_modifier >> 8) & 0xFF); + break; + + case RES_VLAN: + err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param, + (vhcr->in_modifier >> 8) & 0xFF); + break; + + case RES_COUNTER: + err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_XRCD: + err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + default: + err = -EINVAL; + break; + } + + return err; +} + +static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param) +{ + int err; + int count; + int base; + int qpn; + + switch (op) { + case RES_OP_RESERVE: + base = get_param_l(&in_param) & 0x7fffff; + count = get_param_h(&in_param); + err = rem_res_range(dev, slave, base, count, RES_QP, 0); + if (err) + break; + mlx4_release_resource(dev, slave, RES_QP, count, 0); + __mlx4_qp_release_range(dev, base, count); + break; + case RES_OP_MAP_ICM: + qpn = get_param_l(&in_param) & 0x7fffff; + err = qp_res_start_move_to(dev, slave, qpn, 
RES_QP_RESERVED, + NULL, 0); + if (err) + return err; + + if (!fw_reserved(dev, qpn)) + __mlx4_qp_free_icm(dev, qpn); + + res_end_move(dev, slave, RES_QP, qpn); + + if (valid_reserved(dev, slave, qpn)) + err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0); + break; + default: + err = -EINVAL; + break; + } + return err; +} + +static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int err = -EINVAL; + int base; + int order; + + if (op != RES_OP_RESERVE_AND_MAP) + return err; + + base = get_param_l(&in_param); + order = get_param_h(&in_param); + err = rem_res_range(dev, slave, base, 1, RES_MTT, order); + if (!err) { + mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0); + __mlx4_free_mtt_range(dev, base, order); + } + return err; +} + +static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param) +{ + int err = -EINVAL; + int index; + int id; + struct res_mpt *mpt; + + switch (op) { + case RES_OP_RESERVE: + index = get_param_l(&in_param); + id = index & mpt_mask(dev); + err = get_res(dev, slave, id, RES_MPT, &mpt); + if (err) + break; + index = mpt->key; + put_res(dev, slave, id, RES_MPT); + + err = rem_res_range(dev, slave, id, 1, RES_MPT, 0); + if (err) + break; + mlx4_release_resource(dev, slave, RES_MPT, 1, 0); + __mlx4_mpt_release(dev, index); + break; + case RES_OP_MAP_ICM: + index = get_param_l(&in_param); + id = index & mpt_mask(dev); + err = mr_res_start_move_to(dev, slave, id, + RES_MPT_RESERVED, &mpt); + if (err) + return err; + + __mlx4_mpt_free_icm(dev, mpt->key); + res_end_move(dev, slave, RES_MPT, id); + return err; + break; + default: + err = -EINVAL; + break; + } + return err; +} + +static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int cqn; + int err; + + switch (op) { + case RES_OP_RESERVE_AND_MAP: + cqn = get_param_l(&in_param); + err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0); + if (err) + break; + + mlx4_release_resource(dev, slave, RES_CQ, 1, 0); + __mlx4_cq_free_icm(dev, cqn); + break; + + default: + err = -EINVAL; + break; + } + + return err; +} + +static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int srqn; + int err; + + switch (op) { + case RES_OP_RESERVE_AND_MAP: + srqn = get_param_l(&in_param); + err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0); + if (err) + break; + + mlx4_release_resource(dev, slave, RES_SRQ, 1, 0); + __mlx4_srq_free_icm(dev, srqn); + break; + + default: + err = -EINVAL; + break; + } + + return err; +} + +static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param, int in_port) +{ + int port; + int err = 0; + + switch (op) { + case RES_OP_RESERVE_AND_MAP: + port = !in_port ? 
get_param_l(out_param) : in_port; + port = mlx4_slave_convert_port( + dev, slave, port); + + if (port < 0) + return -EINVAL; + mac_del_from_slave(dev, slave, in_param, port); + __mlx4_unregister_mac(dev, port, in_param); + break; + default: + err = -EINVAL; + break; + } + + return err; + +} + +static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; + int err = 0; + + port = mlx4_slave_convert_port( + dev, slave, port); + + if (port < 0) + return -EINVAL; + switch (op) { + case RES_OP_RESERVE_AND_MAP: + if (slave_state[slave].old_vlan_api) + return 0; + if (!port) + return -EINVAL; + vlan_del_from_slave(dev, slave, in_param, port); + __mlx4_unregister_vlan(dev, port, in_param); + break; + default: + err = -EINVAL; + break; + } + + return err; +} + +static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int index; + int err; + + if (op != RES_OP_RESERVE) + return -EINVAL; + + index = get_param_l(&in_param); + err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0); + if (err) + return err; + + __mlx4_counter_free(dev, index); + mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); + + return err; +} + +static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int xrcdn; + int err; + + if (op != RES_OP_RESERVE) + return -EINVAL; + + xrcdn = get_param_l(&in_param); + err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0); + if (err) + return err; + + __mlx4_xrcd_free(dev, xrcdn); + + return err; +} + +int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err = -EINVAL; + int alop = vhcr->op_modifier; + + switch (vhcr->in_modifier & 0xFF) { + case RES_QP: + err = qp_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param); + break; + + case RES_MTT: + err = mtt_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_MPT: + err = mpt_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param); + break; + + case RES_CQ: + err = cq_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_SRQ: + err = srq_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_MAC: + err = mac_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param, + (vhcr->in_modifier >> 8) & 0xFF); + break; + + case RES_VLAN: + err = vlan_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param, + (vhcr->in_modifier >> 8) & 0xFF); + break; + + case RES_COUNTER: + err = counter_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_XRCD: + err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + + default: + break; + } + return err; +} + +/* ugly but other choices are uglier */ +static int mr_phys_mpt(struct mlx4_mpt_entry *mpt) +{ + return (be32_to_cpu(mpt->flags) >> 9) & 1; +} + +static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt) +{ + return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8; +} + +static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt) +{ + return be32_to_cpu(mpt->mtt_sz); +} + +static u32 
mr_get_pd(struct mlx4_mpt_entry *mpt) +{ + return be32_to_cpu(mpt->pd_flags) & 0x00ffffff; +} + +static int mr_is_fmr(struct mlx4_mpt_entry *mpt) +{ + return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG; +} + +static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt) +{ + return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE; +} + +static int mr_is_region(struct mlx4_mpt_entry *mpt) +{ + return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION; +} + +static int qp_get_mtt_addr(struct mlx4_qp_context *qpc) +{ + return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8; +} + +static int srq_get_mtt_addr(struct mlx4_srq_context *srqc) +{ + return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8; +} + +static int qp_get_mtt_size(struct mlx4_qp_context *qpc) +{ + int page_shift = (qpc->log_page_size & 0x3f) + 12; + int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf; + int log_sq_sride = qpc->sq_size_stride & 7; + int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf; + int log_rq_stride = qpc->rq_size_stride & 7; + int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1; + int rss = (be32_to_cpu(qpc->flags) >> 13) & 1; + u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff; + int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0; + int sq_size; + int rq_size; + int total_pages; + int total_mem; + int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f; + + sq_size = 1 << (log_sq_size + log_sq_sride + 4); + rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4)); + total_mem = sq_size + rq_size; + total_pages = + roundup_pow_of_two((total_mem + (page_offset << 6)) >> + page_shift); + + return total_pages; +} + +static int check_mtt_range(struct mlx4_dev *dev, int slave, int start, + int size, struct res_mtt *mtt) +{ + int res_start = mtt->com.res_id; + int res_size = (1 << mtt->order); + + if (start < res_start || start + size > res_start + res_size) + return -EPERM; + return 0; +} + +int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int index = vhcr->in_modifier; + struct res_mtt *mtt; + struct res_mpt *mpt; + int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz; + int phys; + int id; + u32 pd; + int pd_slave; + + id = index & mpt_mask(dev); + err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt); + if (err) + return err; + + /* Disable memory windows for VFs. */ + if (!mr_is_region(inbox->buf)) { + err = -EPERM; + goto ex_abort; + } + + /* Make sure that the PD bits related to the slave id are zeros. */ + pd = mr_get_pd(inbox->buf); + pd_slave = (pd >> 17) & 0x7f; + if (pd_slave != 0 && --pd_slave != slave) { + err = -EPERM; + goto ex_abort; + } + + if (mr_is_fmr(inbox->buf)) { + /* FMR and Bind Enable are forbidden in slave devices. */ + if (mr_is_bind_enabled(inbox->buf)) { + err = -EPERM; + goto ex_abort; + } + /* FMR and Memory Windows are also forbidden. 
*/ + if (!mr_is_region(inbox->buf)) { + err = -EPERM; + goto ex_abort; + } + } + + phys = mr_phys_mpt(inbox->buf); + if (!phys) { + err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); + if (err) + goto ex_abort; + + err = check_mtt_range(dev, slave, mtt_base, + mr_get_mtt_size(inbox->buf), mtt); + if (err) + goto ex_put; + + mpt->mtt = mtt; + } + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto ex_put; + + if (!phys) { + atomic_inc(&mtt->ref_count); + put_res(dev, slave, mtt->com.res_id, RES_MTT); + } + + res_end_move(dev, slave, RES_MPT, id); + return 0; + +ex_put: + if (!phys) + put_res(dev, slave, mtt->com.res_id, RES_MTT); +ex_abort: + res_abort_move(dev, slave, RES_MPT, id); + + return err; +} + +int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int index = vhcr->in_modifier; + struct res_mpt *mpt; + int id; + + id = index & mpt_mask(dev); + err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt); + if (err) + return err; + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto ex_abort; + + if (mpt->mtt) + atomic_dec(&mpt->mtt->ref_count); + + res_end_move(dev, slave, RES_MPT, id); + return 0; + +ex_abort: + res_abort_move(dev, slave, RES_MPT, id); + + return err; +} + +int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int index = vhcr->in_modifier; + struct res_mpt *mpt; + int id; + + id = index & mpt_mask(dev); + err = get_res(dev, slave, id, RES_MPT, &mpt); + if (err) + return err; + + if (mpt->com.from_state == RES_MPT_MAPPED) { + /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do + * that, the VF must read the MPT. But since the MPT entry memory is not + * in the VF's virtual memory space, it must use QUERY_MPT to obtain the + * entry contents. To guarantee that the MPT cannot be changed, the driver + * must perform HW2SW_MPT before this query and return the MPT entry to HW + * ownership fofollowing the change. The change here allows the VF to + * perform QUERY_MPT also when the entry is in SW ownership. 
+ */ + struct mlx4_mpt_entry *mpt_entry = mlx4_table_find( + &mlx4_priv(dev)->mr_table.dmpt_table, + mpt->key, NULL); + + if (NULL == mpt_entry || NULL == outbox->buf) { + err = -EINVAL; + goto out; + } + + memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry)); + + err = 0; + } else if (mpt->com.from_state == RES_MPT_HW) { + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + } else { + err = -EBUSY; + goto out; + } + + +out: + put_res(dev, slave, id, RES_MPT); + return err; +} + +static int qp_get_rcqn(struct mlx4_qp_context *qpc) +{ + return be32_to_cpu(qpc->cqn_recv) & 0xffffff; +} + +static int qp_get_scqn(struct mlx4_qp_context *qpc) +{ + return be32_to_cpu(qpc->cqn_send) & 0xffffff; +} + +static u32 qp_get_srqn(struct mlx4_qp_context *qpc) +{ + return be32_to_cpu(qpc->srqn) & 0x1ffffff; +} + +static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr, + struct mlx4_qp_context *context) +{ + u32 qpn = vhcr->in_modifier & 0xffffff; + u32 qkey = 0; + + if (mlx4_get_parav_qkey(dev, qpn, &qkey)) + return; + + /* adjust qkey in qp context */ + context->qkey = cpu_to_be32(qkey); +} + +int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int qpn = vhcr->in_modifier & 0x7fffff; + struct res_mtt *mtt; + struct res_qp *qp; + struct mlx4_qp_context *qpc = inbox->buf + 8; + int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz; + int mtt_size = qp_get_mtt_size(qpc); + struct res_cq *rcq; + struct res_cq *scq; + int rcqn = qp_get_rcqn(qpc); + int scqn = qp_get_scqn(qpc); + u32 srqn = qp_get_srqn(qpc) & 0xffffff; + int use_srq = (qp_get_srqn(qpc) >> 24) & 1; + struct res_srq *srq; + int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff; + + err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0); + if (err) + return err; + qp->local_qpn = local_qpn; + qp->sched_queue = 0; + qp->param3 = 0; + qp->vlan_control = 0; + qp->fvl_rx = 0; + qp->pri_path_fl = 0; + qp->vlan_index = 0; + qp->feup = 0; + qp->qpc_flags = be32_to_cpu(qpc->flags); + + err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); + if (err) + goto ex_abort; + + err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt); + if (err) + goto ex_put_mtt; + + err = get_res(dev, slave, rcqn, RES_CQ, &rcq); + if (err) + goto ex_put_mtt; + + if (scqn != rcqn) { + err = get_res(dev, slave, scqn, RES_CQ, &scq); + if (err) + goto ex_put_rcq; + } else + scq = rcq; + + if (use_srq) { + err = get_res(dev, slave, srqn, RES_SRQ, &srq); + if (err) + goto ex_put_scq; + } + + adjust_proxy_tun_qkey(dev, vhcr, qpc); + update_pkey_index(dev, slave, inbox); + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto ex_put_srq; + atomic_inc(&mtt->ref_count); + qp->mtt = mtt; + atomic_inc(&rcq->ref_count); + qp->rcq = rcq; + atomic_inc(&scq->ref_count); + qp->scq = scq; + + if (scqn != rcqn) + put_res(dev, slave, scqn, RES_CQ); + + if (use_srq) { + atomic_inc(&srq->ref_count); + put_res(dev, slave, srqn, RES_SRQ); + qp->srq = srq; + } + put_res(dev, slave, rcqn, RES_CQ); + put_res(dev, slave, mtt_base, RES_MTT); + res_end_move(dev, slave, RES_QP, qpn); + + return 0; + +ex_put_srq: + if (use_srq) + put_res(dev, slave, srqn, RES_SRQ); +ex_put_scq: + if (scqn != rcqn) + put_res(dev, slave, scqn, RES_CQ); +ex_put_rcq: + put_res(dev, slave, rcqn, RES_CQ); +ex_put_mtt: + put_res(dev, slave, mtt_base, RES_MTT); +ex_abort: + res_abort_move(dev, slave, RES_QP, 
qpn); + + return err; +} + +static int eq_get_mtt_addr(struct mlx4_eq_context *eqc) +{ + return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8; +} + +static int eq_get_mtt_size(struct mlx4_eq_context *eqc) +{ + int log_eq_size = eqc->log_eq_size & 0x1f; + int page_shift = (eqc->log_page_size & 0x3f) + 12; + + if (log_eq_size + 5 < page_shift) + return 1; + + return 1 << (log_eq_size + 5 - page_shift); +} + +static int cq_get_mtt_addr(struct mlx4_cq_context *cqc) +{ + return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8; +} + +static int cq_get_mtt_size(struct mlx4_cq_context *cqc) +{ + int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f; + int page_shift = (cqc->log_page_size & 0x3f) + 12; + + if (log_cq_size + 5 < page_shift) + return 1; + + return 1 << (log_cq_size + 5 - page_shift); +} + +int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int eqn = vhcr->in_modifier; + int res_id = (slave << 10) | eqn; + struct mlx4_eq_context *eqc = inbox->buf; + int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz; + int mtt_size = eq_get_mtt_size(eqc); + struct res_eq *eq; + struct res_mtt *mtt; + + err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0); + if (err) + return err; + err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq); + if (err) + goto out_add; + + err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); + if (err) + goto out_move; + + err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt); + if (err) + goto out_put; + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto out_put; + + atomic_inc(&mtt->ref_count); + eq->mtt = mtt; + put_res(dev, slave, mtt->com.res_id, RES_MTT); + res_end_move(dev, slave, RES_EQ, res_id); + return 0; + +out_put: + put_res(dev, slave, mtt->com.res_id, RES_MTT); +out_move: + res_abort_move(dev, slave, RES_EQ, res_id); +out_add: + rem_res_range(dev, slave, res_id, 1, RES_EQ, 0); + return err; +} + +int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + u8 get = vhcr->op_modifier; + + if (get != 1) + return -EPERM; + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + + return err; +} + +static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start, + int len, struct res_mtt **res) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_mtt *mtt; + int err = -EINVAL; + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT], + com.list) { + if (!check_mtt_range(dev, slave, start, len, mtt)) { + *res = mtt; + mtt->com.from_state = mtt->com.state; + mtt->com.state = RES_MTT_BUSY; + err = 0; + break; + } + } + spin_unlock_irq(mlx4_tlock(dev)); + + return err; +} + +static int verify_qp_parameters(struct mlx4_dev *dev, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + enum qp_transition transition, u8 slave) +{ + u32 qp_type; + u32 qpn; + struct mlx4_qp_context *qp_ctx; + enum mlx4_qp_optpar optpar; + int port; + int num_gids; + + qp_ctx = inbox->buf + 8; + qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; + optpar = be32_to_cpu(*(__be32 *) inbox->buf); + + if (slave != mlx4_master_func_num(dev)) { + qp_ctx->params2 &= ~MLX4_QP_BIT_FPP; + /* 
setting QP rate-limit is disallowed for VFs */ + if (qp_ctx->rate_limit_params) + return -EPERM; + } + + switch (qp_type) { + case MLX4_QP_ST_RC: + case MLX4_QP_ST_XRC: + case MLX4_QP_ST_UC: + switch (transition) { + case QP_TRANS_INIT2RTR: + case QP_TRANS_RTR2RTS: + case QP_TRANS_RTS2RTS: + case QP_TRANS_SQD2SQD: + case QP_TRANS_SQD2RTS: + if (slave != mlx4_master_func_num(dev)) + if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) { + port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; + if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) + num_gids = mlx4_get_slave_num_gids(dev, slave, port); + else + num_gids = 1; + if (qp_ctx->pri_path.mgid_index >= num_gids) + return -EINVAL; + } + if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { + port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1; + if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) + num_gids = mlx4_get_slave_num_gids(dev, slave, port); + else + num_gids = 1; + if (qp_ctx->alt_path.mgid_index >= num_gids) + return -EINVAL; + } + break; + default: + break; + } + break; + + case MLX4_QP_ST_MLX: + qpn = vhcr->in_modifier & 0x7fffff; + port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; + if (transition == QP_TRANS_INIT2RTR && + slave != mlx4_master_func_num(dev) && + mlx4_is_qp_reserved(dev, qpn) && + !mlx4_vf_smi_enabled(dev, slave, port)) { + /* only enabled VFs may create MLX proxy QPs */ + mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n", + __func__, slave, port); + return -EPERM; + } + break; + + default: + break; + } + + return 0; +} + +int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_mtt mtt; + __be64 *page_list = inbox->buf; + u64 *pg_list = (u64 *)page_list; + int i; + struct res_mtt *rmtt = NULL; + int start = be64_to_cpu(page_list[0]); + int npages = vhcr->in_modifier; + int err; + + err = get_containing_mtt(dev, slave, start, npages, &rmtt); + if (err) + return err; + + /* Call the SW implementation of write_mtt: + * - Prepare a dummy mtt struct + * - Translate inbox contents to simple addresses in host endianness */ + mtt.offset = 0; /* TBD this is broken but I don't handle it since + we don't really use it */ + mtt.order = 0; + mtt.page_shift = 0; + for (i = 0; i < npages; ++i) + pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL); + + err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages, + ((u64 *)page_list + 2)); + + if (rmtt) + put_res(dev, slave, rmtt->com.res_id, RES_MTT); + + return err; +} + +int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int eqn = vhcr->in_modifier; + int res_id = eqn | (slave << 10); + struct res_eq *eq; + int err; + + err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq); + if (err) + return err; + + err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL); + if (err) + goto ex_abort; + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto ex_put; + + atomic_dec(&eq->mtt->ref_count); + put_res(dev, slave, eq->mtt->com.res_id, RES_MTT); + res_end_move(dev, slave, RES_EQ, res_id); + rem_res_range(dev, slave, res_id, 1, RES_EQ, 0); + + return 0; + +ex_put: + put_res(dev, slave, eq->mtt->com.res_id, RES_MTT); +ex_abort: + res_abort_move(dev, slave, RES_EQ, res_id); + + return err; +} + +int 
mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_event_eq_info *event_eq; + struct mlx4_cmd_mailbox *mailbox; + u32 in_modifier = 0; + int err; + int res_id; + struct res_eq *req; + + if (!priv->mfunc.master.slave_state) + return -EINVAL; + + /* check for slave valid, slave not PF, and slave active */ + if (slave < 0 || slave > dev->persist->num_vfs || + slave == dev->caps.function || + !priv->mfunc.master.slave_state[slave].active) + return 0; + + event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type]; + + /* Create the event only if the slave is registered */ + if (event_eq->eqn < 0) + return 0; + + mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]); + res_id = (slave << 10) | event_eq->eqn; + err = get_res(dev, slave, res_id, RES_EQ, &req); + if (err) + goto unlock; + + if (req->com.from_state != RES_EQ_HW) { + err = -EINVAL; + goto put; + } + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); + goto put; + } + + if (eqe->type == MLX4_EVENT_TYPE_CMD) { + ++event_eq->token; + eqe->event.cmd.token = cpu_to_be16(event_eq->token); + } + + memcpy(mailbox->buf, (u8 *) eqe, 28); + + in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16); + + err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0, + MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + + put_res(dev, slave, res_id, RES_EQ); + mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]); + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + +put: + put_res(dev, slave, res_id, RES_EQ); + +unlock: + mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]); + return err; +} + +int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int eqn = vhcr->in_modifier; + int res_id = eqn | (slave << 10); + struct res_eq *eq; + int err; + + err = get_res(dev, slave, res_id, RES_EQ, &eq); + if (err) + return err; + + if (eq->com.from_state != RES_EQ_HW) { + err = -EINVAL; + goto ex_put; + } + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + +ex_put: + put_res(dev, slave, res_id, RES_EQ); + return err; +} + +int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int cqn = vhcr->in_modifier; + struct mlx4_cq_context *cqc = inbox->buf; + int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; + struct res_cq *cq = NULL; + struct res_mtt *mtt; + + err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq); + if (err) + return err; + err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); + if (err) + goto out_move; + err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt); + if (err) + goto out_put; + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto out_put; + atomic_inc(&mtt->ref_count); + cq->mtt = mtt; + put_res(dev, slave, mtt->com.res_id, RES_MTT); + res_end_move(dev, slave, RES_CQ, cqn); + return 0; + +out_put: + put_res(dev, slave, mtt->com.res_id, RES_MTT); +out_move: + res_abort_move(dev, slave, RES_CQ, cqn); + return err; +} + +int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int cqn = vhcr->in_modifier; + 
struct res_cq *cq = NULL; + + err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq); + if (err) + return err; + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto out_move; + atomic_dec(&cq->mtt->ref_count); + res_end_move(dev, slave, RES_CQ, cqn); + return 0; + +out_move: + res_abort_move(dev, slave, RES_CQ, cqn); + return err; +} + +int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int cqn = vhcr->in_modifier; + struct res_cq *cq; + int err; + + err = get_res(dev, slave, cqn, RES_CQ, &cq); + if (err) + return err; + + if (cq->com.from_state != RES_CQ_HW) + goto ex_put; + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +ex_put: + put_res(dev, slave, cqn, RES_CQ); + + return err; +} + +static int handle_resize(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd, + struct res_cq *cq) +{ + int err; + struct res_mtt *orig_mtt; + struct res_mtt *mtt; + struct mlx4_cq_context *cqc = inbox->buf; + int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; + + err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt); + if (err) + return err; + + if (orig_mtt != cq->mtt) { + err = -EINVAL; + goto ex_put; + } + + err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); + if (err) + goto ex_put; + + err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt); + if (err) + goto ex_put1; + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto ex_put1; + atomic_dec(&orig_mtt->ref_count); + put_res(dev, slave, orig_mtt->com.res_id, RES_MTT); + atomic_inc(&mtt->ref_count); + cq->mtt = mtt; + put_res(dev, slave, mtt->com.res_id, RES_MTT); + return 0; + +ex_put1: + put_res(dev, slave, mtt->com.res_id, RES_MTT); +ex_put: + put_res(dev, slave, orig_mtt->com.res_id, RES_MTT); + + return err; + +} + +int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int cqn = vhcr->in_modifier; + struct res_cq *cq; + int err; + + err = get_res(dev, slave, cqn, RES_CQ, &cq); + if (err) + return err; + + if (cq->com.from_state != RES_CQ_HW) + goto ex_put; + + if (vhcr->op_modifier == 0) { + err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq); + goto ex_put; + } + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +ex_put: + put_res(dev, slave, cqn, RES_CQ); + + return err; +} + +static int srq_get_mtt_size(struct mlx4_srq_context *srqc) +{ + int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf; + int log_rq_stride = srqc->logstride & 7; + int page_shift = (srqc->log_page_size & 0x3f) + 12; + + if (log_srq_size + log_rq_stride + 4 < page_shift) + return 1; + + return 1 << (log_srq_size + log_rq_stride + 4 - page_shift); +} + +int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int srqn = vhcr->in_modifier; + struct res_mtt *mtt; + struct res_srq *srq = NULL; + struct mlx4_srq_context *srqc = inbox->buf; + int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz; + + if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff)) + return -EINVAL; + + err = 
srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq); + if (err) + return err; + err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); + if (err) + goto ex_abort; + err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc), + mtt); + if (err) + goto ex_put_mtt; + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto ex_put_mtt; + + atomic_inc(&mtt->ref_count); + srq->mtt = mtt; + put_res(dev, slave, mtt->com.res_id, RES_MTT); + res_end_move(dev, slave, RES_SRQ, srqn); + return 0; + +ex_put_mtt: + put_res(dev, slave, mtt->com.res_id, RES_MTT); +ex_abort: + res_abort_move(dev, slave, RES_SRQ, srqn); + + return err; +} + +int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int srqn = vhcr->in_modifier; + struct res_srq *srq = NULL; + + err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq); + if (err) + return err; + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto ex_abort; + atomic_dec(&srq->mtt->ref_count); + if (srq->cq) + atomic_dec(&srq->cq->ref_count); + res_end_move(dev, slave, RES_SRQ, srqn); + + return 0; + +ex_abort: + res_abort_move(dev, slave, RES_SRQ, srqn); + + return err; +} + +int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int srqn = vhcr->in_modifier; + struct res_srq *srq; + + err = get_res(dev, slave, srqn, RES_SRQ, &srq); + if (err) + return err; + if (srq->com.from_state != RES_SRQ_HW) { + err = -EBUSY; + goto out; + } + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +out: + put_res(dev, slave, srqn, RES_SRQ); + return err; +} + +int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int srqn = vhcr->in_modifier; + struct res_srq *srq; + + err = get_res(dev, slave, srqn, RES_SRQ, &srq); + if (err) + return err; + + if (srq->com.from_state != RES_SRQ_HW) { + err = -EBUSY; + goto out; + } + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +out: + put_res(dev, slave, srqn, RES_SRQ); + return err; +} + +int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int qpn = vhcr->in_modifier & 0x7fffff; + struct res_qp *qp; + + err = get_res(dev, slave, qpn, RES_QP, &qp); + if (err) + return err; + if (qp->com.from_state != RES_QP_HW) { + err = -EBUSY; + goto out; + } + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +out: + put_res(dev, slave, qpn, RES_QP); + return err; +} + +int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_qp_context *context = inbox->buf + 8; + adjust_proxy_tun_qkey(dev, vhcr, context); + update_pkey_index(dev, slave, inbox); + return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +} + +static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave, + struct mlx4_qp_context *qpc, + struct mlx4_cmd_mailbox *inbox) +{ + enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf); + u8 
pri_sched_queue; + int port = mlx4_slave_convert_port( + dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1; + + if (port < 0) + return -EINVAL; + + pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) | + ((port & 1) << 6); + + if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH || + mlx4_is_eth(dev, port + 1)) { + qpc->pri_path.sched_queue = pri_sched_queue; + } + + if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { + port = mlx4_slave_convert_port( + dev, slave, (qpc->alt_path.sched_queue >> 6 & 1) + + 1) - 1; + if (port < 0) + return -EINVAL; + qpc->alt_path.sched_queue = + (qpc->alt_path.sched_queue & ~(1 << 6)) | + (port & 1) << 6; + } + return 0; +} + +static int roce_verify_mac(struct mlx4_dev *dev, int slave, + struct mlx4_qp_context *qpc, + struct mlx4_cmd_mailbox *inbox) +{ + u64 mac; + int port; + u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff; + u8 sched = *(u8 *)(inbox->buf + 64); + u8 smac_ix; + + port = (sched >> 6 & 1) + 1; + if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) { + smac_ix = qpc->pri_path.grh_mylmc & 0x7f; + if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac)) + return -ENOENT; + } + return 0; +} + +int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + struct mlx4_qp_context *qpc = inbox->buf + 8; + int qpn = vhcr->in_modifier & 0x7fffff; + struct res_qp *qp; + u8 orig_sched_queue; + __be32 orig_param3 = qpc->param3; + u8 orig_vlan_control = qpc->pri_path.vlan_control; + u8 orig_fvl_rx = qpc->pri_path.fvl_rx; + u8 orig_pri_path_fl = qpc->pri_path.fl; + u8 orig_vlan_index = qpc->pri_path.vlan_index; + u8 orig_feup = qpc->pri_path.feup; + + err = adjust_qp_sched_queue(dev, slave, qpc, inbox); + if (err) + return err; + err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave); + if (err) + return err; + + if (roce_verify_mac(dev, slave, qpc, inbox)) + return -EINVAL; + + update_pkey_index(dev, slave, inbox); + update_gid(dev, inbox, (u8)slave); + adjust_proxy_tun_qkey(dev, vhcr, qpc); + orig_sched_queue = qpc->pri_path.sched_queue; + err = update_vport_qp_param(dev, inbox, slave, qpn); + if (err) + return err; + + err = get_res(dev, slave, qpn, RES_QP, &qp); + if (err) + return err; + if (qp->com.from_state != RES_QP_HW) { + err = -EBUSY; + goto out; + } + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +out: + /* if no error, save sched queue value passed in by VF. This is + * essentially the QOS value provided by the VF. 
This will be useful + * if we allow dynamic changes from VST back to VGT + */ + if (!err) { + qp->sched_queue = orig_sched_queue; + qp->param3 = orig_param3; + qp->vlan_control = orig_vlan_control; + qp->fvl_rx = orig_fvl_rx; + qp->pri_path_fl = orig_pri_path_fl; + qp->vlan_index = orig_vlan_index; + qp->feup = orig_feup; + } + put_res(dev, slave, qpn, RES_QP); + return err; +} + +int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + struct mlx4_qp_context *context = inbox->buf + 8; + + err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; + err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave); + if (err) + return err; + + update_pkey_index(dev, slave, inbox); + update_gid(dev, inbox, (u8)slave); + adjust_proxy_tun_qkey(dev, vhcr, context); + return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +} + +int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + struct mlx4_qp_context *context = inbox->buf + 8; + + err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; + err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave); + if (err) + return err; + + update_pkey_index(dev, slave, inbox); + update_gid(dev, inbox, (u8)slave); + adjust_proxy_tun_qkey(dev, vhcr, context); + return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +} + + +int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_qp_context *context = inbox->buf + 8; + int err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; + adjust_proxy_tun_qkey(dev, vhcr, context); + return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +} + +int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + struct mlx4_qp_context *context = inbox->buf + 8; + + err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; + err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave); + if (err) + return err; + + adjust_proxy_tun_qkey(dev, vhcr, context); + update_gid(dev, inbox, (u8)slave); + update_pkey_index(dev, slave, inbox); + return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +} + +int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + struct mlx4_qp_context *context = inbox->buf + 8; + + err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; + err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave); + if (err) + return err; + + adjust_proxy_tun_qkey(dev, vhcr, context); + update_gid(dev, inbox, (u8)slave); + update_pkey_index(dev, slave, inbox); + return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +} + +int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + 
int err; + int qpn = vhcr->in_modifier & 0x7fffff; + struct res_qp *qp; + + err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0); + if (err) + return err; + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto ex_abort; + + atomic_dec(&qp->mtt->ref_count); + atomic_dec(&qp->rcq->ref_count); + atomic_dec(&qp->scq->ref_count); + if (qp->srq) + atomic_dec(&qp->srq->ref_count); + res_end_move(dev, slave, RES_QP, qpn); + return 0; + +ex_abort: + res_abort_move(dev, slave, RES_QP, qpn); + + return err; +} + +static struct res_gid *find_gid(struct mlx4_dev *dev, int slave, + struct res_qp *rqp, u8 *gid) +{ + struct res_gid *res; + + list_for_each_entry(res, &rqp->mcg_list, list) { + if (!memcmp(res->gid, gid, 16)) + return res; + } + return NULL; +} + +static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, + u8 *gid, enum mlx4_protocol prot, + enum mlx4_steer_type steer, u64 reg_id) +{ + struct res_gid *res; + int err; + + res = kzalloc(sizeof *res, GFP_KERNEL); + if (!res) + return -ENOMEM; + + spin_lock_irq(&rqp->mcg_spl); + if (find_gid(dev, slave, rqp, gid)) { + kfree(res); + err = -EEXIST; + } else { + memcpy(res->gid, gid, 16); + res->prot = prot; + res->steer = steer; + res->reg_id = reg_id; + list_add_tail(&res->list, &rqp->mcg_list); + err = 0; + } + spin_unlock_irq(&rqp->mcg_spl); + + return err; +} + +static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, + u8 *gid, enum mlx4_protocol prot, + enum mlx4_steer_type steer, u64 *reg_id) +{ + struct res_gid *res; + int err; + + spin_lock_irq(&rqp->mcg_spl); + res = find_gid(dev, slave, rqp, gid); + if (!res || res->prot != prot || res->steer != steer) + err = -EINVAL; + else { + *reg_id = res->reg_id; + list_del(&res->list); + kfree(res); + err = 0; + } + spin_unlock_irq(&rqp->mcg_spl); + + return err; +} + +static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp, + u8 gid[16], int block_loopback, enum mlx4_protocol prot, + enum mlx4_steer_type type, u64 *reg_id) +{ + switch (dev->caps.steering_mode) { + case MLX4_STEERING_MODE_DEVICE_MANAGED: { + int port = mlx4_slave_convert_port(dev, slave, gid[5]); + if (port < 0) + return port; + return mlx4_trans_to_dmfs_attach(dev, qp, gid, port, + block_loopback, prot, + reg_id); + } + case MLX4_STEERING_MODE_B0: + if (prot == MLX4_PROT_ETH) { + int port = mlx4_slave_convert_port(dev, slave, gid[5]); + if (port < 0) + return port; + gid[5] = port; + } + return mlx4_qp_attach_common(dev, qp, gid, + block_loopback, prot, type); + default: + return -EINVAL; + } +} + +static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, + u8 gid[16], enum mlx4_protocol prot, + enum mlx4_steer_type type, u64 reg_id) +{ + switch (dev->caps.steering_mode) { + case MLX4_STEERING_MODE_DEVICE_MANAGED: + return mlx4_flow_detach(dev, reg_id); + case MLX4_STEERING_MODE_B0: + return mlx4_qp_detach_common(dev, qp, gid, prot, type); + default: + return -EINVAL; + } +} + +static int mlx4_adjust_port(struct mlx4_dev *dev, int slave, + u8 *gid, enum mlx4_protocol prot) +{ + int real_port; + + if (prot != MLX4_PROT_ETH) + return 0; + + if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 || + dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { + real_port = mlx4_slave_convert_port(dev, slave, gid[5]); + if (real_port < 0) + return -EINVAL; + gid[5] = real_port; + } + + return 0; +} + +int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct 
mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_qp qp; /* dummy for calling attach/detach */ + u8 *gid = inbox->buf; + enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7; + int err; + int qpn; + struct res_qp *rqp; + u64 reg_id = 0; + int attach = vhcr->op_modifier; + int block_loopback = vhcr->in_modifier >> 31; + u8 steer_type_mask = 2; + enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1; + + qpn = vhcr->in_modifier & 0xffffff; + err = get_res(dev, slave, qpn, RES_QP, &rqp); + if (err) + return err; + + qp.qpn = qpn; + if (attach) { + err = qp_attach(dev, slave, &qp, gid, block_loopback, prot, + type, ®_id); + if (err) { + pr_err("Fail to attach rule to qp 0x%x\n", qpn); + goto ex_put; + } + err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id); + if (err) + goto ex_detach; + } else { + err = mlx4_adjust_port(dev, slave, gid, prot); + if (err) + goto ex_put; + + err = rem_mcg_res(dev, slave, rqp, gid, prot, type, ®_id); + if (err) + goto ex_put; + + err = qp_detach(dev, &qp, gid, prot, type, reg_id); + if (err) + pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n", + qpn, reg_id); + } + put_res(dev, slave, qpn, RES_QP); + return err; + +ex_detach: + qp_detach(dev, &qp, gid, prot, type, reg_id); +ex_put: + put_res(dev, slave, qpn, RES_QP); + return err; +} + +/* + * MAC validation for Flow Steering rules. + * VF can attach rules only with a mac address which is assigned to it. + */ +static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header, + struct list_head *rlist) +{ + struct mac_res *res, *tmp; + __be64 be_mac; + + /* make sure it isn't multicast or broadcast mac*/ + if (!is_multicast_ether_addr(eth_header->eth.dst_mac) && + !is_broadcast_ether_addr(eth_header->eth.dst_mac)) { + list_for_each_entry_safe(res, tmp, rlist, list) { + be_mac = cpu_to_be64(res->mac << 16); + if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac)) + return 0; + } + pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n", + eth_header->eth.dst_mac, slave); + return -EINVAL; + } + return 0; +} + +/* + * In case of missing eth header, append eth header with a MAC address + * assigned to the VF. 
+ */ +static int add_eth_header(struct mlx4_dev *dev, int slave, + struct mlx4_cmd_mailbox *inbox, + struct list_head *rlist, int header_id) +{ + struct mac_res *res, *tmp; + u8 port; + struct mlx4_net_trans_rule_hw_ctrl *ctrl; + struct mlx4_net_trans_rule_hw_eth *eth_header; + struct mlx4_net_trans_rule_hw_ipv4 *ip_header; + struct mlx4_net_trans_rule_hw_tcp_udp *l4_header; + __be64 be_mac = 0; + __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16); + + ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; + port = ctrl->port; + eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1); + + /* Clear a space in the inbox for eth header */ + switch (header_id) { + case MLX4_NET_TRANS_RULE_ID_IPV4: + ip_header = + (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1); + memmove(ip_header, eth_header, + sizeof(*ip_header) + sizeof(*l4_header)); + break; + case MLX4_NET_TRANS_RULE_ID_TCP: + case MLX4_NET_TRANS_RULE_ID_UDP: + l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *) + (eth_header + 1); + memmove(l4_header, eth_header, sizeof(*l4_header)); + break; + default: + return -EINVAL; + } + list_for_each_entry_safe(res, tmp, rlist, list) { + if (port == res->port) { + be_mac = cpu_to_be64(res->mac << 16); + break; + } + } + if (!be_mac) { + pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n", + port); + return -EINVAL; + } + + memset(eth_header, 0, sizeof(*eth_header)); + eth_header->size = sizeof(*eth_header) >> 2; + eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]); + memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN); + memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN); + + return 0; + +} + +#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX) +int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd_info) +{ + int err; + u32 qpn = vhcr->in_modifier & 0xffffff; + struct res_qp *rqp; + u64 mac; + unsigned port; + u64 pri_addr_path_mask; + struct mlx4_update_qp_context *cmd; + int smac_index; + + cmd = (struct mlx4_update_qp_context *)inbox->buf; + + pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask); + if (cmd->qp_mask || cmd->secondary_addr_path_mask || + (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED)) + return -EPERM; + + /* Just change the smac for the QP */ + err = get_res(dev, slave, qpn, RES_QP, &rqp); + if (err) { + mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave); + return err; + } + + port = (rqp->sched_queue >> 6 & 1) + 1; + + if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) { + smac_index = cmd->qp_context.pri_path.grh_mylmc; + err = mac_find_smac_ix_in_slave(dev, slave, port, + smac_index, &mac); + + if (err) { + mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. 
smac_ix: %d\n", + qpn, smac_index); + goto err_mac; + } + } + + err = mlx4_cmd(dev, inbox->dma, + vhcr->in_modifier, 0, + MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) { + mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn); + goto err_mac; + } + +err_mac: + put_res(dev, slave, qpn, RES_QP); + return err; +} + +int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC]; + int err; + int qpn; + struct res_qp *rqp; + struct mlx4_net_trans_rule_hw_ctrl *ctrl; + struct _rule_hw *rule_header; + int header_id; + + if (dev->caps.steering_mode != + MLX4_STEERING_MODE_DEVICE_MANAGED) + return -EOPNOTSUPP; + + ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; + ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port); + if (ctrl->port <= 0) + return -EINVAL; + qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; + err = get_res(dev, slave, qpn, RES_QP, &rqp); + if (err) { + pr_err("Steering rule with qpn 0x%x rejected\n", qpn); + return err; + } + rule_header = (struct _rule_hw *)(ctrl + 1); + header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id)); + + switch (header_id) { + case MLX4_NET_TRANS_RULE_ID_ETH: + if (validate_eth_header_mac(slave, rule_header, rlist)) { + err = -EINVAL; + goto err_put; + } + break; + case MLX4_NET_TRANS_RULE_ID_IB: + break; + case MLX4_NET_TRANS_RULE_ID_IPV4: + case MLX4_NET_TRANS_RULE_ID_TCP: + case MLX4_NET_TRANS_RULE_ID_UDP: + pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n"); + if (add_eth_header(dev, slave, inbox, rlist, header_id)) { + err = -EINVAL; + goto err_put; + } + vhcr->in_modifier += + sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2; + break; + default: + pr_err("Corrupted mailbox\n"); + err = -EINVAL; + goto err_put; + } + + err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param, + vhcr->in_modifier, 0, + MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + goto err_put; + + err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn); + if (err) { + mlx4_err(dev, "Fail to add flow steering resources\n"); + /* detach rule*/ + mlx4_cmd(dev, vhcr->out_param, 0, 0, + MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + goto err_put; + } + atomic_inc(&rqp->ref_count); +err_put: + put_res(dev, slave, qpn, RES_QP); + return err; +} + +int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + struct res_qp *rqp; + struct res_fs_rule *rrule; + + if (dev->caps.steering_mode != + MLX4_STEERING_MODE_DEVICE_MANAGED) + return -EOPNOTSUPP; + + err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule); + if (err) + return err; + /* Release the rule form busy state before removal */ + put_res(dev, slave, vhcr->in_param, RES_FS_RULE); + err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp); + if (err) + return err; + + err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0); + if (err) { + mlx4_err(dev, "Fail to remove flow steering resources\n"); + goto out; + } + + err = mlx4_cmd(dev, vhcr->in_param, 0, 0, + MLX4_QP_FLOW_STEERING_DETACH, 
MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (!err) + atomic_dec(&rqp->ref_count); +out: + put_res(dev, slave, rrule->qpn, RES_QP); + return err; +} + +enum { + BUSY_MAX_RETRIES = 10 +}; + +int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int index = vhcr->in_modifier & 0xffff; + + err = get_res(dev, slave, index, RES_COUNTER, NULL); + if (err) + return err; + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + put_res(dev, slave, index, RES_COUNTER); + return err; +} + +static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp) +{ + struct res_gid *rgid; + struct res_gid *tmp; + struct mlx4_qp qp; /* dummy for calling attach/detach */ + + list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) { + switch (dev->caps.steering_mode) { + case MLX4_STEERING_MODE_DEVICE_MANAGED: + mlx4_flow_detach(dev, rgid->reg_id); + break; + case MLX4_STEERING_MODE_B0: + qp.qpn = rqp->local_qpn; + (void) mlx4_qp_detach_common(dev, &qp, rgid->gid, + rgid->prot, rgid->steer); + break; + } + list_del(&rgid->list); + kfree(rgid); + } +} + +static int _move_all_busy(struct mlx4_dev *dev, int slave, + enum mlx4_resource type, int print) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = + &priv->mfunc.master.res_tracker; + struct list_head *rlist = &tracker->slave_list[slave].res_list[type]; + struct res_common *r; + struct res_common *tmp; + int busy; + + busy = 0; + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(r, tmp, rlist, list) { + if (r->owner == slave) { + if (!r->removing) { + if (r->state == RES_ANY_BUSY) { + if (print) + mlx4_dbg(dev, + "%s id 0x%llx is busy\n", + resource_str(type), + r->res_id); + ++busy; + } else { + r->from_state = r->state; + r->state = RES_ANY_BUSY; + r->removing = 1; + } + } + } + } + spin_unlock_irq(mlx4_tlock(dev)); + + return busy; +} + +static int move_all_busy(struct mlx4_dev *dev, int slave, + enum mlx4_resource type) +{ + unsigned long begin; + int busy; + + begin = jiffies; + do { + busy = _move_all_busy(dev, slave, type, 0); + if (time_after(jiffies, begin + 5 * HZ)) + break; + if (busy) + cond_resched(); + } while (busy); + + if (busy) + busy = _move_all_busy(dev, slave, type, 1); + + return busy; +} +static void rem_slave_qps(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *qp_list = + &tracker->slave_list[slave].res_list[RES_QP]; + struct res_qp *qp; + struct res_qp *tmp; + int state; + u64 in_param; + int qpn; + int err; + + err = move_all_busy(dev, slave, RES_QP); + if (err) + mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n", + slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(qp, tmp, qp_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if (qp->com.owner == slave) { + qpn = qp->com.res_id; + detach_qp(dev, slave, qp); + state = qp->com.from_state; + while (state != 0) { + switch (state) { + case RES_QP_RESERVED: + spin_lock_irq(mlx4_tlock(dev)); + rb_erase(&qp->com.node, + &tracker->res_tree[RES_QP]); + list_del(&qp->com.list); + spin_unlock_irq(mlx4_tlock(dev)); + if (!valid_reserved(dev, slave, qpn)) { + __mlx4_qp_release_range(dev, qpn, 1); + mlx4_release_resource(dev, slave, + RES_QP, 1, 0); + } + kfree(qp); + state = 0; + break; + case 
RES_QP_MAPPED: + if (!valid_reserved(dev, slave, qpn)) + __mlx4_qp_free_icm(dev, qpn); + state = RES_QP_RESERVED; + break; + case RES_QP_HW: + in_param = slave; + err = mlx4_cmd(dev, in_param, + qp->local_qpn, 2, + MLX4_CMD_2RST_QP, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n", + slave, qp->local_qpn); + atomic_dec(&qp->rcq->ref_count); + atomic_dec(&qp->scq->ref_count); + atomic_dec(&qp->mtt->ref_count); + if (qp->srq) + atomic_dec(&qp->srq->ref_count); + state = RES_QP_MAPPED; + break; + default: + state = 0; + } + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void rem_slave_srqs(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *srq_list = + &tracker->slave_list[slave].res_list[RES_SRQ]; + struct res_srq *srq; + struct res_srq *tmp; + int state; + u64 in_param; + LIST_HEAD(tlist); + int srqn; + int err; + + err = move_all_busy(dev, slave, RES_SRQ); + if (err) + mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n", + slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(srq, tmp, srq_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if (srq->com.owner == slave) { + srqn = srq->com.res_id; + state = srq->com.from_state; + while (state != 0) { + switch (state) { + case RES_SRQ_ALLOCATED: + __mlx4_srq_free_icm(dev, srqn); + spin_lock_irq(mlx4_tlock(dev)); + rb_erase(&srq->com.node, + &tracker->res_tree[RES_SRQ]); + list_del(&srq->com.list); + spin_unlock_irq(mlx4_tlock(dev)); + mlx4_release_resource(dev, slave, + RES_SRQ, 1, 0); + kfree(srq); + state = 0; + break; + + case RES_SRQ_HW: + in_param = slave; + err = mlx4_cmd(dev, in_param, srqn, 1, + MLX4_CMD_HW2SW_SRQ, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n", + slave, srqn); + + atomic_dec(&srq->mtt->ref_count); + if (srq->cq) + atomic_dec(&srq->cq->ref_count); + state = RES_SRQ_ALLOCATED; + break; + + default: + state = 0; + } + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void rem_slave_cqs(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *cq_list = + &tracker->slave_list[slave].res_list[RES_CQ]; + struct res_cq *cq; + struct res_cq *tmp; + int state; + u64 in_param; + LIST_HEAD(tlist); + int cqn; + int err; + + err = move_all_busy(dev, slave, RES_CQ); + if (err) + mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n", + slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(cq, tmp, cq_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) { + cqn = cq->com.res_id; + state = cq->com.from_state; + while (state != 0) { + switch (state) { + case RES_CQ_ALLOCATED: + __mlx4_cq_free_icm(dev, cqn); + spin_lock_irq(mlx4_tlock(dev)); + rb_erase(&cq->com.node, + &tracker->res_tree[RES_CQ]); + list_del(&cq->com.list); + spin_unlock_irq(mlx4_tlock(dev)); + mlx4_release_resource(dev, slave, + RES_CQ, 1, 0); + kfree(cq); + state = 0; + break; + + case RES_CQ_HW: + in_param = slave; + err = mlx4_cmd(dev, in_param, cqn, 1, + MLX4_CMD_HW2SW_CQ, + MLX4_CMD_TIME_CLASS_A, + 
MLX4_CMD_NATIVE); + if (err) + mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n", + slave, cqn); + atomic_dec(&cq->mtt->ref_count); + state = RES_CQ_ALLOCATED; + break; + + default: + state = 0; + } + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void rem_slave_mrs(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *mpt_list = + &tracker->slave_list[slave].res_list[RES_MPT]; + struct res_mpt *mpt; + struct res_mpt *tmp; + int state; + u64 in_param; + LIST_HEAD(tlist); + int mptn; + int err; + + err = move_all_busy(dev, slave, RES_MPT); + if (err) + mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n", + slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if (mpt->com.owner == slave) { + mptn = mpt->com.res_id; + state = mpt->com.from_state; + while (state != 0) { + switch (state) { + case RES_MPT_RESERVED: + __mlx4_mpt_release(dev, mpt->key); + spin_lock_irq(mlx4_tlock(dev)); + rb_erase(&mpt->com.node, + &tracker->res_tree[RES_MPT]); + list_del(&mpt->com.list); + spin_unlock_irq(mlx4_tlock(dev)); + mlx4_release_resource(dev, slave, + RES_MPT, 1, 0); + kfree(mpt); + state = 0; + break; + + case RES_MPT_MAPPED: + __mlx4_mpt_free_icm(dev, mpt->key); + state = RES_MPT_RESERVED; + break; + + case RES_MPT_HW: + in_param = slave; + err = mlx4_cmd(dev, in_param, mptn, 0, + MLX4_CMD_HW2SW_MPT, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n", + slave, mptn); + if (mpt->mtt) + atomic_dec(&mpt->mtt->ref_count); + state = RES_MPT_MAPPED; + break; + default: + state = 0; + } + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void rem_slave_mtts(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = + &priv->mfunc.master.res_tracker; + struct list_head *mtt_list = + &tracker->slave_list[slave].res_list[RES_MTT]; + struct res_mtt *mtt; + struct res_mtt *tmp; + int state; + LIST_HEAD(tlist); + int base; + int err; + + err = move_all_busy(dev, slave, RES_MTT); + if (err) + mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n", + slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if (mtt->com.owner == slave) { + base = mtt->com.res_id; + state = mtt->com.from_state; + while (state != 0) { + switch (state) { + case RES_MTT_ALLOCATED: + __mlx4_free_mtt_range(dev, base, + mtt->order); + spin_lock_irq(mlx4_tlock(dev)); + rb_erase(&mtt->com.node, + &tracker->res_tree[RES_MTT]); + list_del(&mtt->com.list); + spin_unlock_irq(mlx4_tlock(dev)); + mlx4_release_resource(dev, slave, RES_MTT, + 1 << mtt->order, 0); + kfree(mtt); + state = 0; + break; + + default: + state = 0; + } + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = + &priv->mfunc.master.res_tracker; + struct list_head *fs_rule_list = + &tracker->slave_list[slave].res_list[RES_FS_RULE]; + struct res_fs_rule *fs_rule; + struct res_fs_rule *tmp; + int 
state; + u64 base; + int err; + + err = move_all_busy(dev, slave, RES_FS_RULE); + if (err) + mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n", + slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if (fs_rule->com.owner == slave) { + base = fs_rule->com.res_id; + state = fs_rule->com.from_state; + while (state != 0) { + switch (state) { + case RES_FS_RULE_ALLOCATED: + /* detach rule */ + err = mlx4_cmd(dev, base, 0, 0, + MLX4_QP_FLOW_STEERING_DETACH, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + spin_lock_irq(mlx4_tlock(dev)); + rb_erase(&fs_rule->com.node, + &tracker->res_tree[RES_FS_RULE]); + list_del(&fs_rule->com.list); + spin_unlock_irq(mlx4_tlock(dev)); + kfree(fs_rule); + state = 0; + break; + + default: + state = 0; + } + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void rem_slave_eqs(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *eq_list = + &tracker->slave_list[slave].res_list[RES_EQ]; + struct res_eq *eq; + struct res_eq *tmp; + int err; + int state; + LIST_HEAD(tlist); + int eqn; + + err = move_all_busy(dev, slave, RES_EQ); + if (err) + mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n", + slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(eq, tmp, eq_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if (eq->com.owner == slave) { + eqn = eq->com.res_id; + state = eq->com.from_state; + while (state != 0) { + switch (state) { + case RES_EQ_RESERVED: + spin_lock_irq(mlx4_tlock(dev)); + rb_erase(&eq->com.node, + &tracker->res_tree[RES_EQ]); + list_del(&eq->com.list); + spin_unlock_irq(mlx4_tlock(dev)); + kfree(eq); + state = 0; + break; + + case RES_EQ_HW: + err = mlx4_cmd(dev, slave, eqn & 0x3ff, + 1, MLX4_CMD_HW2SW_EQ, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n", + slave, eqn & 0x3ff); + atomic_dec(&eq->mtt->ref_count); + state = RES_EQ_RESERVED; + break; + + default: + state = 0; + } + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void rem_slave_counters(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *counter_list = + &tracker->slave_list[slave].res_list[RES_COUNTER]; + struct res_counter *counter; + struct res_counter *tmp; + int err; + int index; + + err = move_all_busy(dev, slave, RES_COUNTER); + if (err) + mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n", + slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(counter, tmp, counter_list, com.list) { + if (counter->com.owner == slave) { + index = counter->com.res_id; + rb_erase(&counter->com.node, + &tracker->res_tree[RES_COUNTER]); + list_del(&counter->com.list); + kfree(counter); + __mlx4_counter_free(dev, index); + mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); + } + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *xrcdn_list = + 
&tracker->slave_list[slave].res_list[RES_XRCD]; + struct res_xrcdn *xrcd; + struct res_xrcdn *tmp; + int err; + int xrcdn; + + err = move_all_busy(dev, slave, RES_XRCD); + if (err) + mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n", + slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) { + if (xrcd->com.owner == slave) { + xrcdn = xrcd->com.res_id; + rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]); + list_del(&xrcd->com.list); + kfree(xrcd); + __mlx4_xrcd_free(dev, xrcdn); + } + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + mlx4_reset_roce_gids(dev, slave); + mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); + rem_slave_vlans(dev, slave); + rem_slave_macs(dev, slave); + rem_slave_fs_rule(dev, slave); + rem_slave_qps(dev, slave); + rem_slave_srqs(dev, slave); + rem_slave_cqs(dev, slave); + rem_slave_mrs(dev, slave); + rem_slave_eqs(dev, slave); + rem_slave_mtts(dev, slave); + rem_slave_counters(dev, slave); + rem_slave_xrcdns(dev, slave); + mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); +} + +void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) +{ + struct mlx4_vf_immed_vlan_work *work = + container_of(_work, struct mlx4_vf_immed_vlan_work, work); + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_update_qp_context *upd_context; + struct mlx4_dev *dev = &work->priv->dev; + struct mlx4_resource_tracker *tracker = + &work->priv->mfunc.master.res_tracker; + struct list_head *qp_list = + &tracker->slave_list[work->slave].res_list[RES_QP]; + struct res_qp *qp; + struct res_qp *tmp; + u64 qp_path_mask_vlan_ctrl = + ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED)); + + u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) | + (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) | + (1ULL << MLX4_UPD_QP_PATH_MASK_CV) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) | + (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) | + (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) | + (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE)); + + int err; + int port, errors = 0; + u8 vlan_control; + + if (mlx4_is_slave(dev)) { + mlx4_warn(dev, "Trying to update-qp in slave %d\n", + work->slave); + goto out; + } + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + goto out; + if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */ + vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; + else if (!work->vlan_id) + vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; + else + vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; + + upd_context = mailbox->buf; + upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(qp, tmp, qp_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if 
(qp->com.owner == work->slave) { + if (qp->com.from_state != RES_QP_HW || + !qp->sched_queue || /* no INIT2RTR trans yet */ + mlx4_is_qp_reserved(dev, qp->local_qpn) || + qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) { + spin_lock_irq(mlx4_tlock(dev)); + continue; + } + port = (qp->sched_queue >> 6 & 1) + 1; + if (port != work->port) { + spin_lock_irq(mlx4_tlock(dev)); + continue; + } + if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff)) + upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask); + else + upd_context->primary_addr_path_mask = + cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl); + if (work->vlan_id == MLX4_VGT) { + upd_context->qp_context.param3 = qp->param3; + upd_context->qp_context.pri_path.vlan_control = qp->vlan_control; + upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx; + upd_context->qp_context.pri_path.vlan_index = qp->vlan_index; + upd_context->qp_context.pri_path.fl = qp->pri_path_fl; + upd_context->qp_context.pri_path.feup = qp->feup; + upd_context->qp_context.pri_path.sched_queue = + qp->sched_queue; + } else { + upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN); + upd_context->qp_context.pri_path.vlan_control = vlan_control; + upd_context->qp_context.pri_path.vlan_index = work->vlan_ix; + upd_context->qp_context.pri_path.fvl_rx = + qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN; + upd_context->qp_context.pri_path.fl = + qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN; + upd_context->qp_context.pri_path.feup = + qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN; + upd_context->qp_context.pri_path.sched_queue = + qp->sched_queue & 0xC7; + upd_context->qp_context.pri_path.sched_queue |= + ((work->qos & 0x7) << 3); + upd_context->qp_mask |= + cpu_to_be64(1ULL << + MLX4_UPD_QP_MASK_QOS_VPP); + upd_context->qp_context.qos_vport = + work->qos_vport; + } + + err = mlx4_cmd(dev, mailbox->dma, + qp->local_qpn & 0xffffff, + 0, MLX4_CMD_UPDATE_QP, + MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); + if (err) { + mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n", + work->slave, port, qp->local_qpn, err); + errors++; + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); + mlx4_free_cmd_mailbox(dev, mailbox); + + if (errors) + mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n", + errors, work->slave, work->port); + + /* unregister previous vlan_id if needed and we had no errors + * while updating the QPs + */ + if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors && + NO_INDX != work->orig_vlan_ix) + __mlx4_unregister_vlan(&work->priv->dev, work->port, + work->orig_vlan_id); +out: + kfree(work); + return; +} diff --git a/drivers/net/ethernet/mellanox/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c new file mode 100644 index 000000000..094773d88 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/sense.c @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include <linux/errno.h> +#include <linux/if_ether.h> + +#include <linux/mlx4/cmd.h> + +#include "mlx4.h" + +int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, + enum mlx4_port_type *type) +{ + u64 out_param; + int err = 0; + + err = mlx4_cmd_imm(dev, 0, &out_param, port, 0, + MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + if (err) { + mlx4_err(dev, "Sense command failed for port: %d\n", port); + return err; + } + + if (out_param > 2) { + mlx4_err(dev, "Sense returned illegal value: 0x%llx\n", out_param); + return -EINVAL; + } + + *type = out_param; + return 0; +} + +void mlx4_do_sense_ports(struct mlx4_dev *dev, + enum mlx4_port_type *stype, + enum mlx4_port_type *defaults) +{ + struct mlx4_sense *sense = &mlx4_priv(dev)->sense; + int err; + int i; + + for (i = 1; i <= dev->caps.num_ports; i++) { + stype[i - 1] = 0; + if (sense->do_sense_port[i] && sense->sense_allowed[i] && + dev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { + err = mlx4_SENSE_PORT(dev, i, &stype[i - 1]); + if (err) + stype[i - 1] = defaults[i - 1]; + } else + stype[i - 1] = defaults[i - 1]; + } + + /* + * If sensed nothing, remain in current configuration. + */ + for (i = 0; i < dev->caps.num_ports; i++) + stype[i] = stype[i] ? 
stype[i] : defaults[i]; + +} + +static void mlx4_sense_port(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct mlx4_sense *sense = container_of(delay, struct mlx4_sense, + sense_poll); + struct mlx4_dev *dev = sense->dev; + struct mlx4_priv *priv = mlx4_priv(dev); + enum mlx4_port_type stype[MLX4_MAX_PORTS]; + + mutex_lock(&priv->port_mutex); + mlx4_do_sense_ports(dev, stype, &dev->caps.port_type[1]); + + if (mlx4_check_port_params(dev, stype)) + goto sense_again; + + if (mlx4_change_port_types(dev, stype)) + mlx4_err(dev, "Failed to change port_types\n"); + +sense_again: + mutex_unlock(&priv->port_mutex); + queue_delayed_work(mlx4_wq , &sense->sense_poll, + round_jiffies_relative(MLX4_SENSE_RANGE)); +} + +void mlx4_start_sense(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_sense *sense = &priv->sense; + + if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) + return; + + queue_delayed_work(mlx4_wq , &sense->sense_poll, + round_jiffies_relative(MLX4_SENSE_RANGE)); +} + +void mlx4_stop_sense(struct mlx4_dev *dev) +{ + cancel_delayed_work_sync(&mlx4_priv(dev)->sense.sense_poll); +} + +void mlx4_sense_init(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_sense *sense = &priv->sense; + int port; + + sense->dev = dev; + for (port = 1; port <= dev->caps.num_ports; port++) + sense->do_sense_port[port] = 1; + + INIT_DEFERRABLE_WORK(&sense->sense_poll, mlx4_sense_port); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c new file mode 100644 index 000000000..67146624e --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/srq.c @@ -0,0 +1,313 @@ +/* + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + + +#include <linux/mlx4/cmd.h> +#include <linux/mlx4/srq.h> +#include <linux/export.h> +#include <linux/gfp.h> + +#include "mlx4.h" +#include "icm.h" + +void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type) +{ + struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; + struct mlx4_srq *srq; + + spin_lock(&srq_table->lock); + + srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1)); + if (srq) + atomic_inc(&srq->refcount); + + spin_unlock(&srq_table->lock); + + if (!srq) { + mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn); + return; + } + + srq->event(srq, event_type); + + if (atomic_dec_and_test(&srq->refcount)) + complete(&srq->free); +} + +static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int srq_num) +{ + return mlx4_cmd(dev, mailbox->dma, srq_num, 0, + MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); +} + +static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int srq_num) +{ + return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num, + mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); +} + +static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark) +{ + return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); +} + +static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int srq_num) +{ + return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); +} + +int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn) +{ + struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; + int err; + + + *srqn = mlx4_bitmap_alloc(&srq_table->bitmap); + if (*srqn == -1) + return -ENOMEM; + + err = mlx4_table_get(dev, &srq_table->table, *srqn, GFP_KERNEL); + if (err) + goto err_out; + + err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn, GFP_KERNEL); + if (err) + goto err_put; + return 0; + +err_put: + mlx4_table_put(dev, &srq_table->table, *srqn); + +err_out: + mlx4_bitmap_free(&srq_table->bitmap, *srqn, MLX4_NO_RR); + return err; +} + +static int mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn) +{ + u64 out_param; + int err; + + if (mlx4_is_mfunc(dev)) { + err = mlx4_cmd_imm(dev, 0, &out_param, RES_SRQ, + RES_OP_RESERVE_AND_MAP, + MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (!err) + *srqn = get_param_l(&out_param); + + return err; + } + return __mlx4_srq_alloc_icm(dev, srqn); +} + +void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn) +{ + struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; + + mlx4_table_put(dev, &srq_table->cmpt_table, srqn); + mlx4_table_put(dev, &srq_table->table, srqn); + mlx4_bitmap_free(&srq_table->bitmap, srqn, MLX4_NO_RR); +} + +static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn) +{ + u64 in_param = 0; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, srqn); + if (mlx4_cmd(dev, in_param, RES_SRQ, RES_OP_RESERVE_AND_MAP, + MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED)) + mlx4_warn(dev, "Failed freeing cq:%d\n", srqn); + return; + } + __mlx4_srq_free_icm(dev, srqn); +} + +int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd, + struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq) +{ + struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_srq_context *srq_context; + u64 mtt_addr; + int err; + + 
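+	/* Allocation runs in three steps: reserve an SRQ number plus its ICM
+	 * entries, publish the SRQ in the radix tree used by the async event
+	 * handler, and finally hand the context to firmware via SW2HW_SRQ.
+	 * Each step is unwound in reverse order on failure below. */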
err = mlx4_srq_alloc_icm(dev, &srq->srqn); + if (err) + return err; + + spin_lock_irq(&srq_table->lock); + err = radix_tree_insert(&srq_table->tree, srq->srqn, srq); + spin_unlock_irq(&srq_table->lock); + if (err) + goto err_icm; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); + goto err_radix; + } + + srq_context = mailbox->buf; + srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) | + srq->srqn); + srq_context->logstride = srq->wqe_shift - 4; + srq_context->xrcd = cpu_to_be16(xrcd); + srq_context->pg_offset_cqn = cpu_to_be32(cqn & 0xffffff); + srq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; + + mtt_addr = mlx4_mtt_addr(dev, mtt); + srq_context->mtt_base_addr_h = mtt_addr >> 32; + srq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); + srq_context->pd = cpu_to_be32(pdn); + srq_context->db_rec_addr = cpu_to_be64(db_rec); + + err = mlx4_SW2HW_SRQ(dev, mailbox, srq->srqn); + mlx4_free_cmd_mailbox(dev, mailbox); + if (err) + goto err_radix; + + atomic_set(&srq->refcount, 1); + init_completion(&srq->free); + + return 0; + +err_radix: + spin_lock_irq(&srq_table->lock); + radix_tree_delete(&srq_table->tree, srq->srqn); + spin_unlock_irq(&srq_table->lock); + +err_icm: + mlx4_srq_free_icm(dev, srq->srqn); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_srq_alloc); + +void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq) +{ + struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; + int err; + + err = mlx4_HW2SW_SRQ(dev, NULL, srq->srqn); + if (err) + mlx4_warn(dev, "HW2SW_SRQ failed (%d) for SRQN %06x\n", err, srq->srqn); + + spin_lock_irq(&srq_table->lock); + radix_tree_delete(&srq_table->tree, srq->srqn); + spin_unlock_irq(&srq_table->lock); + + if (atomic_dec_and_test(&srq->refcount)) + complete(&srq->free); + wait_for_completion(&srq->free); + + mlx4_srq_free_icm(dev, srq->srqn); +} +EXPORT_SYMBOL_GPL(mlx4_srq_free); + +int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark) +{ + return mlx4_ARM_SRQ(dev, srq->srqn, limit_watermark); +} +EXPORT_SYMBOL_GPL(mlx4_srq_arm); + +int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_srq_context *srq_context; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + srq_context = mailbox->buf; + + err = mlx4_QUERY_SRQ(dev, mailbox, srq->srqn); + if (err) + goto err_out; + *limit_watermark = be16_to_cpu(srq_context->limit_watermark); + +err_out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_srq_query); + +int mlx4_init_srq_table(struct mlx4_dev *dev) +{ + struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; + int err; + + spin_lock_init(&srq_table->lock); + INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC); + if (mlx4_is_slave(dev)) + return 0; + + err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs, + dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0); + if (err) + return err; + + return 0; +} + +void mlx4_cleanup_srq_table(struct mlx4_dev *dev) +{ + if (mlx4_is_slave(dev)) + return; + mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap); +} + +struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn) +{ + struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; + struct mlx4_srq *srq; + unsigned long flags; + + spin_lock_irqsave(&srq_table->lock, flags); + srq = radix_tree_lookup(&srq_table->tree, + srqn & (dev->caps.num_srqs 
- 1)); + spin_unlock_irqrestore(&srq_table->lock, flags); + + return srq; +} +EXPORT_SYMBOL_GPL(mlx4_srq_lookup); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig new file mode 100644 index 000000000..8ff57e8e3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -0,0 +1,8 @@ +# +# Mellanox driver configuration +# + +config MLX5_CORE + tristate + depends on PCI + default n diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile new file mode 100644 index 000000000..105780bb9 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_MLX5_CORE) += mlx5_core.o + +mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ + health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ + mad.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c new file mode 100644 index 000000000..ac0f7bf4b --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -0,0 +1,241 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/export.h> +#include <linux/bitmap.h> +#include <linux/dma-mapping.h> +#include <linux/vmalloc.h> +#include <linux/mlx5/driver.h> + +#include "mlx5_core.h" + +/* Handling for queue buffers -- we allocate a bunch of memory and + * register it in a memory region at HCA virtual address 0. If the + * requested size is > max_direct, we split the allocation into + * multiple pages, so we don't require too much contiguous memory. 
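+ * When the buffer is split, 64-bit kernels additionally vmap() the pages so
+ * that direct.buf still provides one virtually contiguous view; on 32-bit
+ * kernels direct.buf stays NULL for such buffers and callers must go
+ * through page_list[] instead.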
+ */ + +int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct, + struct mlx5_buf *buf) +{ + dma_addr_t t; + + buf->size = size; + if (size <= max_direct) { + buf->nbufs = 1; + buf->npages = 1; + buf->page_shift = (u8)get_order(size) + PAGE_SHIFT; + buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev, + size, &t, GFP_KERNEL); + if (!buf->direct.buf) + return -ENOMEM; + + buf->direct.map = t; + + while (t & ((1 << buf->page_shift) - 1)) { + --buf->page_shift; + buf->npages *= 2; + } + } else { + int i; + + buf->direct.buf = NULL; + buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; + buf->npages = buf->nbufs; + buf->page_shift = PAGE_SHIFT; + buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), + GFP_KERNEL); + if (!buf->page_list) + return -ENOMEM; + + for (i = 0; i < buf->nbufs; i++) { + buf->page_list[i].buf = + dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE, + &t, GFP_KERNEL); + if (!buf->page_list[i].buf) + goto err_free; + + buf->page_list[i].map = t; + } + + if (BITS_PER_LONG == 64) { + struct page **pages; + pages = kmalloc(sizeof(*pages) * buf->nbufs, GFP_KERNEL); + if (!pages) + goto err_free; + for (i = 0; i < buf->nbufs; i++) + pages[i] = virt_to_page(buf->page_list[i].buf); + buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); + kfree(pages); + if (!buf->direct.buf) + goto err_free; + } + } + + return 0; + +err_free: + mlx5_buf_free(dev, buf); + + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(mlx5_buf_alloc); + +void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf) +{ + int i; + + if (buf->nbufs == 1) + dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf, + buf->direct.map); + else { + if (BITS_PER_LONG == 64) + vunmap(buf->direct.buf); + + for (i = 0; i < buf->nbufs; i++) + if (buf->page_list[i].buf) + dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, + buf->page_list[i].buf, + buf->page_list[i].map); + kfree(buf->page_list); + } +} +EXPORT_SYMBOL_GPL(mlx5_buf_free); + +static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device) +{ + struct mlx5_db_pgdir *pgdir; + + pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL); + if (!pgdir) + return NULL; + + bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE); + pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE, + &pgdir->db_dma, GFP_KERNEL); + if (!pgdir->db_page) { + kfree(pgdir); + return NULL; + } + + return pgdir; +} + +static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir, + struct mlx5_db *db) +{ + int offset; + int i; + + i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE); + if (i >= MLX5_DB_PER_PAGE) + return -ENOMEM; + + __clear_bit(i, pgdir->bitmap); + + db->u.pgdir = pgdir; + db->index = i; + offset = db->index * L1_CACHE_BYTES; + db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page); + db->dma = pgdir->db_dma + offset; + + db->db[0] = 0; + db->db[1] = 0; + + return 0; +} + +int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db) +{ + struct mlx5_db_pgdir *pgdir; + int ret = 0; + + mutex_lock(&dev->priv.pgdir_mutex); + + list_for_each_entry(pgdir, &dev->priv.pgdir_list, list) + if (!mlx5_alloc_db_from_pgdir(pgdir, db)) + goto out; + + pgdir = mlx5_alloc_db_pgdir(&(dev->pdev->dev)); + if (!pgdir) { + ret = -ENOMEM; + goto out; + } + + list_add(&pgdir->list, &dev->priv.pgdir_list); + + /* This should never fail -- we just allocated an empty page: */ + WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db)); + +out: + mutex_unlock(&dev->priv.pgdir_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(mlx5_db_alloc); + +void mlx5_db_free(struct mlx5_core_dev *dev, 
struct mlx5_db *db) +{ + mutex_lock(&dev->priv.pgdir_mutex); + + __set_bit(db->index, db->u.pgdir->bitmap); + + if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) { + dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, + db->u.pgdir->db_page, db->u.pgdir->db_dma); + list_del(&db->u.pgdir->list); + kfree(db->u.pgdir); + } + + mutex_unlock(&dev->priv.pgdir_mutex); +} +EXPORT_SYMBOL_GPL(mlx5_db_free); + + +void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas) +{ + u64 addr; + int i; + + for (i = 0; i < buf->npages; i++) { + if (buf->nbufs == 1) + addr = buf->direct.map + (i << buf->page_shift); + else + addr = buf->page_list[i].map; + + pas[i] = cpu_to_be64(addr); + } +} +EXPORT_SYMBOL_GPL(mlx5_fill_page_array); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c new file mode 100644 index 000000000..e3273faf4 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -0,0 +1,1587 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <asm-generic/kmap_types.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/random.h> +#include <linux/io-mapping.h> +#include <linux/mlx5/driver.h> +#include <linux/debugfs.h> + +#include "mlx5_core.h" + +enum { + CMD_IF_REV = 5, +}; + +enum { + CMD_MODE_POLLING, + CMD_MODE_EVENTS +}; + +enum { + NUM_LONG_LISTS = 2, + NUM_MED_LISTS = 64, + LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 + + MLX5_CMD_DATA_BLOCK_SIZE, + MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE, +}; + +enum { + MLX5_CMD_DELIVERY_STAT_OK = 0x0, + MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1, + MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2, + MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3, + MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4, + MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5, + MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6, + MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7, + MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8, + MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9, + MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, +}; + +enum { + MLX5_CMD_STAT_OK = 0x0, + MLX5_CMD_STAT_INT_ERR = 0x1, + MLX5_CMD_STAT_BAD_OP_ERR = 0x2, + MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3, + MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4, + MLX5_CMD_STAT_BAD_RES_ERR = 0x5, + MLX5_CMD_STAT_RES_BUSY = 0x6, + MLX5_CMD_STAT_LIM_ERR = 0x8, + MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9, + MLX5_CMD_STAT_IX_ERR = 0xa, + MLX5_CMD_STAT_NO_RES_ERR = 0xf, + MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50, + MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51, + MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10, + MLX5_CMD_STAT_BAD_PKT_ERR = 0x30, + MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, +}; + +static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd, + struct mlx5_cmd_msg *in, + struct mlx5_cmd_msg *out, + void *uout, int uout_size, + mlx5_cmd_cbk_t cbk, + void *context, int page_queue) +{ + gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL; + struct mlx5_cmd_work_ent *ent; + + ent = kzalloc(sizeof(*ent), alloc_flags); + if (!ent) + return ERR_PTR(-ENOMEM); + + ent->in = in; + ent->out = out; + ent->uout = uout; + ent->uout_size = uout_size; + ent->callback = cbk; + ent->context = context; + ent->cmd = cmd; + ent->page_queue = page_queue; + + return ent; +} + +static u8 alloc_token(struct mlx5_cmd *cmd) +{ + u8 token; + + spin_lock(&cmd->token_lock); + cmd->token++; + if (cmd->token == 0) + cmd->token++; + token = cmd->token; + spin_unlock(&cmd->token_lock); + + return token; +} + +static int alloc_ent(struct mlx5_cmd *cmd) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&cmd->alloc_lock, flags); + ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds); + if (ret < cmd->max_reg_cmds) + clear_bit(ret, &cmd->bitmask); + spin_unlock_irqrestore(&cmd->alloc_lock, flags); + + return ret < cmd->max_reg_cmds ? 
ret : -ENOMEM; +} + +static void free_ent(struct mlx5_cmd *cmd, int idx) +{ + unsigned long flags; + + spin_lock_irqsave(&cmd->alloc_lock, flags); + set_bit(idx, &cmd->bitmask); + spin_unlock_irqrestore(&cmd->alloc_lock, flags); +} + +static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) +{ + return cmd->cmd_buf + (idx << cmd->log_stride); +} + +static u8 xor8_buf(void *buf, int len) +{ + u8 *ptr = buf; + u8 sum = 0; + int i; + + for (i = 0; i < len; i++) + sum ^= ptr[i]; + + return sum; +} + +static int verify_block_sig(struct mlx5_cmd_prot_block *block) +{ + if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff) + return -EINVAL; + + if (xor8_buf(block, sizeof(*block)) != 0xff) + return -EINVAL; + + return 0; +} + +static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token, + int csum) +{ + block->token = token; + if (csum) { + block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - + sizeof(block->data) - 2); + block->sig = ~xor8_buf(block, sizeof(*block) - 1); + } +} + +static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum) +{ + struct mlx5_cmd_mailbox *next = msg->next; + + while (next) { + calc_block_sig(next->buf, token, csum); + next = next->next; + } +} + +static void set_signature(struct mlx5_cmd_work_ent *ent, int csum) +{ + ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); + calc_chain_sig(ent->in, ent->token, csum); + calc_chain_sig(ent->out, ent->token, csum); +} + +static void poll_timeout(struct mlx5_cmd_work_ent *ent) +{ + unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000); + u8 own; + + do { + own = ent->lay->status_own; + if (!(own & CMD_OWNER_HW)) { + ent->ret = 0; + return; + } + usleep_range(5000, 10000); + } while (time_before(jiffies, poll_end)); + + ent->ret = -ETIMEDOUT; +} + +static void free_cmd(struct mlx5_cmd_work_ent *ent) +{ + kfree(ent); +} + + +static int verify_signature(struct mlx5_cmd_work_ent *ent) +{ + struct mlx5_cmd_mailbox *next = ent->out->next; + int err; + u8 sig; + + sig = xor8_buf(ent->lay, sizeof(*ent->lay)); + if (sig != 0xff) + return -EINVAL; + + while (next) { + err = verify_block_sig(next->buf); + if (err) + return err; + + next = next->next; + } + + return 0; +} + +static void dump_buf(void *buf, int size, int data_only, int offset) +{ + __be32 *p = buf; + int i; + + for (i = 0; i < size; i += 16) { + pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]), + be32_to_cpu(p[1]), be32_to_cpu(p[2]), + be32_to_cpu(p[3])); + p += 4; + offset += 16; + } + if (!data_only) + pr_debug("\n"); +} + +const char *mlx5_command_str(int command) +{ + switch (command) { + case MLX5_CMD_OP_QUERY_HCA_CAP: + return "QUERY_HCA_CAP"; + + case MLX5_CMD_OP_SET_HCA_CAP: + return "SET_HCA_CAP"; + + case MLX5_CMD_OP_QUERY_ADAPTER: + return "QUERY_ADAPTER"; + + case MLX5_CMD_OP_INIT_HCA: + return "INIT_HCA"; + + case MLX5_CMD_OP_TEARDOWN_HCA: + return "TEARDOWN_HCA"; + + case MLX5_CMD_OP_ENABLE_HCA: + return "MLX5_CMD_OP_ENABLE_HCA"; + + case MLX5_CMD_OP_DISABLE_HCA: + return "MLX5_CMD_OP_DISABLE_HCA"; + + case MLX5_CMD_OP_QUERY_PAGES: + return "QUERY_PAGES"; + + case MLX5_CMD_OP_MANAGE_PAGES: + return "MANAGE_PAGES"; + + case MLX5_CMD_OP_CREATE_MKEY: + return "CREATE_MKEY"; + + case MLX5_CMD_OP_QUERY_MKEY: + return "QUERY_MKEY"; + + case MLX5_CMD_OP_DESTROY_MKEY: + return "DESTROY_MKEY"; + + case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS: + return "QUERY_SPECIAL_CONTEXTS"; + + case MLX5_CMD_OP_CREATE_EQ: + return "CREATE_EQ"; + + case 
MLX5_CMD_OP_DESTROY_EQ: + return "DESTROY_EQ"; + + case MLX5_CMD_OP_QUERY_EQ: + return "QUERY_EQ"; + + case MLX5_CMD_OP_CREATE_CQ: + return "CREATE_CQ"; + + case MLX5_CMD_OP_DESTROY_CQ: + return "DESTROY_CQ"; + + case MLX5_CMD_OP_QUERY_CQ: + return "QUERY_CQ"; + + case MLX5_CMD_OP_MODIFY_CQ: + return "MODIFY_CQ"; + + case MLX5_CMD_OP_CREATE_QP: + return "CREATE_QP"; + + case MLX5_CMD_OP_DESTROY_QP: + return "DESTROY_QP"; + + case MLX5_CMD_OP_RST2INIT_QP: + return "RST2INIT_QP"; + + case MLX5_CMD_OP_INIT2RTR_QP: + return "INIT2RTR_QP"; + + case MLX5_CMD_OP_RTR2RTS_QP: + return "RTR2RTS_QP"; + + case MLX5_CMD_OP_RTS2RTS_QP: + return "RTS2RTS_QP"; + + case MLX5_CMD_OP_SQERR2RTS_QP: + return "SQERR2RTS_QP"; + + case MLX5_CMD_OP_2ERR_QP: + return "2ERR_QP"; + + case MLX5_CMD_OP_2RST_QP: + return "2RST_QP"; + + case MLX5_CMD_OP_QUERY_QP: + return "QUERY_QP"; + + case MLX5_CMD_OP_MAD_IFC: + return "MAD_IFC"; + + case MLX5_CMD_OP_INIT2INIT_QP: + return "INIT2INIT_QP"; + + case MLX5_CMD_OP_CREATE_PSV: + return "CREATE_PSV"; + + case MLX5_CMD_OP_DESTROY_PSV: + return "DESTROY_PSV"; + + case MLX5_CMD_OP_CREATE_SRQ: + return "CREATE_SRQ"; + + case MLX5_CMD_OP_DESTROY_SRQ: + return "DESTROY_SRQ"; + + case MLX5_CMD_OP_QUERY_SRQ: + return "QUERY_SRQ"; + + case MLX5_CMD_OP_ARM_RQ: + return "ARM_RQ"; + + case MLX5_CMD_OP_RESIZE_SRQ: + return "RESIZE_SRQ"; + + case MLX5_CMD_OP_ALLOC_PD: + return "ALLOC_PD"; + + case MLX5_CMD_OP_DEALLOC_PD: + return "DEALLOC_PD"; + + case MLX5_CMD_OP_ALLOC_UAR: + return "ALLOC_UAR"; + + case MLX5_CMD_OP_DEALLOC_UAR: + return "DEALLOC_UAR"; + + case MLX5_CMD_OP_ATTACH_TO_MCG: + return "ATTACH_TO_MCG"; + + case MLX5_CMD_OP_DETACH_FROM_MCG: + return "DETACH_FROM_MCG"; + + case MLX5_CMD_OP_ALLOC_XRCD: + return "ALLOC_XRCD"; + + case MLX5_CMD_OP_DEALLOC_XRCD: + return "DEALLOC_XRCD"; + + case MLX5_CMD_OP_ACCESS_REG: + return "MLX5_CMD_OP_ACCESS_REG"; + + default: return "unknown command opcode"; + } +} + +static void dump_command(struct mlx5_core_dev *dev, + struct mlx5_cmd_work_ent *ent, int input) +{ + u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode); + struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; + struct mlx5_cmd_mailbox *next = msg->next; + int data_only; + u32 offset = 0; + int dump_len; + + data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA)); + + if (data_only) + mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA, + "dump command data %s(0x%x) %s\n", + mlx5_command_str(op), op, + input ? "INPUT" : "OUTPUT"); + else + mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n", + mlx5_command_str(op), op, + input ? 
"INPUT" : "OUTPUT"); + + if (data_only) { + if (input) { + dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset); + offset += sizeof(ent->lay->in); + } else { + dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset); + offset += sizeof(ent->lay->out); + } + } else { + dump_buf(ent->lay, sizeof(*ent->lay), 0, offset); + offset += sizeof(*ent->lay); + } + + while (next && offset < msg->len) { + if (data_only) { + dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset); + dump_buf(next->buf, dump_len, 1, offset); + offset += MLX5_CMD_DATA_BLOCK_SIZE; + } else { + mlx5_core_dbg(dev, "command block:\n"); + dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset); + offset += sizeof(struct mlx5_cmd_prot_block); + } + next = next->next; + } + + if (data_only) + pr_debug("\n"); +} + +static void cmd_work_handler(struct work_struct *work) +{ + struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); + struct mlx5_cmd *cmd = ent->cmd; + struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd); + struct mlx5_cmd_layout *lay; + struct semaphore *sem; + + sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; + down(sem); + if (!ent->page_queue) { + ent->idx = alloc_ent(cmd); + if (ent->idx < 0) { + mlx5_core_err(dev, "failed to allocate command entry\n"); + up(sem); + return; + } + } else { + ent->idx = cmd->max_reg_cmds; + } + + ent->token = alloc_token(cmd); + cmd->ent_arr[ent->idx] = ent; + lay = get_inst(cmd, ent->idx); + ent->lay = lay; + memset(lay, 0, sizeof(*lay)); + memcpy(lay->in, ent->in->first.data, sizeof(lay->in)); + ent->op = be32_to_cpu(lay->in[0]) >> 16; + if (ent->in->next) + lay->in_ptr = cpu_to_be64(ent->in->next->dma); + lay->inlen = cpu_to_be32(ent->in->len); + if (ent->out->next) + lay->out_ptr = cpu_to_be64(ent->out->next->dma); + lay->outlen = cpu_to_be32(ent->out->len); + lay->type = MLX5_PCI_CMD_XPORT; + lay->token = ent->token; + lay->status_own = CMD_OWNER_HW; + set_signature(ent, !cmd->checksum_disabled); + dump_command(dev, ent, 1); + ent->ts1 = ktime_get_ns(); + + /* ring doorbell after the descriptor is valid */ + mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); + wmb(); + iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); + mmiowb(); + /* if not in polling don't use ent after this point */ + if (cmd->mode == CMD_MODE_POLLING) { + poll_timeout(ent); + /* make sure we read the descriptor after ownership is SW */ + rmb(); + mlx5_cmd_comp_handler(dev, 1UL << ent->idx); + } +} + +static const char *deliv_status_to_str(u8 status) +{ + switch (status) { + case MLX5_CMD_DELIVERY_STAT_OK: + return "no errors"; + case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR: + return "signature error"; + case MLX5_CMD_DELIVERY_STAT_TOK_ERR: + return "token error"; + case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: + return "bad block number"; + case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: + return "output pointer not aligned to block size"; + case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: + return "input pointer not aligned to block size"; + case MLX5_CMD_DELIVERY_STAT_FW_ERR: + return "firmware internal error"; + case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR: + return "command input length error"; + case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: + return "command ouput length error"; + case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: + return "reserved fields not cleared"; + case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR: + return "bad command descriptor type"; + default: + return "unknown status code"; + } +} + +static u16 
msg_to_opcode(struct mlx5_cmd_msg *in) +{ + struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data); + + return be16_to_cpu(hdr->opcode); +} + +static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) +{ + unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); + struct mlx5_cmd *cmd = &dev->cmd; + int err; + + if (cmd->mode == CMD_MODE_POLLING) { + wait_for_completion(&ent->done); + err = ent->ret; + } else { + if (!wait_for_completion_timeout(&ent->done, timeout)) + err = -ETIMEDOUT; + else + err = 0; + } + if (err == -ETIMEDOUT) { + mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", + mlx5_command_str(msg_to_opcode(ent->in)), + msg_to_opcode(ent->in)); + } + mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", + err, deliv_status_to_str(ent->status), ent->status); + + return err; +} + +/* Notes: + * 1. Callback functions may not sleep + * 2. page queue commands do not support asynchrous completion + */ +static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, + struct mlx5_cmd_msg *out, void *uout, int uout_size, + mlx5_cmd_cbk_t callback, + void *context, int page_queue, u8 *status) +{ + struct mlx5_cmd *cmd = &dev->cmd; + struct mlx5_cmd_work_ent *ent; + struct mlx5_cmd_stats *stats; + int err = 0; + s64 ds; + u16 op; + + if (callback && page_queue) + return -EINVAL; + + ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context, + page_queue); + if (IS_ERR(ent)) + return PTR_ERR(ent); + + if (!callback) + init_completion(&ent->done); + + INIT_WORK(&ent->work, cmd_work_handler); + if (page_queue) { + cmd_work_handler(&ent->work); + } else if (!queue_work(cmd->wq, &ent->work)) { + mlx5_core_warn(dev, "failed to queue work\n"); + err = -ENOMEM; + goto out_free; + } + + if (!callback) { + err = wait_func(dev, ent); + if (err == -ETIMEDOUT) + goto out; + + ds = ent->ts2 - ent->ts1; + op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode); + if (op < ARRAY_SIZE(cmd->stats)) { + stats = &cmd->stats[op]; + spin_lock_irq(&stats->lock); + stats->sum += ds; + ++stats->n; + spin_unlock_irq(&stats->lock); + } + mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, + "fw exec time for %s is %lld nsec\n", + mlx5_command_str(op), ds); + *status = ent->status; + free_cmd(ent); + } + + return err; + +out_free: + free_cmd(ent); +out: + return err; +} + +static ssize_t dbg_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct mlx5_core_dev *dev = filp->private_data; + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + char lbuf[3]; + int err; + + if (!dbg->in_msg || !dbg->out_msg) + return -ENOMEM; + + if (copy_from_user(lbuf, buf, sizeof(lbuf))) + return -EFAULT; + + lbuf[sizeof(lbuf) - 1] = 0; + + if (strcmp(lbuf, "go")) + return -EINVAL; + + err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen); + + return err ? 
err : count; +} + + +static const struct file_operations fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = dbg_write, +}; + +static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size) +{ + struct mlx5_cmd_prot_block *block; + struct mlx5_cmd_mailbox *next; + int copy; + + if (!to || !from) + return -ENOMEM; + + copy = min_t(int, size, sizeof(to->first.data)); + memcpy(to->first.data, from, copy); + size -= copy; + from += copy; + + next = to->next; + while (size) { + if (!next) { + /* this is a BUG */ + return -ENOMEM; + } + + copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); + block = next->buf; + memcpy(block->data, from, copy); + from += copy; + size -= copy; + next = next->next; + } + + return 0; +} + +static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size) +{ + struct mlx5_cmd_prot_block *block; + struct mlx5_cmd_mailbox *next; + int copy; + + if (!to || !from) + return -ENOMEM; + + copy = min_t(int, size, sizeof(from->first.data)); + memcpy(to, from->first.data, copy); + size -= copy; + to += copy; + + next = from->next; + while (size) { + if (!next) { + /* this is a BUG */ + return -ENOMEM; + } + + copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); + block = next->buf; + + memcpy(to, block->data, copy); + to += copy; + size -= copy; + next = next->next; + } + + return 0; +} + +static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev, + gfp_t flags) +{ + struct mlx5_cmd_mailbox *mailbox; + + mailbox = kmalloc(sizeof(*mailbox), flags); + if (!mailbox) + return ERR_PTR(-ENOMEM); + + mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags, + &mailbox->dma); + if (!mailbox->buf) { + mlx5_core_dbg(dev, "failed allocation\n"); + kfree(mailbox); + return ERR_PTR(-ENOMEM); + } + memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block)); + mailbox->next = NULL; + + return mailbox; +} + +static void free_cmd_box(struct mlx5_core_dev *dev, + struct mlx5_cmd_mailbox *mailbox) +{ + pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma); + kfree(mailbox); +} + +static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, + gfp_t flags, int size) +{ + struct mlx5_cmd_mailbox *tmp, *head = NULL; + struct mlx5_cmd_prot_block *block; + struct mlx5_cmd_msg *msg; + int blen; + int err; + int n; + int i; + + msg = kzalloc(sizeof(*msg), flags); + if (!msg) + return ERR_PTR(-ENOMEM); + + blen = size - min_t(int, sizeof(msg->first.data), size); + n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE; + + for (i = 0; i < n; i++) { + tmp = alloc_cmd_box(dev, flags); + if (IS_ERR(tmp)) { + mlx5_core_warn(dev, "failed allocating block\n"); + err = PTR_ERR(tmp); + goto err_alloc; + } + + block = tmp->buf; + tmp->next = head; + block->next = cpu_to_be64(tmp->next ? 
tmp->next->dma : 0); + block->block_num = cpu_to_be32(n - i - 1); + head = tmp; + } + msg->next = head; + msg->len = size; + return msg; + +err_alloc: + while (head) { + tmp = head->next; + free_cmd_box(dev, head); + head = tmp; + } + kfree(msg); + + return ERR_PTR(err); +} + +static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, + struct mlx5_cmd_msg *msg) +{ + struct mlx5_cmd_mailbox *head = msg->next; + struct mlx5_cmd_mailbox *next; + + while (head) { + next = head->next; + free_cmd_box(dev, head); + head = next; + } + kfree(msg); +} + +static ssize_t data_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct mlx5_core_dev *dev = filp->private_data; + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + void *ptr; + int err; + + if (*pos != 0) + return -EINVAL; + + kfree(dbg->in_msg); + dbg->in_msg = NULL; + dbg->inlen = 0; + + ptr = kzalloc(count, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + if (copy_from_user(ptr, buf, count)) { + err = -EFAULT; + goto out; + } + dbg->in_msg = ptr; + dbg->inlen = count; + + *pos = count; + + return count; + +out: + kfree(ptr); + return err; +} + +static ssize_t data_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct mlx5_core_dev *dev = filp->private_data; + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + int copy; + + if (*pos) + return 0; + + if (!dbg->out_msg) + return -ENOMEM; + + copy = min_t(int, count, dbg->outlen); + if (copy_to_user(buf, dbg->out_msg, copy)) + return -EFAULT; + + *pos += copy; + + return copy; +} + +static const struct file_operations dfops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = data_write, + .read = data_read, +}; + +static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct mlx5_core_dev *dev = filp->private_data; + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + char outlen[8]; + int err; + + if (*pos) + return 0; + + err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen); + if (err < 0) + return err; + + if (copy_to_user(buf, &outlen, err)) + return -EFAULT; + + *pos += err; + + return err; +} + +static ssize_t outlen_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct mlx5_core_dev *dev = filp->private_data; + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + char outlen_str[8]; + int outlen; + void *ptr; + int err; + + if (*pos != 0 || count > 6) + return -EINVAL; + + kfree(dbg->out_msg); + dbg->out_msg = NULL; + dbg->outlen = 0; + + if (copy_from_user(outlen_str, buf, count)) + return -EFAULT; + + outlen_str[7] = 0; + + err = sscanf(outlen_str, "%d", &outlen); + if (err < 0) + return err; + + ptr = kzalloc(outlen, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + dbg->out_msg = ptr; + dbg->outlen = outlen; + + *pos = count; + + return count; +} + +static const struct file_operations olfops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = outlen_write, + .read = outlen_read, +}; + +static void set_wqname(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + + snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s", + dev_name(&dev->pdev->dev)); +} + +static void clean_debug_files(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + + if (!mlx5_debugfs_root) + return; + + mlx5_cmdif_debugfs_cleanup(dev); + debugfs_remove_recursive(dbg->dbg_root); +} + +static int create_debugfs_files(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + int err = -ENOMEM; + + if 
(!mlx5_debugfs_root) + return 0; + + dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root); + if (!dbg->dbg_root) + return err; + + dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root, + dev, &dfops); + if (!dbg->dbg_in) + goto err_dbg; + + dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root, + dev, &dfops); + if (!dbg->dbg_out) + goto err_dbg; + + dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root, + dev, &olfops); + if (!dbg->dbg_outlen) + goto err_dbg; + + dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root, + &dbg->status); + if (!dbg->dbg_status) + goto err_dbg; + + dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops); + if (!dbg->dbg_run) + goto err_dbg; + + mlx5_cmdif_debugfs_init(dev); + + return 0; + +err_dbg: + clean_debug_files(dev); + return err; +} + +void mlx5_cmd_use_events(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + int i; + + for (i = 0; i < cmd->max_reg_cmds; i++) + down(&cmd->sem); + + down(&cmd->pages_sem); + + flush_workqueue(cmd->wq); + + cmd->mode = CMD_MODE_EVENTS; + + up(&cmd->pages_sem); + for (i = 0; i < cmd->max_reg_cmds; i++) + up(&cmd->sem); +} + +void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + int i; + + for (i = 0; i < cmd->max_reg_cmds; i++) + down(&cmd->sem); + + down(&cmd->pages_sem); + + flush_workqueue(cmd->wq); + cmd->mode = CMD_MODE_POLLING; + + up(&cmd->pages_sem); + for (i = 0; i < cmd->max_reg_cmds; i++) + up(&cmd->sem); +} + +static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) +{ + unsigned long flags; + + if (msg->cache) { + spin_lock_irqsave(&msg->cache->lock, flags); + list_add_tail(&msg->list, &msg->cache->head); + spin_unlock_irqrestore(&msg->cache->lock, flags); + } else { + mlx5_free_cmd_msg(dev, msg); + } +} + +void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector) +{ + struct mlx5_cmd *cmd = &dev->cmd; + struct mlx5_cmd_work_ent *ent; + mlx5_cmd_cbk_t callback; + void *context; + int err; + int i; + s64 ds; + struct mlx5_cmd_stats *stats; + unsigned long flags; + + for (i = 0; i < (1 << cmd->log_sz); i++) { + if (test_bit(i, &vector)) { + struct semaphore *sem; + + ent = cmd->ent_arr[i]; + if (ent->page_queue) + sem = &cmd->pages_sem; + else + sem = &cmd->sem; + ent->ts2 = ktime_get_ns(); + memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out)); + dump_command(dev, ent, 0); + if (!ent->ret) { + if (!cmd->checksum_disabled) + ent->ret = verify_signature(ent); + else + ent->ret = 0; + ent->status = ent->lay->status_own >> 1; + mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n", + ent->ret, deliv_status_to_str(ent->status), ent->status); + } + free_ent(cmd, ent->idx); + if (ent->callback) { + ds = ent->ts2 - ent->ts1; + if (ent->op < ARRAY_SIZE(cmd->stats)) { + stats = &cmd->stats[ent->op]; + spin_lock_irqsave(&stats->lock, flags); + stats->sum += ds; + ++stats->n; + spin_unlock_irqrestore(&stats->lock, flags); + } + + callback = ent->callback; + context = ent->context; + err = ent->ret; + if (!err) + err = mlx5_copy_from_msg(ent->uout, + ent->out, + ent->uout_size); + + mlx5_free_cmd_msg(dev, ent->out); + free_msg(dev, ent->in); + + free_cmd(ent); + callback(err, context); + } else { + complete(&ent->done); + } + up(sem); + } + } +} +EXPORT_SYMBOL(mlx5_cmd_comp_handler); + +static int status_to_err(u8 status) +{ + return status ? 
-1 : 0; /* TBD more meaningful codes */ +} + +static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, + gfp_t gfp) +{ + struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM); + struct mlx5_cmd *cmd = &dev->cmd; + struct cache_ent *ent = NULL; + + if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE) + ent = &cmd->cache.large; + else if (in_size > 16 && in_size <= MED_LIST_SIZE) + ent = &cmd->cache.med; + + if (ent) { + spin_lock_irq(&ent->lock); + if (!list_empty(&ent->head)) { + msg = list_entry(ent->head.next, typeof(*msg), list); + /* For cached lists, we must explicitly state what is + * the real size + */ + msg->len = in_size; + list_del(&msg->list); + } + spin_unlock_irq(&ent->lock); + } + + if (IS_ERR(msg)) + msg = mlx5_alloc_cmd_msg(dev, gfp, in_size); + + return msg; +} + +static int is_manage_pages(struct mlx5_inbox_hdr *in) +{ + return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES; +} + +static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, + int out_size, mlx5_cmd_cbk_t callback, void *context) +{ + struct mlx5_cmd_msg *inb; + struct mlx5_cmd_msg *outb; + int pages_queue; + gfp_t gfp; + int err; + u8 status = 0; + + pages_queue = is_manage_pages(in); + gfp = callback ? GFP_ATOMIC : GFP_KERNEL; + + inb = alloc_msg(dev, in_size, gfp); + if (IS_ERR(inb)) { + err = PTR_ERR(inb); + return err; + } + + err = mlx5_copy_to_msg(inb, in, in_size); + if (err) { + mlx5_core_warn(dev, "err %d\n", err); + goto out_in; + } + + outb = mlx5_alloc_cmd_msg(dev, gfp, out_size); + if (IS_ERR(outb)) { + err = PTR_ERR(outb); + goto out_in; + } + + err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context, + pages_queue, &status); + if (err) + goto out_out; + + mlx5_core_dbg(dev, "err %d, status %d\n", err, status); + if (status) { + err = status_to_err(status); + goto out_out; + } + + if (!callback) + err = mlx5_copy_from_msg(out, outb, out_size); + +out_out: + if (!callback) + mlx5_free_cmd_msg(dev, outb); + +out_in: + if (!callback) + free_msg(dev, inb); + return err; +} + +int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, + int out_size) +{ + return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL); +} +EXPORT_SYMBOL(mlx5_cmd_exec); + +int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, + void *out, int out_size, mlx5_cmd_cbk_t callback, + void *context) +{ + return cmd_exec(dev, in, in_size, out, out_size, callback, context); +} +EXPORT_SYMBOL(mlx5_cmd_exec_cb); + +static void destroy_msg_cache(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + struct mlx5_cmd_msg *msg; + struct mlx5_cmd_msg *n; + + list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) { + list_del(&msg->list); + mlx5_free_cmd_msg(dev, msg); + } + + list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) { + list_del(&msg->list); + mlx5_free_cmd_msg(dev, msg); + } +} + +static int create_msg_cache(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + struct mlx5_cmd_msg *msg; + int err; + int i; + + spin_lock_init(&cmd->cache.large.lock); + INIT_LIST_HEAD(&cmd->cache.large.head); + spin_lock_init(&cmd->cache.med.lock); + INIT_LIST_HEAD(&cmd->cache.med.head); + + for (i = 0; i < NUM_LONG_LISTS; i++) { + msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE); + if (IS_ERR(msg)) { + err = PTR_ERR(msg); + goto ex_err; + } + msg->cache = &cmd->cache.large; + list_add_tail(&msg->list, &cmd->cache.large.head); + } + + for (i = 0; i < NUM_MED_LISTS; i++) { + msg = 
mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE); + if (IS_ERR(msg)) { + err = PTR_ERR(msg); + goto ex_err; + } + msg->cache = &cmd->cache.med; + list_add_tail(&msg->list, &cmd->cache.med.head); + } + + return 0; + +ex_err: + destroy_msg_cache(dev); + return err; +} + +static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) +{ + struct device *ddev = &dev->pdev->dev; + + cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, + &cmd->alloc_dma, GFP_KERNEL); + if (!cmd->cmd_alloc_buf) + return -ENOMEM; + + /* make sure it is aligned to 4K */ + if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) { + cmd->cmd_buf = cmd->cmd_alloc_buf; + cmd->dma = cmd->alloc_dma; + cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE; + return 0; + } + + dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf, + cmd->alloc_dma); + cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, + 2 * MLX5_ADAPTER_PAGE_SIZE - 1, + &cmd->alloc_dma, GFP_KERNEL); + if (!cmd->cmd_alloc_buf) + return -ENOMEM; + + cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE); + cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE); + cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1; + return 0; +} + +static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) +{ + struct device *ddev = &dev->pdev->dev; + + dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf, + cmd->alloc_dma); +} + +int mlx5_cmd_init(struct mlx5_core_dev *dev) +{ + int size = sizeof(struct mlx5_cmd_prot_block); + int align = roundup_pow_of_two(size); + struct mlx5_cmd *cmd = &dev->cmd; + u32 cmd_h, cmd_l; + u16 cmd_if_rev; + int err; + int i; + + cmd_if_rev = cmdif_rev(dev); + if (cmd_if_rev != CMD_IF_REV) { + dev_err(&dev->pdev->dev, + "Driver cmdif rev(%d) differs from firmware's(%d)\n", + CMD_IF_REV, cmd_if_rev); + return -EINVAL; + } + + cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0); + if (!cmd->pool) + return -ENOMEM; + + err = alloc_cmd_page(dev, cmd); + if (err) + goto err_free_pool; + + cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff; + cmd->log_sz = cmd_l >> 4 & 0xf; + cmd->log_stride = cmd_l & 0xf; + if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) { + dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n", + 1 << cmd->log_sz); + err = -EINVAL; + goto err_free_page; + } + + if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) { + dev_err(&dev->pdev->dev, "command queue size overflow\n"); + err = -EINVAL; + goto err_free_page; + } + + cmd->checksum_disabled = 1; + cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; + cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; + + cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; + if (cmd->cmdif_rev > CMD_IF_REV) { + dev_err(&dev->pdev->dev, "driver does not support command interface version. 
driver %d, firmware %d\n", + CMD_IF_REV, cmd->cmdif_rev); + err = -ENOTSUPP; + goto err_free_page; + } + + spin_lock_init(&cmd->alloc_lock); + spin_lock_init(&cmd->token_lock); + for (i = 0; i < ARRAY_SIZE(cmd->stats); i++) + spin_lock_init(&cmd->stats[i].lock); + + sema_init(&cmd->sem, cmd->max_reg_cmds); + sema_init(&cmd->pages_sem, 1); + + cmd_h = (u32)((u64)(cmd->dma) >> 32); + cmd_l = (u32)(cmd->dma); + if (cmd_l & 0xfff) { + dev_err(&dev->pdev->dev, "invalid command queue address\n"); + err = -ENOMEM; + goto err_free_page; + } + + iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h); + iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz); + + /* Make sure firmware sees the complete address before we proceed */ + wmb(); + + mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma)); + + cmd->mode = CMD_MODE_POLLING; + + err = create_msg_cache(dev); + if (err) { + dev_err(&dev->pdev->dev, "failed to create command cache\n"); + goto err_free_page; + } + + set_wqname(dev); + cmd->wq = create_singlethread_workqueue(cmd->wq_name); + if (!cmd->wq) { + dev_err(&dev->pdev->dev, "failed to create command workqueue\n"); + err = -ENOMEM; + goto err_cache; + } + + err = create_debugfs_files(dev); + if (err) { + err = -ENOMEM; + goto err_wq; + } + + return 0; + +err_wq: + destroy_workqueue(cmd->wq); + +err_cache: + destroy_msg_cache(dev); + +err_free_page: + free_cmd_page(dev, cmd); + +err_free_pool: + pci_pool_destroy(cmd->pool); + + return err; +} +EXPORT_SYMBOL(mlx5_cmd_init); + +void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + + clean_debug_files(dev); + destroy_workqueue(cmd->wq); + destroy_msg_cache(dev); + free_cmd_page(dev, cmd); + pci_pool_destroy(cmd->pool); +} +EXPORT_SYMBOL(mlx5_cmd_cleanup); + +static const char *cmd_status_str(u8 status) +{ + switch (status) { + case MLX5_CMD_STAT_OK: + return "OK"; + case MLX5_CMD_STAT_INT_ERR: + return "internal error"; + case MLX5_CMD_STAT_BAD_OP_ERR: + return "bad operation"; + case MLX5_CMD_STAT_BAD_PARAM_ERR: + return "bad parameter"; + case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: + return "bad system state"; + case MLX5_CMD_STAT_BAD_RES_ERR: + return "bad resource"; + case MLX5_CMD_STAT_RES_BUSY: + return "resource busy"; + case MLX5_CMD_STAT_LIM_ERR: + return "limits exceeded"; + case MLX5_CMD_STAT_BAD_RES_STATE_ERR: + return "bad resource state"; + case MLX5_CMD_STAT_IX_ERR: + return "bad index"; + case MLX5_CMD_STAT_NO_RES_ERR: + return "no resources"; + case MLX5_CMD_STAT_BAD_INP_LEN_ERR: + return "bad input length"; + case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: + return "bad output length"; + case MLX5_CMD_STAT_BAD_QP_STATE_ERR: + return "bad QP state"; + case MLX5_CMD_STAT_BAD_PKT_ERR: + return "bad packet (discarded)"; + case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: + return "bad size too many outstanding CQEs"; + default: + return "unknown status"; + } +} + +static int cmd_status_to_err(u8 status) +{ + switch (status) { + case MLX5_CMD_STAT_OK: return 0; + case MLX5_CMD_STAT_INT_ERR: return -EIO; + case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; + case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; + case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; + case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM; + case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; + case MLX5_CMD_STAT_IX_ERR: return -EINVAL; + case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; + case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO; + case 
MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO; + case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; + default: return -EIO; + } +} + +/* this will be available till all the commands use set/get macros */ +int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr) +{ + if (!hdr->status) + return 0; + + pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n", + cmd_status_str(hdr->status), hdr->status, + be32_to_cpu(hdr->syndrome)); + + return cmd_status_to_err(hdr->status); +} + +int mlx5_cmd_status_to_err_v2(void *ptr) +{ + u32 syndrome; + u8 status; + + status = be32_to_cpu(*(__be32 *)ptr) >> 24; + if (!status) + return 0; + + syndrome = be32_to_cpu(*(__be32 *)(ptr + 4)); + + pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n", + cmd_status_str(status), status, syndrome); + + return cmd_status_to_err(status); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c new file mode 100644 index 000000000..eb0cf81f5 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -0,0 +1,237 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/hardirq.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include <rdma/ib_verbs.h> +#include <linux/mlx5/cq.h> +#include "mlx5_core.h" + +void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn) +{ + struct mlx5_core_cq *cq; + struct mlx5_cq_table *table = &dev->priv.cq_table; + + spin_lock(&table->lock); + cq = radix_tree_lookup(&table->tree, cqn); + if (likely(cq)) + atomic_inc(&cq->refcount); + spin_unlock(&table->lock); + + if (!cq) { + mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn); + return; + } + + ++cq->arm_sn; + + cq->comp(cq); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); +} + +void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type) +{ + struct mlx5_cq_table *table = &dev->priv.cq_table; + struct mlx5_core_cq *cq; + + spin_lock(&table->lock); + + cq = radix_tree_lookup(&table->tree, cqn); + if (cq) + atomic_inc(&cq->refcount); + + spin_unlock(&table->lock); + + if (!cq) { + mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn); + return; + } + + cq->event(cq, event_type); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); +} + + +int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + struct mlx5_create_cq_mbox_in *in, int inlen) +{ + int err; + struct mlx5_cq_table *table = &dev->priv.cq_table; + struct mlx5_create_cq_mbox_out out; + struct mlx5_destroy_cq_mbox_in din; + struct mlx5_destroy_cq_mbox_out dout; + + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ); + memset(&out, 0, sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + cq->cqn = be32_to_cpu(out.cqn) & 0xffffff; + cq->cons_index = 0; + cq->arm_sn = 0; + atomic_set(&cq->refcount, 1); + init_completion(&cq->free); + + spin_lock_irq(&table->lock); + err = radix_tree_insert(&table->tree, cq->cqn, cq); + spin_unlock_irq(&table->lock); + if (err) + goto err_cmd; + + cq->pid = current->pid; + err = mlx5_debug_cq_add(dev, cq); + if (err) + mlx5_core_dbg(dev, "failed adding CP 0x%x to debug file system\n", + cq->cqn); + + return 0; + +err_cmd: + memset(&din, 0, sizeof(din)); + memset(&dout, 0, sizeof(dout)); + din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ); + mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout)); + return err; +} +EXPORT_SYMBOL(mlx5_core_create_cq); + +int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) +{ + struct mlx5_cq_table *table = &dev->priv.cq_table; + struct mlx5_destroy_cq_mbox_in in; + struct mlx5_destroy_cq_mbox_out out; + struct mlx5_core_cq *tmp; + int err; + + spin_lock_irq(&table->lock); + tmp = radix_tree_delete(&table->tree, cq->cqn); + spin_unlock_irq(&table->lock); + if (!tmp) { + mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn); + return -EINVAL; + } + if (tmp != cq) { + mlx5_core_warn(dev, "corruption on srqn 0x%x\n", cq->cqn); + return -EINVAL; + } + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ); + in.cqn = cpu_to_be32(cq->cqn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + synchronize_irq(cq->irqn); + + mlx5_debug_cq_remove(dev, cq); + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); + wait_for_completion(&cq->free); + + return 0; +} 
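+/* Teardown note: mlx5_core_destroy_cq() above removes the CQ from the radix
+ * tree before issuing DESTROY_CQ, then drops the initial reference and waits
+ * on cq->free, so it does not return while mlx5_cq_completion() or
+ * mlx5_cq_event() still holds a reference to the CQ. */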
+EXPORT_SYMBOL(mlx5_core_destroy_cq); + +int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + struct mlx5_query_cq_mbox_out *out) +{ + struct mlx5_query_cq_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(out, 0, sizeof(*out)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ); + in.cqn = cpu_to_be32(cq->cqn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err) + return err; + + if (out->hdr.status) + return mlx5_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_query_cq); + + +int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + struct mlx5_modify_cq_mbox_in *in, int in_sz) +{ + struct mlx5_modify_cq_mbox_out out; + int err; + + memset(&out, 0, sizeof(out)); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ); + err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + return 0; +} +EXPORT_SYMBOL(mlx5_core_modify_cq); + +int mlx5_init_cq_table(struct mlx5_core_dev *dev) +{ + struct mlx5_cq_table *table = &dev->priv.cq_table; + int err; + + spin_lock_init(&table->lock); + INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); + err = mlx5_cq_debugfs_init(dev); + + return err; +} + +void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev) +{ + mlx5_cq_debugfs_cleanup(dev); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c new file mode 100644 index 000000000..5210d92e6 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -0,0 +1,610 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/module.h> +#include <linux/debugfs.h> +#include <linux/mlx5/qp.h> +#include <linux/mlx5/cq.h> +#include <linux/mlx5/driver.h> +#include "mlx5_core.h" + +enum { + QP_PID, + QP_STATE, + QP_XPORT, + QP_MTU, + QP_N_RECV, + QP_RECV_SZ, + QP_N_SEND, + QP_LOG_PG_SZ, + QP_RQPN, +}; + +static char *qp_fields[] = { + [QP_PID] = "pid", + [QP_STATE] = "state", + [QP_XPORT] = "transport", + [QP_MTU] = "mtu", + [QP_N_RECV] = "num_recv", + [QP_RECV_SZ] = "rcv_wqe_sz", + [QP_N_SEND] = "num_send", + [QP_LOG_PG_SZ] = "log2_page_sz", + [QP_RQPN] = "remote_qpn", +}; + +enum { + EQ_NUM_EQES, + EQ_INTR, + EQ_LOG_PG_SZ, +}; + +static char *eq_fields[] = { + [EQ_NUM_EQES] = "num_eqes", + [EQ_INTR] = "intr", + [EQ_LOG_PG_SZ] = "log_page_size", +}; + +enum { + CQ_PID, + CQ_NUM_CQES, + CQ_LOG_PG_SZ, +}; + +static char *cq_fields[] = { + [CQ_PID] = "pid", + [CQ_NUM_CQES] = "num_cqes", + [CQ_LOG_PG_SZ] = "log_page_size", +}; + +struct dentry *mlx5_debugfs_root; +EXPORT_SYMBOL(mlx5_debugfs_root); + +void mlx5_register_debugfs(void) +{ + mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL); + if (IS_ERR_OR_NULL(mlx5_debugfs_root)) + mlx5_debugfs_root = NULL; +} + +void mlx5_unregister_debugfs(void) +{ + debugfs_remove(mlx5_debugfs_root); +} + +int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return 0; + + atomic_set(&dev->num_qps, 0); + + dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root); + if (!dev->priv.qp_debugfs) + return -ENOMEM; + + return 0; +} + +void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return; + + debugfs_remove_recursive(dev->priv.qp_debugfs); +} + +int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return 0; + + dev->priv.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg_root); + if (!dev->priv.eq_debugfs) + return -ENOMEM; + + return 0; +} + +void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return; + + debugfs_remove_recursive(dev->priv.eq_debugfs); +} + +static ssize_t average_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct mlx5_cmd_stats *stats; + u64 field = 0; + int ret; + char tbuf[22]; + + if (*pos) + return 0; + + stats = filp->private_data; + spin_lock_irq(&stats->lock); + if (stats->n) + field = div64_u64(stats->sum, stats->n); + spin_unlock_irq(&stats->lock); + ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field); + if (ret > 0) { + if (copy_to_user(buf, tbuf, ret)) + return -EFAULT; + } + + *pos += ret; + return ret; +} + + +static ssize_t average_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct mlx5_cmd_stats *stats; + + stats = filp->private_data; + spin_lock_irq(&stats->lock); + stats->sum = 0; + stats->n = 0; + spin_unlock_irq(&stats->lock); + + *pos += count; + + return count; +} + +static const struct file_operations stats_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = average_read, + .write = average_write, +}; + +int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_stats *stats; + struct dentry **cmd; + const char *namep; + int err; + int i; + + if (!mlx5_debugfs_root) + return 0; + + cmd = &dev->priv.cmdif_debugfs; + *cmd = debugfs_create_dir("commands", dev->priv.dbg_root); + if (!*cmd) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(dev->cmd.stats); i++) { + stats = &dev->cmd.stats[i]; + namep = mlx5_command_str(i); + if (strcmp(namep, "unknown command opcode")) { + stats->root 
= debugfs_create_dir(namep, *cmd); + if (!stats->root) { + mlx5_core_warn(dev, "failed adding command %d\n", + i); + err = -ENOMEM; + goto out; + } + + stats->avg = debugfs_create_file("average", 0400, + stats->root, stats, + &stats_fops); + if (!stats->avg) { + mlx5_core_warn(dev, "failed creating debugfs file\n"); + err = -ENOMEM; + goto out; + } + + stats->count = debugfs_create_u64("n", 0400, + stats->root, + &stats->n); + if (!stats->count) { + mlx5_core_warn(dev, "failed creating debugfs file\n"); + err = -ENOMEM; + goto out; + } + } + } + + return 0; +out: + debugfs_remove_recursive(dev->priv.cmdif_debugfs); + return err; +} + +void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return; + + debugfs_remove_recursive(dev->priv.cmdif_debugfs); +} + +int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return 0; + + dev->priv.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg_root); + if (!dev->priv.cq_debugfs) + return -ENOMEM; + + return 0; +} + +void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return; + + debugfs_remove_recursive(dev->priv.cq_debugfs); +} + +static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, + int index, int *is_str) +{ + struct mlx5_query_qp_mbox_out *out; + struct mlx5_qp_context *ctx; + u64 param = 0; + int err; + int no_sq; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return param; + + err = mlx5_core_qp_query(dev, qp, out, sizeof(*out)); + if (err) { + mlx5_core_warn(dev, "failed to query qp\n"); + goto out; + } + + *is_str = 0; + ctx = &out->ctx; + switch (index) { + case QP_PID: + param = qp->pid; + break; + case QP_STATE: + param = (unsigned long)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28); + *is_str = 1; + break; + case QP_XPORT: + param = (unsigned long)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff); + *is_str = 1; + break; + case QP_MTU: + switch (ctx->mtu_msgmax >> 5) { + case IB_MTU_256: + param = 256; + break; + case IB_MTU_512: + param = 512; + break; + case IB_MTU_1024: + param = 1024; + break; + case IB_MTU_2048: + param = 2048; + break; + case IB_MTU_4096: + param = 4096; + break; + default: + param = 0; + } + break; + case QP_N_RECV: + param = 1 << ((ctx->rq_size_stride >> 3) & 0xf); + break; + case QP_RECV_SZ: + param = 1 << ((ctx->rq_size_stride & 7) + 4); + break; + case QP_N_SEND: + no_sq = be16_to_cpu(ctx->sq_crq_size) >> 15; + if (!no_sq) + param = 1 << (be16_to_cpu(ctx->sq_crq_size) >> 11); + else + param = 0; + break; + case QP_LOG_PG_SZ: + param = (be32_to_cpu(ctx->log_pg_sz_remote_qpn) >> 24) & 0x1f; + param += 12; + break; + case QP_RQPN: + param = be32_to_cpu(ctx->log_pg_sz_remote_qpn) & 0xffffff; + break; + } + +out: + kfree(out); + return param; +} + +static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq, + int index) +{ + struct mlx5_query_eq_mbox_out *out; + struct mlx5_eq_context *ctx; + u64 param = 0; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return param; + + ctx = &out->ctx; + + err = mlx5_core_eq_query(dev, eq, out, sizeof(*out)); + if (err) { + mlx5_core_warn(dev, "failed to query eq\n"); + goto out; + } + + switch (index) { + case EQ_NUM_EQES: + param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f); + break; + case EQ_INTR: + param = ctx->intr; + break; + case EQ_LOG_PG_SZ: + param = (ctx->log_page_size & 0x1f) + 12; + break; + } + +out: + kfree(out); + return param; +} + +static u64 cq_read_field(struct 
mlx5_core_dev *dev, struct mlx5_core_cq *cq, + int index) +{ + struct mlx5_query_cq_mbox_out *out; + struct mlx5_cq_context *ctx; + u64 param = 0; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return param; + + ctx = &out->ctx; + + err = mlx5_core_query_cq(dev, cq, out); + if (err) { + mlx5_core_warn(dev, "failed to query cq\n"); + goto out; + } + + switch (index) { + case CQ_PID: + param = cq->pid; + break; + case CQ_NUM_CQES: + param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f); + break; + case CQ_LOG_PG_SZ: + param = (ctx->log_pg_sz & 0x1f) + 12; + break; + } + +out: + kfree(out); + return param; +} + +static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct mlx5_field_desc *desc; + struct mlx5_rsc_debug *d; + char tbuf[18]; + int is_str = 0; + u64 field; + int ret; + + if (*pos) + return 0; + + desc = filp->private_data; + d = (void *)(desc - desc->i) - sizeof(*d); + switch (d->type) { + case MLX5_DBG_RSC_QP: + field = qp_read_field(d->dev, d->object, desc->i, &is_str); + break; + + case MLX5_DBG_RSC_EQ: + field = eq_read_field(d->dev, d->object, desc->i); + break; + + case MLX5_DBG_RSC_CQ: + field = cq_read_field(d->dev, d->object, desc->i); + break; + + default: + mlx5_core_warn(d->dev, "invalid resource type %d\n", d->type); + return -EINVAL; + } + + + if (is_str) + ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field); + else + ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field); + + if (ret > 0) { + if (copy_to_user(buf, tbuf, ret)) + return -EFAULT; + } + + *pos += ret; + return ret; +} + +static const struct file_operations fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = dbg_read, +}; + +static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type, + struct dentry *root, struct mlx5_rsc_debug **dbg, + int rsn, char **field, int nfile, void *data) +{ + struct mlx5_rsc_debug *d; + char resn[32]; + int err; + int i; + + d = kzalloc(sizeof(*d) + nfile * sizeof(d->fields[0]), GFP_KERNEL); + if (!d) + return -ENOMEM; + + d->dev = dev; + d->object = data; + d->type = type; + sprintf(resn, "0x%x", rsn); + d->root = debugfs_create_dir(resn, root); + if (!d->root) { + err = -ENOMEM; + goto out_free; + } + + for (i = 0; i < nfile; i++) { + d->fields[i].i = i; + d->fields[i].dent = debugfs_create_file(field[i], 0400, + d->root, &d->fields[i], + &fops); + if (!d->fields[i].dent) { + err = -ENOMEM; + goto out_rem; + } + } + *dbg = d; + + return 0; +out_rem: + debugfs_remove_recursive(d->root); + +out_free: + kfree(d); + return err; +} + +static void rem_res_tree(struct mlx5_rsc_debug *d) +{ + debugfs_remove_recursive(d->root); + kfree(d); +} + +int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) +{ + int err; + + if (!mlx5_debugfs_root) + return 0; + + err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.qp_debugfs, + &qp->dbg, qp->qpn, qp_fields, + ARRAY_SIZE(qp_fields), qp); + if (err) + qp->dbg = NULL; + + return err; +} + +void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) +{ + if (!mlx5_debugfs_root) + return; + + if (qp->dbg) + rem_res_tree(qp->dbg); +} + + +int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq) +{ + int err; + + if (!mlx5_debugfs_root) + return 0; + + err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.eq_debugfs, + &eq->dbg, eq->eqn, eq_fields, + ARRAY_SIZE(eq_fields), eq); + if (err) + eq->dbg = NULL; + + return err; +} + +void mlx5_debug_eq_remove(struct mlx5_core_dev 
*dev, struct mlx5_eq *eq) +{ + if (!mlx5_debugfs_root) + return; + + if (eq->dbg) + rem_res_tree(eq->dbg); +} + +int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) +{ + int err; + + if (!mlx5_debugfs_root) + return 0; + + err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.cq_debugfs, + &cq->dbg, cq->cqn, cq_fields, + ARRAY_SIZE(cq_fields), cq); + if (err) + cq->dbg = NULL; + + return err; +} + +void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) +{ + if (!mlx5_debugfs_root) + return; + + if (cq->dbg) + rem_res_tree(cq->dbg); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c new file mode 100644 index 000000000..58800e4f3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -0,0 +1,539 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include "mlx5_core.h" + +enum { + MLX5_EQE_SIZE = sizeof(struct mlx5_eqe), + MLX5_EQE_OWNER_INIT_VAL = 0x1, +}; + +enum { + MLX5_EQ_STATE_ARMED = 0x9, + MLX5_EQ_STATE_FIRED = 0xa, + MLX5_EQ_STATE_ALWAYS_ARMED = 0xb, +}; + +enum { + MLX5_NUM_SPARE_EQE = 0x80, + MLX5_NUM_ASYNC_EQE = 0x100, + MLX5_NUM_CMD_EQE = 32, +}; + +enum { + MLX5_EQ_DOORBEL_OFFSET = 0x40, +}; + +#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \ + (1ull << MLX5_EVENT_TYPE_COMM_EST) | \ + (1ull << MLX5_EVENT_TYPE_SQ_DRAINED) | \ + (1ull << MLX5_EVENT_TYPE_CQ_ERROR) | \ + (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR) | \ + (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED) | \ + (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \ + (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR) | \ + (1ull << MLX5_EVENT_TYPE_PORT_CHANGE) | \ + (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR) | \ + (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) | \ + (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT)) + +struct map_eq_in { + u64 mask; + u32 reserved; + u32 unmap_eqn; +}; + +struct cre_des_eq { + u8 reserved[15]; + u8 eqn; +}; + +static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn) +{ + struct mlx5_destroy_eq_mbox_in in; + struct mlx5_destroy_eq_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_EQ); + in.eqn = eqn; + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (!err) + goto ex; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + +ex: + return err; +} + +static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry) +{ + return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE); +} + +static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq) +{ + struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1)); + + return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? 
NULL : eqe; +} + +static const char *eqe_type_str(u8 type) +{ + switch (type) { + case MLX5_EVENT_TYPE_COMP: + return "MLX5_EVENT_TYPE_COMP"; + case MLX5_EVENT_TYPE_PATH_MIG: + return "MLX5_EVENT_TYPE_PATH_MIG"; + case MLX5_EVENT_TYPE_COMM_EST: + return "MLX5_EVENT_TYPE_COMM_EST"; + case MLX5_EVENT_TYPE_SQ_DRAINED: + return "MLX5_EVENT_TYPE_SQ_DRAINED"; + case MLX5_EVENT_TYPE_SRQ_LAST_WQE: + return "MLX5_EVENT_TYPE_SRQ_LAST_WQE"; + case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: + return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT"; + case MLX5_EVENT_TYPE_CQ_ERROR: + return "MLX5_EVENT_TYPE_CQ_ERROR"; + case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: + return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR"; + case MLX5_EVENT_TYPE_PATH_MIG_FAILED: + return "MLX5_EVENT_TYPE_PATH_MIG_FAILED"; + case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR"; + case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: + return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR"; + case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: + return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR"; + case MLX5_EVENT_TYPE_INTERNAL_ERROR: + return "MLX5_EVENT_TYPE_INTERNAL_ERROR"; + case MLX5_EVENT_TYPE_PORT_CHANGE: + return "MLX5_EVENT_TYPE_PORT_CHANGE"; + case MLX5_EVENT_TYPE_GPIO_EVENT: + return "MLX5_EVENT_TYPE_GPIO_EVENT"; + case MLX5_EVENT_TYPE_REMOTE_CONFIG: + return "MLX5_EVENT_TYPE_REMOTE_CONFIG"; + case MLX5_EVENT_TYPE_DB_BF_CONGESTION: + return "MLX5_EVENT_TYPE_DB_BF_CONGESTION"; + case MLX5_EVENT_TYPE_STALL_EVENT: + return "MLX5_EVENT_TYPE_STALL_EVENT"; + case MLX5_EVENT_TYPE_CMD: + return "MLX5_EVENT_TYPE_CMD"; + case MLX5_EVENT_TYPE_PAGE_REQUEST: + return "MLX5_EVENT_TYPE_PAGE_REQUEST"; + case MLX5_EVENT_TYPE_PAGE_FAULT: + return "MLX5_EVENT_TYPE_PAGE_FAULT"; + default: + return "Unrecognized event"; + } +} + +static enum mlx5_dev_event port_subtype_event(u8 subtype) +{ + switch (subtype) { + case MLX5_PORT_CHANGE_SUBTYPE_DOWN: + return MLX5_DEV_EVENT_PORT_DOWN; + case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: + return MLX5_DEV_EVENT_PORT_UP; + case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED: + return MLX5_DEV_EVENT_PORT_INITIALIZED; + case MLX5_PORT_CHANGE_SUBTYPE_LID: + return MLX5_DEV_EVENT_LID_CHANGE; + case MLX5_PORT_CHANGE_SUBTYPE_PKEY: + return MLX5_DEV_EVENT_PKEY_CHANGE; + case MLX5_PORT_CHANGE_SUBTYPE_GUID: + return MLX5_DEV_EVENT_GUID_CHANGE; + case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG: + return MLX5_DEV_EVENT_CLIENT_REREG; + } + return -1; +} + +static void eq_update_ci(struct mlx5_eq *eq, int arm) +{ + __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2); + u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24); + __raw_writel((__force u32) cpu_to_be32(val), addr); + /* We still want ordering, just not swabbing, so add a barrier */ + mb(); +} + +static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) +{ + struct mlx5_eqe *eqe; + int eqes_found = 0; + int set_ci = 0; + u32 cqn; + u32 rsn; + u8 port; + + while ((eqe = next_eqe_sw(eq))) { + /* + * Make sure we read EQ entry contents after we've + * checked the ownership bit. 
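+ * The dma_rmb() below provides that ordering: on weakly ordered
+ * architectures it keeps the reads of the EQE payload from being
+ * reordered before the read of the ownership bit.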
+ */ + dma_rmb(); + + mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n", + eq->eqn, eqe_type_str(eqe->type)); + switch (eqe->type) { + case MLX5_EVENT_TYPE_COMP: + cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff; + mlx5_cq_completion(dev, cqn); + break; + + case MLX5_EVENT_TYPE_PATH_MIG: + case MLX5_EVENT_TYPE_COMM_EST: + case MLX5_EVENT_TYPE_SQ_DRAINED: + case MLX5_EVENT_TYPE_SRQ_LAST_WQE: + case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: + case MLX5_EVENT_TYPE_PATH_MIG_FAILED: + case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: + rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; + mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n", + eqe_type_str(eqe->type), eqe->type, rsn); + mlx5_rsc_event(dev, rsn, eqe->type); + break; + + case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: + case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: + rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; + mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n", + eqe_type_str(eqe->type), eqe->type, rsn); + mlx5_srq_event(dev, rsn, eqe->type); + break; + + case MLX5_EVENT_TYPE_CMD: + mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector)); + break; + + case MLX5_EVENT_TYPE_PORT_CHANGE: + port = (eqe->data.port.port >> 4) & 0xf; + switch (eqe->sub_type) { + case MLX5_PORT_CHANGE_SUBTYPE_DOWN: + case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: + case MLX5_PORT_CHANGE_SUBTYPE_LID: + case MLX5_PORT_CHANGE_SUBTYPE_PKEY: + case MLX5_PORT_CHANGE_SUBTYPE_GUID: + case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG: + case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED: + if (dev->event) + dev->event(dev, port_subtype_event(eqe->sub_type), + (unsigned long)port); + break; + default: + mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n", + port, eqe->sub_type); + } + break; + case MLX5_EVENT_TYPE_CQ_ERROR: + cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff; + mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n", + cqn, eqe->data.cq_err.syndrome); + mlx5_cq_event(dev, cqn, eqe->type); + break; + + case MLX5_EVENT_TYPE_PAGE_REQUEST: + { + u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); + s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages); + + mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n", + func_id, npages); + mlx5_core_req_pages_handler(dev, func_id, npages); + } + break; + +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + case MLX5_EVENT_TYPE_PAGE_FAULT: + mlx5_eq_pagefault(dev, eqe); + break; +#endif + + default: + mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", + eqe->type, eq->eqn); + break; + } + + ++eq->cons_index; + eqes_found = 1; + ++set_ci; + + /* The HCA will think the queue has overflowed if we + * don't tell it we've been processing events. We + * create our EQs with MLX5_NUM_SPARE_EQE extra + * entries, so we must update our consumer index at + * least that often. 
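+ * MLX5_NUM_SPARE_EQE is 0x80, so within a single interrupt the consumer
+ * index is written back to the hardware at least once every 128 events.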
+ */ + if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) { + eq_update_ci(eq, 0); + set_ci = 0; + } + } + + eq_update_ci(eq, 1); + + return eqes_found; +} + +static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr) +{ + struct mlx5_eq *eq = eq_ptr; + struct mlx5_core_dev *dev = eq->dev; + + mlx5_eq_int(dev, eq); + + /* MSI-X vectors always belong to us */ + return IRQ_HANDLED; +} + +static void init_eq_buf(struct mlx5_eq *eq) +{ + struct mlx5_eqe *eqe; + int i; + + for (i = 0; i < eq->nent; i++) { + eqe = get_eqe(eq, i); + eqe->owner = MLX5_EQE_OWNER_INIT_VAL; + } +} + +int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, + int nent, u64 mask, const char *name, struct mlx5_uar *uar) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + struct mlx5_create_eq_mbox_in *in; + struct mlx5_create_eq_mbox_out out; + int err; + int inlen; + + eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE); + err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE, + &eq->buf); + if (err) + return err; + + init_eq_buf(eq); + + inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages; + in = mlx5_vzalloc(inlen); + if (!in) { + err = -ENOMEM; + goto err_buf; + } + memset(&out, 0, sizeof(out)); + + mlx5_fill_page_array(&eq->buf, in->pas); + + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ); + in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index); + in->ctx.intr = vecidx; + in->ctx.log_page_size = eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT; + in->events_mask = cpu_to_be64(mask); + + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) + goto err_in; + + if (out.hdr.status) { + err = mlx5_cmd_status_to_err(&out.hdr); + goto err_in; + } + + snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s", + name, pci_name(dev->pdev)); + eq->eqn = out.eq_number; + eq->irqn = vecidx; + eq->dev = dev; + eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET; + err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0, + eq->name, eq); + if (err) + goto err_eq; + + err = mlx5_debug_eq_add(dev, eq); + if (err) + goto err_irq; + + /* EQs are created in ARMED state + */ + eq_update_ci(eq, 1); + + kvfree(in); + return 0; + +err_irq: + free_irq(table->msix_arr[vecidx].vector, eq); + +err_eq: + mlx5_cmd_destroy_eq(dev, eq->eqn); + +err_in: + kvfree(in); + +err_buf: + mlx5_buf_free(dev, &eq->buf); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_create_map_eq); + +int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + int err; + + mlx5_debug_eq_remove(dev, eq); + free_irq(table->msix_arr[eq->irqn].vector, eq); + err = mlx5_cmd_destroy_eq(dev, eq->eqn); + if (err) + mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", + eq->eqn); + synchronize_irq(table->msix_arr[eq->irqn].vector); + mlx5_buf_free(dev, &eq->buf); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq); + +int mlx5_eq_init(struct mlx5_core_dev *dev) +{ + int err; + + spin_lock_init(&dev->priv.eq_table.lock); + + err = mlx5_eq_debugfs_init(dev); + + return err; +} + + +void mlx5_eq_cleanup(struct mlx5_core_dev *dev) +{ + mlx5_eq_debugfs_cleanup(dev); +} + +int mlx5_start_eqs(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + u32 async_event_mask = MLX5_ASYNC_EVENT_MASK; + int err; + + if (dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG) + async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT); + + err = mlx5_create_map_eq(dev, &table->cmd_eq, 
MLX5_EQ_VEC_CMD, + MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, + "mlx5_cmd_eq", &dev->priv.uuari.uars[0]); + if (err) { + mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err); + return err; + } + + mlx5_cmd_use_events(dev); + + err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC, + MLX5_NUM_ASYNC_EQE, async_event_mask, + "mlx5_async_eq", &dev->priv.uuari.uars[0]); + if (err) { + mlx5_core_warn(dev, "failed to create async EQ %d\n", err); + goto err1; + } + + err = mlx5_create_map_eq(dev, &table->pages_eq, + MLX5_EQ_VEC_PAGES, + dev->caps.gen.max_vf + 1, + 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq", + &dev->priv.uuari.uars[0]); + if (err) { + mlx5_core_warn(dev, "failed to create pages EQ %d\n", err); + goto err2; + } + + return err; + +err2: + mlx5_destroy_unmap_eq(dev, &table->async_eq); + +err1: + mlx5_cmd_use_polling(dev); + mlx5_destroy_unmap_eq(dev, &table->cmd_eq); + return err; +} + +int mlx5_stop_eqs(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + int err; + + err = mlx5_destroy_unmap_eq(dev, &table->pages_eq); + if (err) + return err; + + mlx5_destroy_unmap_eq(dev, &table->async_eq); + mlx5_cmd_use_polling(dev); + + err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq); + if (err) + mlx5_cmd_use_events(dev); + + return err; +} + +int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, + struct mlx5_query_eq_mbox_out *out, int outlen) +{ + struct mlx5_query_eq_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(out, 0, outlen); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ); + in.eqn = eq->eqn; + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); + if (err) + return err; + + if (out->hdr.status) + err = mlx5_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_eq_query); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c new file mode 100644 index 000000000..4b4cda3bc --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include <linux/module.h> +#include "mlx5_core.h" + +int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_query_adapter_mbox_out *out; + struct mlx5_cmd_query_adapter_mbox_in in; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_ADAPTER); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err) + goto out_out; + + if (out->hdr.status) { + err = mlx5_cmd_status_to_err(&out->hdr); + goto out_out; + } + + memcpy(dev->board_id, out->vsd_psid, sizeof(out->vsd_psid)); + +out_out: + kfree(out); + + return err; +} + +int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, struct mlx5_caps *caps) +{ + return mlx5_core_get_caps(dev, caps, HCA_CAP_OPMOD_GET_CUR); +} + +int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *caps) +{ + u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)]; + int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); + void *out; + int err; + + if (!(dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)) + return -ENOTSUPP; + + memset(in, 0, sizeof(in)); + out = kzalloc(out_sz, GFP_KERNEL); + if (!out) + return -ENOMEM; + MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); + MLX5_SET(query_hca_cap_in, in, op_mod, HCA_CAP_OPMOD_GET_ODP_CUR); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); + if (err) + goto out; + + err = mlx5_cmd_status_to_err_v2(out); + if (err) { + mlx5_core_warn(dev, "query cur hca ODP caps failed, %d\n", err); + goto out; + } + + memcpy(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct), + sizeof(*caps)); + + mlx5_core_dbg(dev, "on-demand paging capabilities:\nrc: %08x\nuc: %08x\nud: %08x\n", + be32_to_cpu(caps->per_transport_caps.rc_odp_caps), + be32_to_cpu(caps->per_transport_caps.uc_odp_caps), + be32_to_cpu(caps->per_transport_caps.ud_odp_caps)); + +out: + kfree(out); + return err; +} +EXPORT_SYMBOL(mlx5_query_odp_caps); + +int mlx5_cmd_init_hca(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_init_hca_mbox_in in; + struct mlx5_cmd_init_hca_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_INIT_HCA); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + + return err; +} + +int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_teardown_hca_mbox_in in; + struct mlx5_cmd_teardown_hca_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_TEARDOWN_HCA); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c new file mode 100644 index 000000000..292d76f2a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/random.h> +#include <linux/vmalloc.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include "mlx5_core.h" + +enum { + MLX5_HEALTH_POLL_INTERVAL = 2 * HZ, + MAX_MISSES = 3, +}; + +enum { + MLX5_HEALTH_SYNDR_FW_ERR = 0x1, + MLX5_HEALTH_SYNDR_IRISC_ERR = 0x7, + MLX5_HEALTH_SYNDR_CRC_ERR = 0x9, + MLX5_HEALTH_SYNDR_FETCH_PCI_ERR = 0xa, + MLX5_HEALTH_SYNDR_HW_FTL_ERR = 0xb, + MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR = 0xc, + MLX5_HEALTH_SYNDR_EQ_ERR = 0xd, + MLX5_HEALTH_SYNDR_FFSER_ERR = 0xf, +}; + +static DEFINE_SPINLOCK(health_lock); +static LIST_HEAD(health_list); +static struct work_struct health_work; + +static void health_care(struct work_struct *work) +{ + struct mlx5_core_health *health, *n; + struct mlx5_core_dev *dev; + struct mlx5_priv *priv; + LIST_HEAD(tlist); + + spin_lock_irq(&health_lock); + list_splice_init(&health_list, &tlist); + + spin_unlock_irq(&health_lock); + + list_for_each_entry_safe(health, n, &tlist, list) { + priv = container_of(health, struct mlx5_priv, health); + dev = container_of(priv, struct mlx5_core_dev, priv); + mlx5_core_warn(dev, "handling bad device here\n"); + /* nothing yet */ + spin_lock_irq(&health_lock); + list_del_init(&health->list); + spin_unlock_irq(&health_lock); + } +} + +static const char *hsynd_str(u8 synd) +{ + switch (synd) { + case MLX5_HEALTH_SYNDR_FW_ERR: + return "firmware internal error"; + case MLX5_HEALTH_SYNDR_IRISC_ERR: + return "irisc not responding"; + case MLX5_HEALTH_SYNDR_CRC_ERR: + return "firmware CRC error"; + case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR: + return "ICM fetch PCI error"; + case MLX5_HEALTH_SYNDR_HW_FTL_ERR: + return "HW fatal error\n"; + case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR: + return "async EQ buffer overrun"; + case MLX5_HEALTH_SYNDR_EQ_ERR: + return "EQ error"; + case MLX5_HEALTH_SYNDR_FFSER_ERR: + return "FFSER error"; + default: + return "unrecognized error"; + } +} + +static u16 read_be16(__be16 __iomem *p) +{ + return swab16(readl((__force u16 __iomem *) p)); +} + +static u32 read_be32(__be32 __iomem *p) +{ + return swab32(readl((__force u32 __iomem *) p)); +} + +static void print_health_info(struct mlx5_core_dev *dev) +{ + struct mlx5_core_health *health = &dev->priv.health; + struct 
health_buffer __iomem *h = health->health; + int i; + + for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) + pr_info("assert_var[%d] 0x%08x\n", i, read_be32(h->assert_var + i)); + + pr_info("assert_exit_ptr 0x%08x\n", read_be32(&h->assert_exit_ptr)); + pr_info("assert_callra 0x%08x\n", read_be32(&h->assert_callra)); + pr_info("fw_ver 0x%08x\n", read_be32(&h->fw_ver)); + pr_info("hw_id 0x%08x\n", read_be32(&h->hw_id)); + pr_info("irisc_index %d\n", readb(&h->irisc_index)); + pr_info("synd 0x%x: %s\n", readb(&h->synd), hsynd_str(readb(&h->synd))); + pr_info("ext_sync 0x%04x\n", read_be16(&h->ext_sync)); +} + +static void poll_health(unsigned long data) +{ + struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data; + struct mlx5_core_health *health = &dev->priv.health; + unsigned long next; + u32 count; + + count = ioread32be(health->health_counter); + if (count == health->prev) + ++health->miss_counter; + else + health->miss_counter = 0; + + health->prev = count; + if (health->miss_counter == MAX_MISSES) { + mlx5_core_err(dev, "device's health compromised\n"); + print_health_info(dev); + spin_lock_irq(&health_lock); + list_add_tail(&health->list, &health_list); + spin_unlock_irq(&health_lock); + + queue_work(mlx5_core_wq, &health_work); + } else { + get_random_bytes(&next, sizeof(next)); + next %= HZ; + next += jiffies + MLX5_HEALTH_POLL_INTERVAL; + mod_timer(&health->timer, next); + } +} + +void mlx5_start_health_poll(struct mlx5_core_dev *dev) +{ + struct mlx5_core_health *health = &dev->priv.health; + + INIT_LIST_HEAD(&health->list); + init_timer(&health->timer); + health->health = &dev->iseg->health; + health->health_counter = &dev->iseg->health_counter; + + health->timer.data = (unsigned long)dev; + health->timer.function = poll_health; + health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL); + add_timer(&health->timer); +} + +void mlx5_stop_health_poll(struct mlx5_core_dev *dev) +{ + struct mlx5_core_health *health = &dev->priv.health; + + del_timer_sync(&health->timer); + + spin_lock_irq(&health_lock); + if (!list_empty(&health->list)) + list_del_init(&health->list); + spin_unlock_irq(&health_lock); +} + +void mlx5_health_cleanup(void) +{ +} + +void __init mlx5_health_init(void) +{ + INIT_WORK(&health_work, health_care); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mad.c b/drivers/net/ethernet/mellanox/mlx5/core/mad.c new file mode 100644 index 000000000..ee1b0b965 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/mad.c @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include "mlx5_core.h" + +int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb, + u16 opmod, u8 port) +{ + struct mlx5_mad_ifc_mbox_in *in = NULL; + struct mlx5_mad_ifc_mbox_out *out = NULL; + int err; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) { + err = -ENOMEM; + goto out; + } + + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MAD_IFC); + in->hdr.opmod = cpu_to_be16(opmod); + in->port = port; + + memcpy(in->data, inb, sizeof(in->data)); + + err = mlx5_cmd_exec(dev, in, sizeof(*in), out, sizeof(*out)); + if (err) + goto out; + + if (out->hdr.status) { + err = mlx5_cmd_status_to_err(&out->hdr); + goto out; + } + + memcpy(outb, out->data, sizeof(out->data)); + +out: + kfree(out); + kfree(in); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_mad_ifc); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c new file mode 100644 index 000000000..28425e5ea --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -0,0 +1,1070 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <asm-generic/kmap_types.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/io-mapping.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cq.h> +#include <linux/mlx5/qp.h> +#include <linux/mlx5/srq.h> +#include <linux/debugfs.h> +#include <linux/kmod.h> +#include <linux/mlx5/mlx5_ifc.h> +#include "mlx5_core.h" + +#define DRIVER_NAME "mlx5_core" +#define DRIVER_VERSION "3.0" +#define DRIVER_RELDATE "January 2015" + +MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); +MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRIVER_VERSION); + +int mlx5_core_debug_mask; +module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644); +MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0"); + +#define MLX5_DEFAULT_PROF 2 +static int prof_sel = MLX5_DEFAULT_PROF; +module_param_named(prof_sel, prof_sel, int, 0444); +MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2"); + +struct workqueue_struct *mlx5_core_wq; +static LIST_HEAD(intf_list); +static LIST_HEAD(dev_list); +static DEFINE_MUTEX(intf_mutex); + +struct mlx5_device_context { + struct list_head list; + struct mlx5_interface *intf; + void *context; +}; + +static struct mlx5_profile profile[] = { + [0] = { + .mask = 0, + }, + [1] = { + .mask = MLX5_PROF_MASK_QP_SIZE, + .log_max_qp = 12, + }, + [2] = { + .mask = MLX5_PROF_MASK_QP_SIZE | + MLX5_PROF_MASK_MR_CACHE, + .log_max_qp = 17, + .mr_cache[0] = { + .size = 500, + .limit = 250 + }, + .mr_cache[1] = { + .size = 500, + .limit = 250 + }, + .mr_cache[2] = { + .size = 500, + .limit = 250 + }, + .mr_cache[3] = { + .size = 500, + .limit = 250 + }, + .mr_cache[4] = { + .size = 500, + .limit = 250 + }, + .mr_cache[5] = { + .size = 500, + .limit = 250 + }, + .mr_cache[6] = { + .size = 500, + .limit = 250 + }, + .mr_cache[7] = { + .size = 500, + .limit = 250 + }, + .mr_cache[8] = { + .size = 500, + .limit = 250 + }, + .mr_cache[9] = { + .size = 500, + .limit = 250 + }, + .mr_cache[10] = { + .size = 500, + .limit = 250 + }, + .mr_cache[11] = { + .size = 500, + .limit = 250 + }, + .mr_cache[12] = { + .size = 64, + .limit = 32 + }, + .mr_cache[13] = { + .size = 32, + .limit = 16 + }, + .mr_cache[14] = { + .size = 16, + .limit = 8 + }, + .mr_cache[15] = { + .size = 8, + .limit = 4 + }, + }, +}; + +static int set_dma_caps(struct pci_dev *pdev) +{ + int err; + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); + return err; + } + } + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_warn(&pdev->dev, + "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "Can't set consistent PCI DMA mask, aborting\n"); + return err; + } + } + + dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024); + return err; +} + +static int request_bar(struct pci_dev *pdev) +{ + int err = 0; + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, "Missing registers BAR, aborting\n"); + return -ENODEV; + } + + err = pci_request_regions(pdev, DRIVER_NAME); + if (err) + dev_err(&pdev->dev, "Couldn't get PCI 
resources, aborting\n"); + + return err; +} + +static void release_bar(struct pci_dev *pdev) +{ + pci_release_regions(pdev); +} + +static int mlx5_enable_msix(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + int num_eqs = 1 << dev->caps.gen.log_max_eq; + int nvec; + int i; + + nvec = dev->caps.gen.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE; + nvec = min_t(int, nvec, num_eqs); + if (nvec <= MLX5_EQ_VEC_COMP_BASE) + return -ENOMEM; + + table->msix_arr = kzalloc(nvec * sizeof(*table->msix_arr), GFP_KERNEL); + if (!table->msix_arr) + return -ENOMEM; + + for (i = 0; i < nvec; i++) + table->msix_arr[i].entry = i; + + nvec = pci_enable_msix_range(dev->pdev, table->msix_arr, + MLX5_EQ_VEC_COMP_BASE + 1, nvec); + if (nvec < 0) + return nvec; + + table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; + + return 0; +} + +static void mlx5_disable_msix(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + + pci_disable_msix(dev->pdev); + kfree(table->msix_arr); +} + +struct mlx5_reg_host_endianess { + u8 he; + u8 rsvd[15]; +}; + + +#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos)) + +enum { + MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) | + MLX5_DEV_CAP_FLAG_DCT, +}; + +static u16 to_fw_pkey_sz(u32 size) +{ + switch (size) { + case 128: + return 0; + case 256: + return 1; + case 512: + return 2; + case 1024: + return 3; + case 2048: + return 4; + case 4096: + return 5; + default: + pr_warn("invalid pkey table size %d\n", size); + return 0; + } +} + +/* selectively copy writable fields clearing any reserved area + */ +static void copy_rw_fields(void *to, struct mlx5_caps *from) +{ + __be64 *flags_off = (__be64 *)MLX5_ADDR_OF(cmd_hca_cap, to, reserved_22); + u64 v64; + + MLX5_SET(cmd_hca_cap, to, log_max_qp, from->gen.log_max_qp); + MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp); + MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp); + MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size); + MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size)); + MLX5_SET(cmd_hca_cap, to, log_uar_page_sz, PAGE_SHIFT - 12); + v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK; + *flags_off = cpu_to_be64(v64); +} + +static u16 get_pkey_table_size(int pkey) +{ + if (pkey > MLX5_MAX_LOG_PKEY_TABLE) + return 0; + + return MLX5_MIN_PKEY_TABLE_SIZE << pkey; +} + +static void fw2drv_caps(struct mlx5_caps *caps, void *out) +{ + struct mlx5_general_caps *gen = &caps->gen; + + gen->max_srq_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_srq_sz); + gen->max_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_qp_sz); + gen->log_max_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_qp); + gen->log_max_strq = MLX5_GET_PR(cmd_hca_cap, out, log_max_strq_sz); + gen->log_max_srq = MLX5_GET_PR(cmd_hca_cap, out, log_max_srqs); + gen->max_cqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_cq_sz); + gen->log_max_cq = MLX5_GET_PR(cmd_hca_cap, out, log_max_cq); + gen->max_eqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_eq_sz); + gen->log_max_mkey = MLX5_GET_PR(cmd_hca_cap, out, log_max_mkey); + gen->log_max_eq = MLX5_GET_PR(cmd_hca_cap, out, log_max_eq); + gen->max_indirection = MLX5_GET_PR(cmd_hca_cap, out, max_indirection); + gen->log_max_mrw_sz = MLX5_GET_PR(cmd_hca_cap, out, log_max_mrw_sz); + gen->log_max_bsf_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_bsf_list_size); + gen->log_max_klm_list_size = MLX5_GET_PR(cmd_hca_cap, out, 
log_max_klm_list_size); + gen->log_max_ra_req_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_dc); + gen->log_max_ra_res_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_dc); + gen->log_max_ra_req_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_qp); + gen->log_max_ra_res_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_qp); + gen->max_qp_counters = MLX5_GET_PR(cmd_hca_cap, out, max_qp_cnt); + gen->pkey_table_size = get_pkey_table_size(MLX5_GET_PR(cmd_hca_cap, out, pkey_table_size)); + gen->local_ca_ack_delay = MLX5_GET_PR(cmd_hca_cap, out, local_ca_ack_delay); + gen->num_ports = MLX5_GET_PR(cmd_hca_cap, out, num_ports); + gen->log_max_msg = MLX5_GET_PR(cmd_hca_cap, out, log_max_msg); + gen->stat_rate_support = MLX5_GET_PR(cmd_hca_cap, out, stat_rate_support); + gen->flags = be64_to_cpu(*(__be64 *)MLX5_ADDR_OF(cmd_hca_cap, out, reserved_22)); + pr_debug("flags = 0x%llx\n", gen->flags); + gen->uar_sz = MLX5_GET_PR(cmd_hca_cap, out, uar_sz); + gen->min_log_pg_sz = MLX5_GET_PR(cmd_hca_cap, out, log_pg_sz); + gen->bf_reg_size = MLX5_GET_PR(cmd_hca_cap, out, bf); + gen->bf_reg_size = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_bf_reg_size); + gen->max_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq); + gen->max_rq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_rq); + gen->max_dc_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq_dc); + gen->max_qp_mcg = MLX5_GET_PR(cmd_hca_cap, out, max_qp_mcg); + gen->log_max_pd = MLX5_GET_PR(cmd_hca_cap, out, log_max_pd); + gen->log_max_xrcd = MLX5_GET_PR(cmd_hca_cap, out, log_max_xrcd); + gen->log_uar_page_sz = MLX5_GET_PR(cmd_hca_cap, out, log_uar_page_sz); +} + +static const char *caps_opmod_str(u16 opmod) +{ + switch (opmod) { + case HCA_CAP_OPMOD_GET_MAX: + return "GET_MAX"; + case HCA_CAP_OPMOD_GET_CUR: + return "GET_CUR"; + default: + return "Invalid"; + } +} + +int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps, + u16 opmod) +{ + u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)]; + int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); + void *out; + int err; + + memset(in, 0, sizeof(in)); + out = kzalloc(out_sz, GFP_KERNEL); + if (!out) + return -ENOMEM; + MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); + MLX5_SET(query_hca_cap_in, in, op_mod, opmod); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); + if (err) + goto query_ex; + + err = mlx5_cmd_status_to_err_v2(out); + if (err) { + mlx5_core_warn(dev, "query max hca cap failed, %d\n", err); + goto query_ex; + } + mlx5_core_dbg(dev, "%s\n", caps_opmod_str(opmod)); + fw2drv_caps(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct)); + +query_ex: + kfree(out); + return err; +} + +static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz) +{ + u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)]; + int err; + + memset(out, 0, sizeof(out)); + + MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP); + err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); + if (err) + return err; + + err = mlx5_cmd_status_to_err_v2(out); + + return err; +} + +static int handle_hca_cap(struct mlx5_core_dev *dev) +{ + void *set_ctx = NULL; + struct mlx5_profile *prof = dev->profile; + struct mlx5_caps *cur_caps = NULL; + struct mlx5_caps *max_caps = NULL; + int err = -ENOMEM; + int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); + + set_ctx = kzalloc(set_sz, GFP_KERNEL); + if (!set_ctx) + goto query_ex; + + max_caps = kzalloc(sizeof(*max_caps), GFP_KERNEL); + if (!max_caps) + goto query_ex; + + cur_caps = kzalloc(sizeof(*cur_caps), GFP_KERNEL); + if (!cur_caps) 
+ goto query_ex; + + err = mlx5_core_get_caps(dev, max_caps, HCA_CAP_OPMOD_GET_MAX); + if (err) + goto query_ex; + + err = mlx5_core_get_caps(dev, cur_caps, HCA_CAP_OPMOD_GET_CUR); + if (err) + goto query_ex; + + /* we limit the size of the pkey table to 128 entries for now */ + cur_caps->gen.pkey_table_size = 128; + + if (prof->mask & MLX5_PROF_MASK_QP_SIZE) + cur_caps->gen.log_max_qp = prof->log_max_qp; + + /* disable checksum */ + cur_caps->gen.flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM; + + copy_rw_fields(MLX5_ADDR_OF(set_hca_cap_in, set_ctx, hca_capability_struct), + cur_caps); + err = set_caps(dev, set_ctx, set_sz); + +query_ex: + kfree(cur_caps); + kfree(max_caps); + kfree(set_ctx); + + return err; +} + +static int set_hca_ctrl(struct mlx5_core_dev *dev) +{ + struct mlx5_reg_host_endianess he_in; + struct mlx5_reg_host_endianess he_out; + int err; + + memset(&he_in, 0, sizeof(he_in)); + he_in.he = MLX5_SET_HOST_ENDIANNESS; + err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in), + &he_out, sizeof(he_out), + MLX5_REG_HOST_ENDIANNESS, 0, 1); + return err; +} + +static int mlx5_core_enable_hca(struct mlx5_core_dev *dev) +{ + int err; + struct mlx5_enable_hca_mbox_in in; + struct mlx5_enable_hca_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + return 0; +} + +static int mlx5_core_disable_hca(struct mlx5_core_dev *dev) +{ + int err; + struct mlx5_disable_hca_mbox_in in; + struct mlx5_disable_hca_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DISABLE_HCA); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + return 0; +} + +int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + struct mlx5_eq *eq, *n; + int err = -ENOENT; + + spin_lock(&table->lock); + list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { + if (eq->index == vector) { + *eqn = eq->eqn; + *irqn = eq->irqn; + err = 0; + break; + } + } + spin_unlock(&table->lock); + + return err; +} +EXPORT_SYMBOL(mlx5_vector2eqn); + +static void free_comp_eqs(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + struct mlx5_eq *eq, *n; + + spin_lock(&table->lock); + list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { + list_del(&eq->list); + spin_unlock(&table->lock); + if (mlx5_destroy_unmap_eq(dev, eq)) + mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n", + eq->eqn); + kfree(eq); + spin_lock(&table->lock); + } + spin_unlock(&table->lock); +} + +static int alloc_comp_eqs(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + char name[MLX5_MAX_EQ_NAME]; + struct mlx5_eq *eq; + int ncomp_vec; + int nent; + int err; + int i; + + INIT_LIST_HEAD(&table->comp_eqs_list); + ncomp_vec = table->num_comp_vectors; + nent = MLX5_COMP_EQ_SIZE; + for (i = 0; i < ncomp_vec; i++) { + eq = kzalloc(sizeof(*eq), GFP_KERNEL); + if (!eq) { + err = -ENOMEM; + goto clean; + } + + snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i); + err = mlx5_create_map_eq(dev, eq, + i + MLX5_EQ_VEC_COMP_BASE, nent, 0, + name, &dev->priv.uuari.uars[0]); + if (err) { + kfree(eq); + goto clean; + } + 
mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn); + eq->index = i; + spin_lock(&table->lock); + list_add_tail(&eq->list, &table->comp_eqs_list); + spin_unlock(&table->lock); + } + + return 0; + +clean: + free_comp_eqs(dev); + return err; +} + +static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) +{ + struct mlx5_priv *priv = &dev->priv; + int err; + + dev->pdev = pdev; + pci_set_drvdata(dev->pdev, dev); + strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN); + priv->name[MLX5_MAX_NAME_LEN - 1] = 0; + + mutex_init(&priv->pgdir_mutex); + INIT_LIST_HEAD(&priv->pgdir_list); + spin_lock_init(&priv->mkey_lock); + + priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root); + if (!priv->dbg_root) + return -ENOMEM; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); + goto err_dbg; + } + + err = request_bar(pdev); + if (err) { + dev_err(&pdev->dev, "error requesting BARs, aborting\n"); + goto err_disable; + } + + pci_set_master(pdev); + + err = set_dma_caps(pdev); + if (err) { + dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n"); + goto err_clr_master; + } + + dev->iseg_base = pci_resource_start(dev->pdev, 0); + dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg)); + if (!dev->iseg) { + err = -ENOMEM; + dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n"); + goto err_clr_master; + } + dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev), + fw_rev_min(dev), fw_rev_sub(dev)); + + err = mlx5_cmd_init(dev); + if (err) { + dev_err(&pdev->dev, "Failed initializing command interface, aborting\n"); + goto err_unmap; + } + + mlx5_pagealloc_init(dev); + + err = mlx5_core_enable_hca(dev); + if (err) { + dev_err(&pdev->dev, "enable hca failed\n"); + goto err_pagealloc_cleanup; + } + + err = mlx5_satisfy_startup_pages(dev, 1); + if (err) { + dev_err(&pdev->dev, "failed to allocate boot pages\n"); + goto err_disable_hca; + } + + err = set_hca_ctrl(dev); + if (err) { + dev_err(&pdev->dev, "set_hca_ctrl failed\n"); + goto reclaim_boot_pages; + } + + err = handle_hca_cap(dev); + if (err) { + dev_err(&pdev->dev, "handle_hca_cap failed\n"); + goto reclaim_boot_pages; + } + + err = mlx5_satisfy_startup_pages(dev, 0); + if (err) { + dev_err(&pdev->dev, "failed to allocate init pages\n"); + goto reclaim_boot_pages; + } + + err = mlx5_pagealloc_start(dev); + if (err) { + dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n"); + goto reclaim_boot_pages; + } + + err = mlx5_cmd_init_hca(dev); + if (err) { + dev_err(&pdev->dev, "init hca failed\n"); + goto err_pagealloc_stop; + } + + mlx5_start_health_poll(dev); + + err = mlx5_cmd_query_hca_cap(dev, &dev->caps); + if (err) { + dev_err(&pdev->dev, "query hca failed\n"); + goto err_stop_poll; + } + + err = mlx5_cmd_query_adapter(dev); + if (err) { + dev_err(&pdev->dev, "query adapter failed\n"); + goto err_stop_poll; + } + + err = mlx5_enable_msix(dev); + if (err) { + dev_err(&pdev->dev, "enable msix failed\n"); + goto err_stop_poll; + } + + err = mlx5_eq_init(dev); + if (err) { + dev_err(&pdev->dev, "failed to initialize eq\n"); + goto disable_msix; + } + + err = mlx5_alloc_uuars(dev, &priv->uuari); + if (err) { + dev_err(&pdev->dev, "Failed allocating uar, aborting\n"); + goto err_eq_cleanup; + } + + err = mlx5_start_eqs(dev); + if (err) { + dev_err(&pdev->dev, "Failed to start pages and async EQs\n"); + goto err_free_uar; + } + + err = alloc_comp_eqs(dev); + if (err) { + dev_err(&pdev->dev, "Failed 
to alloc completion EQs\n"); + goto err_stop_eqs; + } + + MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock); + + mlx5_init_cq_table(dev); + mlx5_init_qp_table(dev); + mlx5_init_srq_table(dev); + mlx5_init_mr_table(dev); + + return 0; + +err_stop_eqs: + mlx5_stop_eqs(dev); + +err_free_uar: + mlx5_free_uuars(dev, &priv->uuari); + +err_eq_cleanup: + mlx5_eq_cleanup(dev); + +disable_msix: + mlx5_disable_msix(dev); + +err_stop_poll: + mlx5_stop_health_poll(dev); + if (mlx5_cmd_teardown_hca(dev)) { + dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); + return err; + } + +err_pagealloc_stop: + mlx5_pagealloc_stop(dev); + +reclaim_boot_pages: + mlx5_reclaim_startup_pages(dev); + +err_disable_hca: + mlx5_core_disable_hca(dev); + +err_pagealloc_cleanup: + mlx5_pagealloc_cleanup(dev); + mlx5_cmd_cleanup(dev); + +err_unmap: + iounmap(dev->iseg); + +err_clr_master: + pci_clear_master(dev->pdev); + release_bar(dev->pdev); + +err_disable: + pci_disable_device(dev->pdev); + +err_dbg: + debugfs_remove(priv->dbg_root); + return err; +} + +static void mlx5_dev_cleanup(struct mlx5_core_dev *dev) +{ + struct mlx5_priv *priv = &dev->priv; + + mlx5_cleanup_srq_table(dev); + mlx5_cleanup_qp_table(dev); + mlx5_cleanup_cq_table(dev); + free_comp_eqs(dev); + mlx5_stop_eqs(dev); + mlx5_free_uuars(dev, &priv->uuari); + mlx5_eq_cleanup(dev); + mlx5_disable_msix(dev); + mlx5_stop_health_poll(dev); + if (mlx5_cmd_teardown_hca(dev)) { + dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); + return; + } + mlx5_pagealloc_stop(dev); + mlx5_reclaim_startup_pages(dev); + mlx5_core_disable_hca(dev); + mlx5_pagealloc_cleanup(dev); + mlx5_cmd_cleanup(dev); + iounmap(dev->iseg); + pci_clear_master(dev->pdev); + release_bar(dev->pdev); + pci_disable_device(dev->pdev); + debugfs_remove(priv->dbg_root); +} + +static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) +{ + struct mlx5_device_context *dev_ctx; + struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); + + dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL); + if (!dev_ctx) { + pr_warn("mlx5_add_device: alloc context failed\n"); + return; + } + + dev_ctx->intf = intf; + dev_ctx->context = intf->add(dev); + + if (dev_ctx->context) { + spin_lock_irq(&priv->ctx_lock); + list_add_tail(&dev_ctx->list, &priv->ctx_list); + spin_unlock_irq(&priv->ctx_lock); + } else { + kfree(dev_ctx); + } +} + +static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv) +{ + struct mlx5_device_context *dev_ctx; + struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); + + list_for_each_entry(dev_ctx, &priv->ctx_list, list) + if (dev_ctx->intf == intf) { + spin_lock_irq(&priv->ctx_lock); + list_del(&dev_ctx->list); + spin_unlock_irq(&priv->ctx_lock); + + intf->remove(dev, dev_ctx->context); + kfree(dev_ctx); + return; + } +} +static int mlx5_register_device(struct mlx5_core_dev *dev) +{ + struct mlx5_priv *priv = &dev->priv; + struct mlx5_interface *intf; + + mutex_lock(&intf_mutex); + list_add_tail(&priv->dev_list, &dev_list); + list_for_each_entry(intf, &intf_list, list) + mlx5_add_device(intf, priv); + mutex_unlock(&intf_mutex); + + return 0; +} +static void mlx5_unregister_device(struct mlx5_core_dev *dev) +{ + struct mlx5_priv *priv = &dev->priv; + struct mlx5_interface *intf; + + mutex_lock(&intf_mutex); + list_for_each_entry(intf, &intf_list, list) + mlx5_remove_device(intf, priv); + list_del(&priv->dev_list); + mutex_unlock(&intf_mutex); +} + +int mlx5_register_interface(struct 
mlx5_interface *intf) +{ + struct mlx5_priv *priv; + + if (!intf->add || !intf->remove) + return -EINVAL; + + mutex_lock(&intf_mutex); + list_add_tail(&intf->list, &intf_list); + list_for_each_entry(priv, &dev_list, dev_list) + mlx5_add_device(intf, priv); + mutex_unlock(&intf_mutex); + + return 0; +} +EXPORT_SYMBOL(mlx5_register_interface); + +void mlx5_unregister_interface(struct mlx5_interface *intf) +{ + struct mlx5_priv *priv; + + mutex_lock(&intf_mutex); + list_for_each_entry(priv, &dev_list, dev_list) + mlx5_remove_device(intf, priv); + list_del(&intf->list); + mutex_unlock(&intf_mutex); +} +EXPORT_SYMBOL(mlx5_unregister_interface); + +void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol) +{ + struct mlx5_priv *priv = &mdev->priv; + struct mlx5_device_context *dev_ctx; + unsigned long flags; + void *result = NULL; + + spin_lock_irqsave(&priv->ctx_lock, flags); + + list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list) + if ((dev_ctx->intf->protocol == protocol) && + dev_ctx->intf->get_dev) { + result = dev_ctx->intf->get_dev(dev_ctx->context); + break; + } + + spin_unlock_irqrestore(&priv->ctx_lock, flags); + + return result; +} +EXPORT_SYMBOL(mlx5_get_protocol_dev); + +static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, + unsigned long param) +{ + struct mlx5_priv *priv = &dev->priv; + struct mlx5_device_context *dev_ctx; + unsigned long flags; + + spin_lock_irqsave(&priv->ctx_lock, flags); + + list_for_each_entry(dev_ctx, &priv->ctx_list, list) + if (dev_ctx->intf->event) + dev_ctx->intf->event(dev, dev_ctx->context, event, param); + + spin_unlock_irqrestore(&priv->ctx_lock, flags); +} + +struct mlx5_core_event_handler { + void (*event)(struct mlx5_core_dev *dev, + enum mlx5_dev_event event, + void *data); +}; + +#define MLX5_IB_MOD "mlx5_ib" + +static int init_one(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct mlx5_core_dev *dev; + struct mlx5_priv *priv; + int err; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) { + dev_err(&pdev->dev, "kzalloc failed\n"); + return -ENOMEM; + } + priv = &dev->priv; + + pci_set_drvdata(pdev, dev); + + if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) { + pr_warn("selected profile out of range, selecting default (%d)\n", + MLX5_DEFAULT_PROF); + prof_sel = MLX5_DEFAULT_PROF; + } + dev->profile = &profile[prof_sel]; + dev->event = mlx5_core_event; + + INIT_LIST_HEAD(&priv->ctx_list); + spin_lock_init(&priv->ctx_lock); + err = mlx5_dev_init(dev, pdev); + if (err) { + dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err); + goto out; + } + + err = mlx5_register_device(dev); + if (err) { + dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err); + goto out_init; + } + + err = request_module_nowait(MLX5_IB_MOD); + if (err) + pr_info("failed request module on %s\n", MLX5_IB_MOD); + + return 0; + +out_init: + mlx5_dev_cleanup(dev); +out: + kfree(dev); + return err; +} +static void remove_one(struct pci_dev *pdev) +{ + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + + mlx5_unregister_device(dev); + mlx5_dev_cleanup(dev); + kfree(dev); +} + +static const struct pci_device_id mlx5_core_pci_table[] = { + { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */ + { PCI_VDEVICE(MELLANOX, 0x1012) }, /* Connect-IB VF */ + { PCI_VDEVICE(MELLANOX, 0x1013) }, /* ConnectX-4 */ + { PCI_VDEVICE(MELLANOX, 0x1014) }, /* ConnectX-4 VF */ + { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */ + { PCI_VDEVICE(MELLANOX, 0x1016) }, /* ConnectX-4LX VF */ + { 0, } +}; + 
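For reference, below is a minimal sketch of how a consumer driver (such as mlx5_ib, which init_one() requests via request_module_nowait()) might hook into the interface registration code above. It is not part of the imported sources: the mlx5_demo_* names and the module boilerplate are illustrative assumptions, while the callback signatures follow mlx5_add_device(), mlx5_remove_device() and mlx5_core_event() as shown in this file.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mlx5/driver.h>

struct mlx5_demo_ctx {
	struct mlx5_core_dev *dev;
};

static void *mlx5_demo_add(struct mlx5_core_dev *dev)
{
	struct mlx5_demo_ctx *ctx;

	/* the returned pointer is stored as dev_ctx->context and handed
	 * back to .remove() and .event(); returning NULL skips this device */
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx)
		ctx->dev = dev;
	return ctx;
}

static void mlx5_demo_remove(struct mlx5_core_dev *dev, void *context)
{
	kfree(context);
}

static void mlx5_demo_event(struct mlx5_core_dev *dev, void *context,
			    enum mlx5_dev_event event, unsigned long param)
{
	/* dispatched from mlx5_core_event() for asynchronous device events */
}

static struct mlx5_interface mlx5_demo_interface = {
	.add    = mlx5_demo_add,	/* mandatory, see mlx5_register_interface() */
	.remove = mlx5_demo_remove,	/* mandatory */
	.event  = mlx5_demo_event,	/* optional */
};

static int __init mlx5_demo_init(void)
{
	/* .add() runs immediately for every mlx5_core device already on
	 * dev_list, and later for devices registered afterwards */
	return mlx5_register_interface(&mlx5_demo_interface);
}

static void __exit mlx5_demo_exit(void)
{
	mlx5_unregister_interface(&mlx5_demo_interface);
}

module_init(mlx5_demo_init);
module_exit(mlx5_demo_exit);
MODULE_LICENSE("Dual BSD/GPL");

mlx5_get_protocol_dev() additionally relies on the optional .protocol and .get_dev fields of struct mlx5_interface, which this sketch omits.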
+MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table); + +static struct pci_driver mlx5_core_driver = { + .name = DRIVER_NAME, + .id_table = mlx5_core_pci_table, + .probe = init_one, + .remove = remove_one +}; + +static int __init init(void) +{ + int err; + + mlx5_register_debugfs(); + mlx5_core_wq = create_singlethread_workqueue("mlx5_core_wq"); + if (!mlx5_core_wq) { + err = -ENOMEM; + goto err_debug; + } + mlx5_health_init(); + + err = pci_register_driver(&mlx5_core_driver); + if (err) + goto err_health; + + return 0; + +err_health: + mlx5_health_cleanup(); + destroy_workqueue(mlx5_core_wq); +err_debug: + mlx5_unregister_debugfs(); + return err; +} + +static void __exit cleanup(void) +{ + pci_unregister_driver(&mlx5_core_driver); + mlx5_health_cleanup(); + destroy_workqueue(mlx5_core_wq); + mlx5_unregister_debugfs(); +} + +module_init(init); +module_exit(cleanup); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c new file mode 100644 index 000000000..d79fd85d1 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include <rdma/ib_verbs.h> +#include "mlx5_core.h" + +struct mlx5_attach_mcg_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 qpn; + __be32 rsvd; + u8 gid[16]; +}; + +struct mlx5_attach_mcg_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvf[8]; +}; + +struct mlx5_detach_mcg_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 qpn; + __be32 rsvd; + u8 gid[16]; +}; + +struct mlx5_detach_mcg_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvf[8]; +}; + +int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) +{ + struct mlx5_attach_mcg_mbox_in in; + struct mlx5_attach_mcg_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ATTACH_TO_MCG); + memcpy(in.gid, mgid, sizeof(*mgid)); + in.qpn = cpu_to_be32(qpn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_attach_mcg); + +int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) +{ + struct mlx5_detach_mcg_mbox_in in; + struct mlx5_detach_mcg_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETACH_FROM_MCG); + memcpy(in.gid, mgid, sizeof(*mgid)); + in.qpn = cpu_to_be32(qpn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_detach_mcg); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h new file mode 100644 index 000000000..a051b906a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __MLX5_CORE_H__ +#define __MLX5_CORE_H__ + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/sched.h> + +extern int mlx5_core_debug_mask; + +#define mlx5_core_dbg(dev, format, ...) \ + pr_debug("%s:%s:%d:(pid %d): " format, \ + (dev)->priv.name, __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + +#define mlx5_core_dbg_mask(dev, mask, format, ...) \ +do { \ + if ((mask) & mlx5_core_debug_mask) \ + mlx5_core_dbg(dev, format, ##__VA_ARGS__); \ +} while (0) + +#define mlx5_core_err(dev, format, ...) \ + pr_err("%s:%s:%d:(pid %d): " format, \ + (dev)->priv.name, __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + +#define mlx5_core_warn(dev, format, ...) \ + pr_warn("%s:%s:%d:(pid %d): " format, \ + (dev)->priv.name, __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + +enum { + MLX5_CMD_DATA, /* print command payload only */ + MLX5_CMD_TIME, /* print command execution time */ +}; + + +int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, + struct mlx5_caps *caps); +int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev); +int mlx5_cmd_init_hca(struct mlx5_core_dev *dev); +int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); + +#endif /* __MLX5_CORE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c new file mode 100644 index 000000000..1adb300dd --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -0,0 +1,248 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include "mlx5_core.h" + +void mlx5_init_mr_table(struct mlx5_core_dev *dev) +{ + struct mlx5_mr_table *table = &dev->priv.mr_table; + + rwlock_init(&table->lock); + INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); +} + +void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev) +{ +} + +int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, + struct mlx5_create_mkey_mbox_in *in, int inlen, + mlx5_cmd_cbk_t callback, void *context, + struct mlx5_create_mkey_mbox_out *out) +{ + struct mlx5_mr_table *table = &dev->priv.mr_table; + struct mlx5_create_mkey_mbox_out lout; + int err; + u8 key; + + memset(&lout, 0, sizeof(lout)); + spin_lock_irq(&dev->priv.mkey_lock); + key = dev->priv.mkey_key++; + spin_unlock_irq(&dev->priv.mkey_lock); + in->seg.qpn_mkey7_0 |= cpu_to_be32(key); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY); + if (callback) { + err = mlx5_cmd_exec_cb(dev, in, inlen, out, sizeof(*out), + callback, context); + return err; + } else { + err = mlx5_cmd_exec(dev, in, inlen, &lout, sizeof(lout)); + } + + if (err) { + mlx5_core_dbg(dev, "cmd exec failed %d\n", err); + return err; + } + + if (lout.hdr.status) { + mlx5_core_dbg(dev, "status %d\n", lout.hdr.status); + return mlx5_cmd_status_to_err(&lout.hdr); + } + + mr->iova = be64_to_cpu(in->seg.start_addr); + mr->size = be64_to_cpu(in->seg.len); + mr->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key; + mr->pd = be32_to_cpu(in->seg.flags_pd) & 0xffffff; + + mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", + be32_to_cpu(lout.mkey), key, mr->key); + + /* connect to MR tree */ + write_lock_irq(&table->lock); + err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr); + write_unlock_irq(&table->lock); + if (err) { + mlx5_core_warn(dev, "failed radix tree insert of mr 0x%x, %d\n", + mlx5_base_mkey(mr->key), err); + mlx5_core_destroy_mkey(dev, mr); + } + + return err; +} +EXPORT_SYMBOL(mlx5_core_create_mkey); + +int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr) +{ + struct mlx5_mr_table *table = &dev->priv.mr_table; + struct mlx5_destroy_mkey_mbox_in in; + struct mlx5_destroy_mkey_mbox_out out; + struct mlx5_core_mr *deleted_mr; + unsigned long flags; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + write_lock_irqsave(&table->lock, flags); + deleted_mr = radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key)); + write_unlock_irqrestore(&table->lock, flags); + if (!deleted_mr) { + mlx5_core_warn(dev, "failed radix tree delete of mr 0x%x\n", + mlx5_base_mkey(mr->key)); + return -ENOENT; + } + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY); + in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key)); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_destroy_mkey); + +int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, + struct mlx5_query_mkey_mbox_out *out, int outlen) +{ + struct mlx5_query_mkey_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(out, 0, outlen); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_MKEY); + in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key)); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); + if (err) + return err; + + if (out->hdr.status) + return mlx5_cmd_status_to_err(&out->hdr); + + return 
err; +} +EXPORT_SYMBOL(mlx5_core_query_mkey); + +int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, + u32 *mkey) +{ + struct mlx5_query_special_ctxs_mbox_in in; + struct mlx5_query_special_ctxs_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + *mkey = be32_to_cpu(out.dump_fill_mkey); + + return err; +} +EXPORT_SYMBOL(mlx5_core_dump_fill_mkey); + +int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, + int npsvs, u32 *sig_index) +{ + struct mlx5_allocate_psv_in in; + struct mlx5_allocate_psv_out out; + int i, err; + + if (npsvs > MLX5_MAX_PSVS) + return -EINVAL; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_PSV); + in.npsv_pd = cpu_to_be32((npsvs << 28) | pdn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) { + mlx5_core_err(dev, "cmd exec failed %d\n", err); + return err; + } + + if (out.hdr.status) { + mlx5_core_err(dev, "create_psv bad status %d\n", + out.hdr.status); + return mlx5_cmd_status_to_err(&out.hdr); + } + + for (i = 0; i < npsvs; i++) + sig_index[i] = be32_to_cpu(out.psv_idx[i]) & 0xffffff; + + return err; +} +EXPORT_SYMBOL(mlx5_core_create_psv); + +int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num) +{ + struct mlx5_destroy_psv_in in; + struct mlx5_destroy_psv_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.psv_number = cpu_to_be32(psv_num); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_PSV); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) { + mlx5_core_err(dev, "destroy_psv cmd exec failed %d\n", err); + goto out; + } + + if (out.hdr.status) { + mlx5_core_err(dev, "destroy_psv bad status %d\n", + out.hdr.status); + err = mlx5_cmd_status_to_err(&out.hdr); + goto out; + } + +out: + return err; +} +EXPORT_SYMBOL(mlx5_core_destroy_psv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c new file mode 100644 index 000000000..8a64542ab --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -0,0 +1,534 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <asm-generic/kmap_types.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include "mlx5_core.h" + +enum { + MLX5_PAGES_CANT_GIVE = 0, + MLX5_PAGES_GIVE = 1, + MLX5_PAGES_TAKE = 2 +}; + +enum { + MLX5_BOOT_PAGES = 1, + MLX5_INIT_PAGES = 2, + MLX5_POST_INIT_PAGES = 3 +}; + +struct mlx5_pages_req { + struct mlx5_core_dev *dev; + u16 func_id; + s32 npages; + struct work_struct work; +}; + +struct fw_page { + struct rb_node rb_node; + u64 addr; + struct page *page; + u16 func_id; + unsigned long bitmask; + struct list_head list; + unsigned free_count; +}; + +struct mlx5_query_pages_inbox { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_query_pages_outbox { + struct mlx5_outbox_hdr hdr; + __be16 rsvd; + __be16 func_id; + __be32 num_pages; +}; + +struct mlx5_manage_pages_inbox { + struct mlx5_inbox_hdr hdr; + __be16 rsvd; + __be16 func_id; + __be32 num_entries; + __be64 pas[0]; +}; + +struct mlx5_manage_pages_outbox { + struct mlx5_outbox_hdr hdr; + __be32 num_entries; + u8 rsvd[4]; + __be64 pas[0]; +}; + +enum { + MAX_RECLAIM_TIME_MSECS = 5000, +}; + +enum { + MLX5_MAX_RECLAIM_TIME_MILI = 5000, + MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE, +}; + +static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id) +{ + struct rb_root *root = &dev->priv.page_root; + struct rb_node **new = &root->rb_node; + struct rb_node *parent = NULL; + struct fw_page *nfp; + struct fw_page *tfp; + int i; + + while (*new) { + parent = *new; + tfp = rb_entry(parent, struct fw_page, rb_node); + if (tfp->addr < addr) + new = &parent->rb_left; + else if (tfp->addr > addr) + new = &parent->rb_right; + else + return -EEXIST; + } + + nfp = kzalloc(sizeof(*nfp), GFP_KERNEL); + if (!nfp) + return -ENOMEM; + + nfp->addr = addr; + nfp->page = page; + nfp->func_id = func_id; + nfp->free_count = MLX5_NUM_4K_IN_PAGE; + for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++) + set_bit(i, &nfp->bitmask); + + rb_link_node(&nfp->rb_node, parent, new); + rb_insert_color(&nfp->rb_node, root); + list_add(&nfp->list, &dev->priv.free_list); + + return 0; +} + +static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr) +{ + struct rb_root *root = &dev->priv.page_root; + struct rb_node *tmp = root->rb_node; + struct fw_page *result = NULL; + struct fw_page *tfp; + + while (tmp) { + tfp = rb_entry(tmp, struct fw_page, rb_node); + if (tfp->addr < addr) { + tmp = tmp->rb_left; + } else if (tfp->addr > addr) { + tmp = tmp->rb_right; + } else { + result = tfp; + break; + } + } + + return result; +} + +static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, + s32 *npages, int boot) +{ + struct mlx5_query_pages_inbox in; + struct mlx5_query_pages_outbox out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES); + in.hdr.opmod = boot ? 
cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES); + + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + *npages = be32_to_cpu(out.num_pages); + *func_id = be16_to_cpu(out.func_id); + + return err; +} + +static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr) +{ + struct fw_page *fp; + unsigned n; + + if (list_empty(&dev->priv.free_list)) + return -ENOMEM; + + fp = list_entry(dev->priv.free_list.next, struct fw_page, list); + n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask)); + if (n >= MLX5_NUM_4K_IN_PAGE) { + mlx5_core_warn(dev, "alloc 4k bug\n"); + return -ENOENT; + } + clear_bit(n, &fp->bitmask); + fp->free_count--; + if (!fp->free_count) + list_del(&fp->list); + + *addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE; + + return 0; +} + +#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT) + +static void free_4k(struct mlx5_core_dev *dev, u64 addr) +{ + struct fw_page *fwp; + int n; + + fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK); + if (!fwp) { + mlx5_core_warn(dev, "page not found\n"); + return; + } + + n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT; + fwp->free_count++; + set_bit(n, &fwp->bitmask); + if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) { + rb_erase(&fwp->rb_node, &dev->priv.page_root); + if (fwp->free_count != 1) + list_del(&fwp->list); + dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK, + PAGE_SIZE, DMA_BIDIRECTIONAL); + __free_page(fwp->page); + kfree(fwp); + } else if (fwp->free_count == 1) { + list_add(&fwp->list, &dev->priv.free_list); + } +} + +static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id) +{ + struct page *page; + u64 addr; + int err; + int nid = dev_to_node(&dev->pdev->dev); + + page = alloc_pages_node(nid, GFP_HIGHUSER, 0); + if (!page) { + mlx5_core_warn(dev, "failed to allocate page\n"); + return -ENOMEM; + } + addr = dma_map_page(&dev->pdev->dev, page, 0, + PAGE_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(&dev->pdev->dev, addr)) { + mlx5_core_warn(dev, "failed dma mapping page\n"); + err = -ENOMEM; + goto out_alloc; + } + err = insert_page(dev, addr, page, func_id); + if (err) { + mlx5_core_err(dev, "failed to track allocated page\n"); + goto out_mapping; + } + + return 0; + +out_mapping: + dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); + +out_alloc: + __free_page(page); + + return err; +} +static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, + int notify_fail) +{ + struct mlx5_manage_pages_inbox *in; + struct mlx5_manage_pages_outbox out; + struct mlx5_manage_pages_inbox *nin; + int inlen; + u64 addr; + int err; + int i; + + inlen = sizeof(*in) + npages * sizeof(in->pas[0]); + in = mlx5_vzalloc(inlen); + if (!in) { + mlx5_core_warn(dev, "vzalloc failed %d\n", inlen); + return -ENOMEM; + } + memset(&out, 0, sizeof(out)); + + for (i = 0; i < npages; i++) { +retry: + err = alloc_4k(dev, &addr); + if (err) { + if (err == -ENOMEM) + err = alloc_system_page(dev, func_id); + if (err) + goto out_4k; + + goto retry; + } + in->pas[i] = cpu_to_be64(addr); + } + + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); + in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); + in->func_id = cpu_to_be16(func_id); + in->num_entries = cpu_to_be32(npages); + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) { + mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", + func_id, npages, err); + goto out_alloc; + } + dev->priv.fw_pages += 
npages; + + if (out.hdr.status) { + err = mlx5_cmd_status_to_err(&out.hdr); + if (err) { + mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", + func_id, npages, out.hdr.status); + goto out_alloc; + } + } + + mlx5_core_dbg(dev, "err %d\n", err); + + goto out_free; + +out_alloc: + if (notify_fail) { + nin = kzalloc(sizeof(*nin), GFP_KERNEL); + if (!nin) { + mlx5_core_warn(dev, "allocation failed\n"); + goto out_4k; + } + memset(&out, 0, sizeof(out)); + nin->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); + nin->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE); + if (mlx5_cmd_exec(dev, nin, sizeof(*nin), &out, sizeof(out))) + mlx5_core_warn(dev, "page notify failed\n"); + kfree(nin); + } + +out_4k: + for (i--; i >= 0; i--) + free_4k(dev, be64_to_cpu(in->pas[i])); +out_free: + kvfree(in); + return err; +} + +static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, + int *nclaimed) +{ + struct mlx5_manage_pages_inbox in; + struct mlx5_manage_pages_outbox *out; + int num_claimed; + int outlen; + u64 addr; + int err; + int i; + + if (nclaimed) + *nclaimed = 0; + + memset(&in, 0, sizeof(in)); + outlen = sizeof(*out) + npages * sizeof(out->pas[0]); + out = mlx5_vzalloc(outlen); + if (!out) + return -ENOMEM; + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); + in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); + in.func_id = cpu_to_be16(func_id); + in.num_entries = cpu_to_be32(npages); + mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); + if (err) { + mlx5_core_err(dev, "failed reclaiming pages\n"); + goto out_free; + } + dev->priv.fw_pages -= npages; + + if (out->hdr.status) { + err = mlx5_cmd_status_to_err(&out->hdr); + goto out_free; + } + + num_claimed = be32_to_cpu(out->num_entries); + if (nclaimed) + *nclaimed = num_claimed; + + for (i = 0; i < num_claimed; i++) { + addr = be64_to_cpu(out->pas[i]); + free_4k(dev, addr); + } + +out_free: + kvfree(out); + return err; +} + +static void pages_work_handler(struct work_struct *work) +{ + struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work); + struct mlx5_core_dev *dev = req->dev; + int err = 0; + + if (req->npages < 0) + err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL); + else if (req->npages > 0) + err = give_pages(dev, req->func_id, req->npages, 1); + + if (err) + mlx5_core_warn(dev, "%s fail %d\n", + req->npages < 0 ? "reclaim" : "give", err); + + kfree(req); +} + +void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, + s32 npages) +{ + struct mlx5_pages_req *req; + + req = kzalloc(sizeof(*req), GFP_ATOMIC); + if (!req) { + mlx5_core_warn(dev, "failed to allocate pages request\n"); + return; + } + + req->dev = dev; + req->func_id = func_id; + req->npages = npages; + INIT_WORK(&req->work, pages_work_handler); + queue_work(dev->priv.pg_wq, &req->work); +} + +int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) +{ + u16 uninitialized_var(func_id); + s32 uninitialized_var(npages); + int err; + + err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot); + if (err) + return err; + + mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n", + npages, boot ? 
"boot" : "init", func_id); + + return give_pages(dev, func_id, npages, 0); +} + +enum { + MLX5_BLKS_FOR_RECLAIM_PAGES = 12 +}; + +static int optimal_reclaimed_pages(void) +{ + struct mlx5_cmd_prot_block *block; + struct mlx5_cmd_layout *lay; + int ret; + + ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) - + sizeof(struct mlx5_manage_pages_outbox)) / + FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]); + + return ret; +} + +int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) +{ + unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); + struct fw_page *fwp; + struct rb_node *p; + int nclaimed = 0; + int err; + + do { + p = rb_first(&dev->priv.page_root); + if (p) { + fwp = rb_entry(p, struct fw_page, rb_node); + err = reclaim_pages(dev, fwp->func_id, + optimal_reclaimed_pages(), + &nclaimed); + if (err) { + mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", + err); + return err; + } + if (nclaimed) + end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); + } + if (time_after(jiffies, end)) { + mlx5_core_warn(dev, "FW did not return all pages. giving up...\n"); + break; + } + } while (p); + + return 0; +} + +void mlx5_pagealloc_init(struct mlx5_core_dev *dev) +{ + dev->priv.page_root = RB_ROOT; + INIT_LIST_HEAD(&dev->priv.free_list); +} + +void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev) +{ + /* nothing */ +} + +int mlx5_pagealloc_start(struct mlx5_core_dev *dev) +{ + dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator"); + if (!dev->priv.pg_wq) + return -ENOMEM; + + return 0; +} + +void mlx5_pagealloc_stop(struct mlx5_core_dev *dev) +{ + destroy_workqueue(dev->priv.pg_wq); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pd.c b/drivers/net/ethernet/mellanox/mlx5/core/pd.c new file mode 100644 index 000000000..f2d3aee90 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/pd.c @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include "mlx5_core.h" + +struct mlx5_alloc_pd_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_alloc_pd_mbox_out { + struct mlx5_outbox_hdr hdr; + __be32 pdn; + u8 rsvd[4]; +}; + +struct mlx5_dealloc_pd_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 pdn; + u8 rsvd[4]; +}; + +struct mlx5_dealloc_pd_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn) +{ + struct mlx5_alloc_pd_mbox_in in; + struct mlx5_alloc_pd_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_PD); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + *pdn = be32_to_cpu(out.pdn) & 0xffffff; + return err; +} +EXPORT_SYMBOL(mlx5_core_alloc_pd); + +int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn) +{ + struct mlx5_dealloc_pd_mbox_in in; + struct mlx5_dealloc_pd_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_PD); + in.pdn = cpu_to_be32(pdn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_dealloc_pd); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c new file mode 100644 index 000000000..49e90f261 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/module.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include "mlx5_core.h" + +int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, + int size_in, void *data_out, int size_out, + u16 reg_num, int arg, int write) +{ + struct mlx5_access_reg_mbox_in *in = NULL; + struct mlx5_access_reg_mbox_out *out = NULL; + int err = -ENOMEM; + + in = mlx5_vzalloc(sizeof(*in) + size_in); + if (!in) + return -ENOMEM; + + out = mlx5_vzalloc(sizeof(*out) + size_out); + if (!out) + goto ex1; + + memcpy(in->data, data_in, size_in); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ACCESS_REG); + in->hdr.opmod = cpu_to_be16(!write); + in->arg = cpu_to_be32(arg); + in->register_id = cpu_to_be16(reg_num); + err = mlx5_cmd_exec(dev, in, sizeof(*in) + size_in, out, + sizeof(*out) + size_out); + if (err) + goto ex2; + + if (out->hdr.status) + err = mlx5_cmd_status_to_err(&out->hdr); + + if (!err) + memcpy(data_out, out->data, size_out); + +ex2: + kvfree(out); +ex1: + kvfree(in); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_access_reg); + + +struct mlx5_reg_pcap { + u8 rsvd0; + u8 port_num; + u8 rsvd1[2]; + __be32 caps_127_96; + __be32 caps_95_64; + __be32 caps_63_32; + __be32 caps_31_0; +}; + +int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps) +{ + struct mlx5_reg_pcap in; + struct mlx5_reg_pcap out; + int err; + + memset(&in, 0, sizeof(in)); + in.caps_127_96 = cpu_to_be32(caps); + in.port_num = port_num; + + err = mlx5_core_access_reg(dev, &in, sizeof(in), &out, + sizeof(out), MLX5_REG_PCAP, 0, 1); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_set_port_caps); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c new file mode 100644 index 000000000..dc7dbf7e9 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -0,0 +1,443 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + + +#include <linux/gfp.h> +#include <linux/export.h> +#include <linux/mlx5/cmd.h> +#include <linux/mlx5/qp.h> +#include <linux/mlx5/driver.h> + +#include "mlx5_core.h" + +static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev, + u32 rsn) +{ + struct mlx5_qp_table *table = &dev->priv.qp_table; + struct mlx5_core_rsc_common *common; + + spin_lock(&table->lock); + + common = radix_tree_lookup(&table->tree, rsn); + if (common) + atomic_inc(&common->refcount); + + spin_unlock(&table->lock); + + if (!common) { + mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n", + rsn); + return NULL; + } + return common; +} + +void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common) +{ + if (atomic_dec_and_test(&common->refcount)) + complete(&common->free); +} + +void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type) +{ + struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn); + struct mlx5_core_qp *qp; + + if (!common) + return; + + switch (common->res) { + case MLX5_RES_QP: + qp = (struct mlx5_core_qp *)common; + qp->event(qp, event_type); + break; + + default: + mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn); + } + + mlx5_core_put_rsc(common); +} + +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING +void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) +{ + struct mlx5_eqe_page_fault *pf_eqe = &eqe->data.page_fault; + int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK; + struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn); + struct mlx5_core_qp *qp = + container_of(common, struct mlx5_core_qp, common); + struct mlx5_pagefault pfault; + + if (!qp) { + mlx5_core_warn(dev, "ODP event for non-existent QP %06x\n", + qpn); + return; + } + + pfault.event_subtype = eqe->sub_type; + pfault.flags = (be32_to_cpu(pf_eqe->flags_qpn) >> MLX5_QPN_BITS) & + (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE | MLX5_PFAULT_RDMA); + pfault.bytes_committed = be32_to_cpu( + pf_eqe->bytes_committed); + + mlx5_core_dbg(dev, + "PAGE_FAULT: subtype: 0x%02x, flags: 0x%02x,\n", + eqe->sub_type, pfault.flags); + + switch (eqe->sub_type) { + case MLX5_PFAULT_SUBTYPE_RDMA: + /* RDMA based event */ + pfault.rdma.r_key = + be32_to_cpu(pf_eqe->rdma.r_key); + pfault.rdma.packet_size = + be16_to_cpu(pf_eqe->rdma.packet_length); + pfault.rdma.rdma_op_len = + be32_to_cpu(pf_eqe->rdma.rdma_op_len); + pfault.rdma.rdma_va = + be64_to_cpu(pf_eqe->rdma.rdma_va); + mlx5_core_dbg(dev, + "PAGE_FAULT: qpn: 0x%06x, r_key: 0x%08x,\n", + qpn, pfault.rdma.r_key); + mlx5_core_dbg(dev, + "PAGE_FAULT: rdma_op_len: 0x%08x,\n", + pfault.rdma.rdma_op_len); + mlx5_core_dbg(dev, + "PAGE_FAULT: rdma_va: 0x%016llx,\n", + pfault.rdma.rdma_va); + mlx5_core_dbg(dev, + "PAGE_FAULT: bytes_committed: 0x%06x\n", + pfault.bytes_committed); + break; + + case MLX5_PFAULT_SUBTYPE_WQE: + /* WQE based event */ + pfault.wqe.wqe_index = + be16_to_cpu(pf_eqe->wqe.wqe_index); + pfault.wqe.packet_size = + be16_to_cpu(pf_eqe->wqe.packet_length); + mlx5_core_dbg(dev, + "PAGE_FAULT: qpn: 0x%06x, wqe_index: 0x%04x,\n", + qpn, pfault.wqe.wqe_index); + mlx5_core_dbg(dev, + "PAGE_FAULT: bytes_committed: 0x%06x\n", + pfault.bytes_committed); + break; + + default: + mlx5_core_warn(dev, + "Unsupported page fault event sub-type: 0x%02hhx, QP %06x\n", + eqe->sub_type, qpn); + /* Unsupported page faults should still be resolved by the + * page fault handler + */ + } + + if (qp->pfault_handler) { + qp->pfault_handler(qp, &pfault); + } else { + mlx5_core_err(dev, + "ODP event for QP %08x, without a fault 
handler in QP\n", + qpn); + /* Page fault will remain unresolved. QP will hang until it is + * destroyed + */ + } + + mlx5_core_put_rsc(common); +} +#endif + +int mlx5_core_create_qp(struct mlx5_core_dev *dev, + struct mlx5_core_qp *qp, + struct mlx5_create_qp_mbox_in *in, + int inlen) +{ + struct mlx5_qp_table *table = &dev->priv.qp_table; + struct mlx5_create_qp_mbox_out out; + struct mlx5_destroy_qp_mbox_in din; + struct mlx5_destroy_qp_mbox_out dout; + int err; + + memset(&out, 0, sizeof(out)); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP); + + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) { + mlx5_core_warn(dev, "ret %d\n", err); + return err; + } + + if (out.hdr.status) { + mlx5_core_warn(dev, "current num of QPs 0x%x\n", + atomic_read(&dev->num_qps)); + return mlx5_cmd_status_to_err(&out.hdr); + } + + qp->qpn = be32_to_cpu(out.qpn) & 0xffffff; + mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn); + + qp->common.res = MLX5_RES_QP; + spin_lock_irq(&table->lock); + err = radix_tree_insert(&table->tree, qp->qpn, qp); + spin_unlock_irq(&table->lock); + if (err) { + mlx5_core_warn(dev, "err %d\n", err); + goto err_cmd; + } + + err = mlx5_debug_qp_add(dev, qp); + if (err) + mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n", + qp->qpn); + + qp->pid = current->pid; + atomic_set(&qp->common.refcount, 1); + atomic_inc(&dev->num_qps); + init_completion(&qp->common.free); + + return 0; + +err_cmd: + memset(&din, 0, sizeof(din)); + memset(&dout, 0, sizeof(dout)); + din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP); + din.qpn = cpu_to_be32(qp->qpn); + mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout)); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_create_qp); + +int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, + struct mlx5_core_qp *qp) +{ + struct mlx5_destroy_qp_mbox_in in; + struct mlx5_destroy_qp_mbox_out out; + struct mlx5_qp_table *table = &dev->priv.qp_table; + unsigned long flags; + int err; + + mlx5_debug_qp_remove(dev, qp); + + spin_lock_irqsave(&table->lock, flags); + radix_tree_delete(&table->tree, qp->qpn); + spin_unlock_irqrestore(&table->lock, flags); + + mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp); + wait_for_completion(&qp->common.free); + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP); + in.qpn = cpu_to_be32(qp->qpn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + atomic_dec(&dev->num_qps); + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp); + +int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state, + enum mlx5_qp_state new_state, + struct mlx5_modify_qp_mbox_in *in, int sqd_event, + struct mlx5_core_qp *qp) +{ + static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = { + [MLX5_QP_STATE_RST] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_RST2INIT_QP, + }, + [MLX5_QP_STATE_INIT] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_INIT2INIT_QP, + [MLX5_QP_STATE_RTR] = MLX5_CMD_OP_INIT2RTR_QP, + }, + [MLX5_QP_STATE_RTR] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTR2RTS_QP, + }, + [MLX5_QP_STATE_RTS] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + 
[MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP, + }, + [MLX5_QP_STATE_SQD] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + }, + [MLX5_QP_STATE_SQER] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQERR2RTS_QP, + }, + [MLX5_QP_STATE_ERR] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + } + }; + + struct mlx5_modify_qp_mbox_out out; + int err = 0; + u16 op; + + if (cur_state >= MLX5_QP_NUM_STATE || new_state >= MLX5_QP_NUM_STATE || + !optab[cur_state][new_state]) + return -EINVAL; + + memset(&out, 0, sizeof(out)); + op = optab[cur_state][new_state]; + in->hdr.opcode = cpu_to_be16(op); + in->qpn = cpu_to_be32(qp->qpn); + err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out)); + if (err) + return err; + + return mlx5_cmd_status_to_err(&out.hdr); +} +EXPORT_SYMBOL_GPL(mlx5_core_qp_modify); + +void mlx5_init_qp_table(struct mlx5_core_dev *dev) +{ + struct mlx5_qp_table *table = &dev->priv.qp_table; + + spin_lock_init(&table->lock); + INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); + mlx5_qp_debugfs_init(dev); +} + +void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev) +{ + mlx5_qp_debugfs_cleanup(dev); +} + +int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, + struct mlx5_query_qp_mbox_out *out, int outlen) +{ + struct mlx5_query_qp_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(out, 0, outlen); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP); + in.qpn = cpu_to_be32(qp->qpn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); + if (err) + return err; + + if (out->hdr.status) + return mlx5_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_qp_query); + +int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn) +{ + struct mlx5_alloc_xrcd_mbox_in in; + struct mlx5_alloc_xrcd_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_XRCD); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + else + *xrcdn = be32_to_cpu(out.xrcdn); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc); + +int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn) +{ + struct mlx5_dealloc_xrcd_mbox_in in; + struct mlx5_dealloc_xrcd_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_XRCD); + in.xrcdn = cpu_to_be32(xrcdn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc); + +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING +int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn, + u8 flags, int error) +{ + struct mlx5_page_fault_resume_mbox_in in; + struct mlx5_page_fault_resume_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_PAGE_FAULT_RESUME); + in.hdr.opmod = 0; + flags &= (MLX5_PAGE_FAULT_RESUME_REQUESTOR | + MLX5_PAGE_FAULT_RESUME_WRITE | + MLX5_PAGE_FAULT_RESUME_RDMA); + flags |= (error ? 
MLX5_PAGE_FAULT_RESUME_ERROR : 0); + in.flags_qpn = cpu_to_be32((qpn & MLX5_QPN_MASK) | + (flags << MLX5_QPN_BITS)); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume); +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c new file mode 100644 index 000000000..f9d25dcd0 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include <linux/mlx5/srq.h> +#include <rdma/ib_verbs.h> +#include "mlx5_core.h" + +void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type) +{ + struct mlx5_srq_table *table = &dev->priv.srq_table; + struct mlx5_core_srq *srq; + + spin_lock(&table->lock); + + srq = radix_tree_lookup(&table->tree, srqn); + if (srq) + atomic_inc(&srq->refcount); + + spin_unlock(&table->lock); + + if (!srq) { + mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn); + return; + } + + srq->event(srq, event_type); + + if (atomic_dec_and_test(&srq->refcount)) + complete(&srq->free); +} + +struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn) +{ + struct mlx5_srq_table *table = &dev->priv.srq_table; + struct mlx5_core_srq *srq; + + spin_lock(&table->lock); + + srq = radix_tree_lookup(&table->tree, srqn); + if (srq) + atomic_inc(&srq->refcount); + + spin_unlock(&table->lock); + + return srq; +} +EXPORT_SYMBOL(mlx5_core_get_srq); + +int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_create_srq_mbox_in *in, int inlen) +{ + struct mlx5_create_srq_mbox_out out; + struct mlx5_srq_table *table = &dev->priv.srq_table; + struct mlx5_destroy_srq_mbox_in din; + struct mlx5_destroy_srq_mbox_out dout; + int err; + + memset(&out, 0, sizeof(out)); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ); + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + srq->srqn = be32_to_cpu(out.srqn) & 0xffffff; + + atomic_set(&srq->refcount, 1); + init_completion(&srq->free); + + spin_lock_irq(&table->lock); + err = radix_tree_insert(&table->tree, srq->srqn, srq); + spin_unlock_irq(&table->lock); + if (err) { + mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn); + goto err_cmd; + } + + return 0; + +err_cmd: + memset(&din, 0, sizeof(din)); + memset(&dout, 0, sizeof(dout)); + din.srqn = cpu_to_be32(srq->srqn); + din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ); + mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout)); + return err; +} +EXPORT_SYMBOL(mlx5_core_create_srq); + +int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq) +{ + struct mlx5_destroy_srq_mbox_in in; + struct mlx5_destroy_srq_mbox_out out; + struct mlx5_srq_table *table = &dev->priv.srq_table; + struct mlx5_core_srq *tmp; + int err; + + spin_lock_irq(&table->lock); + tmp = radix_tree_delete(&table->tree, srq->srqn); + spin_unlock_irq(&table->lock); + if (!tmp) { + mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn); + return -EINVAL; + } + if (tmp != srq) { + mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn); + return -EINVAL; + } + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ); + in.srqn = cpu_to_be32(srq->srqn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + if (atomic_dec_and_test(&srq->refcount)) + complete(&srq->free); + wait_for_completion(&srq->free); + + return 0; +} +EXPORT_SYMBOL(mlx5_core_destroy_srq); + +int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_query_srq_mbox_out *out) +{ + struct mlx5_query_srq_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + 
memset(out, 0, sizeof(*out)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ); + in.srqn = cpu_to_be32(srq->srqn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err) + return err; + + if (out->hdr.status) + return mlx5_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_query_srq); + +int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + u16 lwm, int is_srq) +{ + struct mlx5_arm_srq_mbox_in in; + struct mlx5_arm_srq_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ); + in.hdr.opmod = cpu_to_be16(!!is_srq); + in.srqn = cpu_to_be32(srq->srqn); + in.lwm = cpu_to_be16(lwm); + + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_arm_srq); + +void mlx5_init_srq_table(struct mlx5_core_dev *dev) +{ + struct mlx5_srq_table *table = &dev->priv.srq_table; + + spin_lock_init(&table->lock); + INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); +} + +void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev) +{ + /* nothing */ +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c new file mode 100644 index 000000000..5a89bb1d6 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include "mlx5_core.h" + +enum { + NUM_DRIVER_UARS = 4, + NUM_LOW_LAT_UUARS = 4, +}; + + +struct mlx5_alloc_uar_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_alloc_uar_mbox_out { + struct mlx5_outbox_hdr hdr; + __be32 uarn; + u8 rsvd[4]; +}; + +struct mlx5_free_uar_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 uarn; + u8 rsvd[4]; +}; + +struct mlx5_free_uar_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn) +{ + struct mlx5_alloc_uar_mbox_in in; + struct mlx5_alloc_uar_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_UAR); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + goto ex; + + if (out.hdr.status) { + err = mlx5_cmd_status_to_err(&out.hdr); + goto ex; + } + + *uarn = be32_to_cpu(out.uarn) & 0xffffff; + +ex: + return err; +} +EXPORT_SYMBOL(mlx5_cmd_alloc_uar); + +int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn) +{ + struct mlx5_free_uar_mbox_in in; + struct mlx5_free_uar_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_UAR); + in.uarn = cpu_to_be32(uarn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + goto ex; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + +ex: + return err; +} +EXPORT_SYMBOL(mlx5_cmd_free_uar); + +static int need_uuar_lock(int uuarn) +{ + int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE; + + if (uuarn == 0 || tot_uuars - NUM_LOW_LAT_UUARS) + return 0; + + return 1; +} + +int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) +{ + int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE; + struct mlx5_bf *bf; + phys_addr_t addr; + int err; + int i; + + uuari->num_uars = NUM_DRIVER_UARS; + uuari->num_low_latency_uuars = NUM_LOW_LAT_UUARS; + + mutex_init(&uuari->lock); + uuari->uars = kcalloc(uuari->num_uars, sizeof(*uuari->uars), GFP_KERNEL); + if (!uuari->uars) + return -ENOMEM; + + uuari->bfs = kcalloc(tot_uuars, sizeof(*uuari->bfs), GFP_KERNEL); + if (!uuari->bfs) { + err = -ENOMEM; + goto out_uars; + } + + uuari->bitmap = kcalloc(BITS_TO_LONGS(tot_uuars), sizeof(*uuari->bitmap), + GFP_KERNEL); + if (!uuari->bitmap) { + err = -ENOMEM; + goto out_bfs; + } + + uuari->count = kcalloc(tot_uuars, sizeof(*uuari->count), GFP_KERNEL); + if (!uuari->count) { + err = -ENOMEM; + goto out_bitmap; + } + + for (i = 0; i < uuari->num_uars; i++) { + err = mlx5_cmd_alloc_uar(dev, &uuari->uars[i].index); + if (err) + goto out_count; + + addr = dev->iseg_base + ((phys_addr_t)(uuari->uars[i].index) << PAGE_SHIFT); + uuari->uars[i].map = ioremap(addr, PAGE_SIZE); + if (!uuari->uars[i].map) { + mlx5_cmd_free_uar(dev, uuari->uars[i].index); + err = -ENOMEM; + goto out_count; + } + mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n", + uuari->uars[i].index, uuari->uars[i].map); + } + + for (i = 0; i < tot_uuars; i++) { + bf = &uuari->bfs[i]; + + bf->buf_size = dev->caps.gen.bf_reg_size / 2; + bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE]; + bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map; + bf->reg = NULL; /* Add WC support */ + bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.gen.bf_reg_size + + MLX5_BF_OFFSET; + bf->need_lock = need_uuar_lock(i); + 
spin_lock_init(&bf->lock);
+		spin_lock_init(&bf->lock32);
+		bf->uuarn = i;
+	}
+
+	return 0;
+
+out_count:
+	for (i--; i >= 0; i--) {
+		iounmap(uuari->uars[i].map);
+		mlx5_cmd_free_uar(dev, uuari->uars[i].index);
+	}
+	kfree(uuari->count);
+
+out_bitmap:
+	kfree(uuari->bitmap);
+
+out_bfs:
+	kfree(uuari->bfs);
+
+out_uars:
+	kfree(uuari->uars);
+	return err;
+}
+
+int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
+{
+	int i = uuari->num_uars;
+
+	for (i--; i >= 0; i--) {
+		iounmap(uuari->uars[i].map);
+		mlx5_cmd_free_uar(dev, uuari->uars[i].index);
+	}
+
+	kfree(uuari->count);
+	kfree(uuari->bitmap);
+	kfree(uuari->bfs);
+	kfree(uuari->uars);
+
+	return 0;
+}
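
Usage note: the UAR helpers exported by uar.c are consumed elsewhere in the driver (and by the mlx5 IB driver) rather than in this file. As a rough sketch of the calling pattern — the function name example_uar_demo() is invented for illustration, while the calls and the iseg_base + (index << PAGE_SHIFT) mapping rule are the same ones used by mlx5_alloc_uuars() above — allocating, mapping and releasing a single UAR page looks roughly like this:

/*
 * Illustrative only -- not part of the imported file.  A minimal sketch of
 * how a caller might use the helpers exported by uar.c; example_uar_demo()
 * is a made-up name, and the iseg_base + (index << PAGE_SHIFT) mapping rule
 * is taken from mlx5_alloc_uuars() above.
 */
#include <linux/io.h>
#include <linux/mlx5/driver.h>

static int example_uar_demo(struct mlx5_core_dev *dev)
{
	void __iomem *map;
	phys_addr_t addr;
	u32 uarn;
	int err;

	err = mlx5_cmd_alloc_uar(dev, &uarn);	/* ALLOC_UAR firmware command */
	if (err)
		return err;

	/* UAR pages are mapped relative to the device's initialization segment. */
	addr = dev->iseg_base + ((phys_addr_t)uarn << PAGE_SHIFT);
	map = ioremap(addr, PAGE_SIZE);
	if (!map) {
		mlx5_cmd_free_uar(dev, uarn);
		return -ENOMEM;
	}

	/* ... ring doorbells through "map" here ... */

	iounmap(map);
	mlx5_cmd_free_uar(dev, uarn);		/* DEALLOC_UAR firmware command */
	return 0;
}

mlx5_alloc_uuars() applies the same allocate-and-ioremap sequence NUM_DRIVER_UARS times and then carves each mapped page into MLX5_BF_REGS_PER_PAGE blue-flame registers of caps.gen.bf_reg_size bytes each.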