path: root/drivers/net/ethernet/mellanox/mlx4/alloc.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-11 22:27:06 (GMT)
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-11 22:27:06 (GMT)
commit	70e71ca0af244f48a5dcf56dc435243792e3a495 (patch)
tree	f7d9c4c4d9a857a00043e9bf6aa2d6f533a34778 /drivers/net/ethernet/mellanox/mlx4/alloc.c
parent	bae41e45b7400496b9bf0c70c6004419d9987819 (diff)
parent	00c83b01d58068dfeb2e1351cca6fccf2a83fa8f (diff)
download	linux-70e71ca0af244f48a5dcf56dc435243792e3a495.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) New offloading infrastructure and example 'rocker' driver for offloading of switching and routing to hardware. This work was done by a large group of dedicated individuals, not limited to: Scott Feldman, Jiri Pirko, Thomas Graf, John Fastabend, Jamal Hadi Salim, Andy Gospodarek, Florian Fainelli, Roopa Prabhu

 2) Start making the networking operate on IOV iterators instead of modifying iov objects in-situ during transfers. Thanks to Al Viro and Herbert Xu.

 3) A set of new netlink interfaces for the TIPC stack, from Richard Alpe.

 4) Remove unnecessary looping during ipv6 routing lookups, from Martin KaFai Lau.

 5) Add PAUSE frame generation support to gianfar driver, from Matei Pavaluca.

 6) Allow for larger reordering levels in TCP, which are easily achievable in the real world right now, from Eric Dumazet.

 7) Add a variant of napi_schedule that doesn't need to disable cpu interrupts, from Eric Dumazet.

 8) Use a doubly linked list to optimize neigh_parms_release(), from Nicolas Dichtel.

 9) Various enhancements to the kernel BPF verifier, and allow eBPF programs to actually be attached to sockets. From Alexei Starovoitov.

10) Support TSO/LSO in sunvnet driver, from David L Stevens.

11) Allow controlling ECN usage via routing metrics, from Florian Westphal.

12) Remote checksum offload, from Tom Herbert.

13) Add split-header receive, BQL, and xmit_more support to amd-xgbe driver, from Thomas Lendacky.

14) Add MPLS support to openvswitch, from Simon Horman.

15) Support wildcard tunnel endpoints in ipv6 tunnels, from Steffen Klassert.

16) Do gro flushes on a per-device basis using a timer, from Eric Dumazet. This tries to resolve the conflicting goals between the desired handling of bulk vs. RPC-like traffic.

17) Allow userspace to ask for the CPU on which a packet was received/steered, via SO_INCOMING_CPU. From Eric Dumazet.

18) Limit GSO packets to half the current congestion window, from Eric Dumazet.

19) Add a generic helper so that all drivers set their RSS keys in a consistent way, from Eric Dumazet.

20) Add xmit_more support to enic driver, from Govindarajulu Varadarajan.

21) Add VLAN packet scheduler action, from Jiri Pirko.

22) Support configurable RSS hash functions via ethtool, from Eyal Perry.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1820 commits)
  Fix race condition between vxlan_sock_add and vxlan_sock_release
  net/macb: fix compilation warning for print_hex_dump() called with skb->mac_header
  net/mlx4: Add support for A0 steering
  net/mlx4: Refactor QUERY_PORT
  net/mlx4_core: Add explicit error message when rule doesn't meet configuration
  net/mlx4: Add A0 hybrid steering
  net/mlx4: Add mlx4_bitmap zone allocator
  net/mlx4: Add a check if there are too many reserved QPs
  net/mlx4: Change QP allocation scheme
  net/mlx4_core: Use tasklet for user-space CQ completion events
  net/mlx4_core: Mask out host side virtualization features for guests
  net/mlx4_en: Set csum level for encapsulated packets
  be2net: Export tunnel offloads only when a VxLAN tunnel is created
  gianfar: Fix dma check map error when DMA_API_DEBUG is enabled
  cxgb4/csiostor: Don't use MASTER_MUST for fw_hello call
  net: fec: only enable mdio interrupt before phy device link up
  net: fec: clear all interrupt events to support i.MX6SX
  net: fec: reset fep link status in suspend function
  net: sock: fix access via invalid file descriptor
  net: introduce helper macro for_each_cmsghdr
  ...
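Item 17 above refers to the new SO_INCOMING_CPU socket option. As a minimal user-space sketch (incoming_cpu() is a hypothetical helper; it assumes libc headers recent enough to define the constant, and the fallback value below is the asm-generic one, which differs on a few architectures):

#include <sys/socket.h>

#ifndef SO_INCOMING_CPU
#define SO_INCOMING_CPU 49	/* asm-generic value; some architectures use a different number */
#endif

/* Hypothetical helper: report the CPU that the kernel's receive path
 * recorded for packets queued on this socket, or -1 on error. */
static int incoming_cpu(int fd)
{
	int cpu;
	socklen_t len = sizeof(cpu);

	if (getsockopt(fd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, &len) < 0)
		return -1;

	return cpu;
}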
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4/alloc.c')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/alloc.c	425
1 file changed, 419 insertions, 6 deletions
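For orientation before the diff itself, here is a rough sketch of how a caller inside the mlx4 driver might use the zone-allocator API added below. example_zone_usage() is a hypothetical helper: the count, alignment, priority and flags are made-up example values, the bitmap is assumed to have been initialized elsewhere with mlx4_bitmap_init(), and the declarations are assumed to live in the driver-internal mlx4.h header.

#include <linux/errno.h>
#include "mlx4.h"	/* driver-internal header assumed to declare the zone allocator API */

/* Illustrative only: register an already-initialized bitmap as a single zone,
 * allocate an aligned range from it, then free the range and tear down. */
static int example_zone_usage(struct mlx4_bitmap *bitmap)
{
	struct mlx4_zone_allocator *zones;
	u32 uid, obj;
	int err;

	zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP);
	if (!zones)
		return -ENOMEM;

	/* one zone at offset 0, priority 0, no MLX4_ZONE_* behaviour flags */
	err = mlx4_zone_add_one(zones, bitmap, 0, 0, 0, &uid);
	if (err)
		goto out;

	/* 8 contiguous objects aligned to 8, nothing to skip (skip_mask = 0) */
	obj = mlx4_zone_alloc_entries(zones, uid, 8, 8, 0, NULL);
	if (obj != (u32)-1)
		mlx4_zone_free_entries(zones, uid, obj, 8);

out:
	mlx4_zone_allocator_destroy(zones);
	return err;
}

In this merge the same machinery appears to back the QP allocation rework ("net/mlx4: Change QP allocation scheme" in the shortlog above), where several QP ranges are carved out of shared bitmaps.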
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index b0297da..963dd7e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -76,22 +76,53 @@ void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr)
mlx4_bitmap_free_range(bitmap, obj, 1, use_rr);
}
-u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
+static unsigned long find_aligned_range(unsigned long *bitmap,
+ u32 start, u32 nbits,
+ int len, int align, u32 skip_mask)
+{
+ unsigned long end, i;
+
+again:
+ start = ALIGN(start, align);
+
+ while ((start < nbits) && (test_bit(start, bitmap) ||
+ (start & skip_mask)))
+ start += align;
+
+ if (start >= nbits)
+ return -1;
+
+ end = start+len;
+ if (end > nbits)
+ return -1;
+
+ for (i = start + 1; i < end; i++) {
+ if (test_bit(i, bitmap) || ((u32)i & skip_mask)) {
+ start = i + 1;
+ goto again;
+ }
+ }
+
+ return start;
+}
+
+u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt,
+ int align, u32 skip_mask)
{
u32 obj;
- if (likely(cnt == 1 && align == 1))
+ if (likely(cnt == 1 && align == 1 && !skip_mask))
return mlx4_bitmap_alloc(bitmap);
spin_lock(&bitmap->lock);
- obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
- bitmap->last, cnt, align - 1);
+ obj = find_aligned_range(bitmap->table, bitmap->last,
+ bitmap->max, cnt, align, skip_mask);
if (obj >= bitmap->max) {
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
& bitmap->mask;
- obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
- 0, cnt, align - 1);
+ obj = find_aligned_range(bitmap->table, 0, bitmap->max,
+ cnt, align, skip_mask);
}
if (obj < bitmap->max) {
@@ -118,6 +149,11 @@ u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
return bitmap->avail;
}
+static u32 mlx4_bitmap_masked_value(struct mlx4_bitmap *bitmap, u32 obj)
+{
+ return obj & (bitmap->max + bitmap->reserved_top - 1);
+}
+
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
int use_rr)
{
@@ -147,6 +183,7 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
bitmap->mask = mask;
bitmap->reserved_top = reserved_top;
bitmap->avail = num - reserved_top - reserved_bot;
+ bitmap->effective_len = bitmap->avail;
spin_lock_init(&bitmap->lock);
bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
sizeof (long), GFP_KERNEL);
@@ -163,6 +200,382 @@ void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
kfree(bitmap->table);
}
+struct mlx4_zone_allocator {
+ struct list_head entries;
+ struct list_head prios;
+ u32 last_uid;
+ u32 mask;
+ /* protect the zone_allocator from concurrent accesses */
+ spinlock_t lock;
+ enum mlx4_zone_alloc_flags flags;
+};
+
+struct mlx4_zone_entry {
+ struct list_head list;
+ struct list_head prio_list;
+ u32 uid;
+ struct mlx4_zone_allocator *allocator;
+ struct mlx4_bitmap *bitmap;
+ int use_rr;
+ int priority;
+ int offset;
+ enum mlx4_zone_flags flags;
+};
+
+struct mlx4_zone_allocator *mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags)
+{
+ struct mlx4_zone_allocator *zones = kmalloc(sizeof(*zones), GFP_KERNEL);
+
+ if (NULL == zones)
+ return NULL;
+
+ INIT_LIST_HEAD(&zones->entries);
+ INIT_LIST_HEAD(&zones->prios);
+ spin_lock_init(&zones->lock);
+ zones->last_uid = 0;
+ zones->mask = 0;
+ zones->flags = flags;
+
+ return zones;
+}
+
+int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc,
+ struct mlx4_bitmap *bitmap,
+ u32 flags,
+ int priority,
+ int offset,
+ u32 *puid)
+{
+ u32 mask = mlx4_bitmap_masked_value(bitmap, (u32)-1);
+ struct mlx4_zone_entry *it;
+ struct mlx4_zone_entry *zone = kmalloc(sizeof(*zone), GFP_KERNEL);
+
+ if (NULL == zone)
+ return -ENOMEM;
+
+ zone->flags = flags;
+ zone->bitmap = bitmap;
+ zone->use_rr = (flags & MLX4_ZONE_USE_RR) ? MLX4_USE_RR : 0;
+ zone->priority = priority;
+ zone->offset = offset;
+
+ spin_lock(&zone_alloc->lock);
+
+ zone->uid = zone_alloc->last_uid++;
+ zone->allocator = zone_alloc;
+
+ if (zone_alloc->mask < mask)
+ zone_alloc->mask = mask;
+
+ list_for_each_entry(it, &zone_alloc->prios, prio_list)
+ if (it->priority >= priority)
+ break;
+
+ if (&it->prio_list == &zone_alloc->prios || it->priority > priority)
+ list_add_tail(&zone->prio_list, &it->prio_list);
+ list_add_tail(&zone->list, &it->list);
+
+ spin_unlock(&zone_alloc->lock);
+
+ *puid = zone->uid;
+
+ return 0;
+}
+
+/* Should be called under a lock */
+static int __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry)
+{
+ struct mlx4_zone_allocator *zone_alloc = entry->allocator;
+
+ if (!list_empty(&entry->prio_list)) {
+ /* Check if we need to add an alternative node to the prio list */
+ if (!list_is_last(&entry->list, &zone_alloc->entries)) {
+ struct mlx4_zone_entry *next = list_first_entry(&entry->list,
+ typeof(*next),
+ list);
+
+ if (next->priority == entry->priority)
+ list_add_tail(&next->prio_list, &entry->prio_list);
+ }
+
+ list_del(&entry->prio_list);
+ }
+
+ list_del(&entry->list);
+
+ if (zone_alloc->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP) {
+ u32 mask = 0;
+ struct mlx4_zone_entry *it;
+
+ list_for_each_entry(it, &zone_alloc->prios, prio_list) {
+ u32 cur_mask = mlx4_bitmap_masked_value(it->bitmap, (u32)-1);
+
+ if (mask < cur_mask)
+ mask = cur_mask;
+ }
+ zone_alloc->mask = mask;
+ }
+
+ return 0;
+}
+
+void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc)
+{
+ struct mlx4_zone_entry *zone, *tmp;
+
+ spin_lock(&zone_alloc->lock);
+
+ list_for_each_entry_safe(zone, tmp, &zone_alloc->entries, list) {
+ list_del(&zone->list);
+ list_del(&zone->prio_list);
+ kfree(zone);
+ }
+
+ spin_unlock(&zone_alloc->lock);
+ kfree(zone_alloc);
+}
+
+/* Should be called under a lock */
+static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count,
+ int align, u32 skip_mask, u32 *puid)
+{
+ u32 uid;
+ u32 res;
+ struct mlx4_zone_allocator *zone_alloc = zone->allocator;
+ struct mlx4_zone_entry *curr_node;
+
+ res = mlx4_bitmap_alloc_range(zone->bitmap, count,
+ align, skip_mask);
+
+ if (res != (u32)-1) {
+ res += zone->offset;
+ uid = zone->uid;
+ goto out;
+ }
+
+ list_for_each_entry(curr_node, &zone_alloc->prios, prio_list) {
+ if (unlikely(curr_node->priority == zone->priority))
+ break;
+ }
+
+ if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO) {
+ struct mlx4_zone_entry *it = curr_node;
+
+ list_for_each_entry_continue_reverse(it, &zone_alloc->entries, list) {
+ res = mlx4_bitmap_alloc_range(it->bitmap, count,
+ align, skip_mask);
+ if (res != (u32)-1) {
+ res += it->offset;
+ uid = it->uid;
+ goto out;
+ }
+ }
+ }
+
+ if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO) {
+ struct mlx4_zone_entry *it = curr_node;
+
+ list_for_each_entry_from(it, &zone_alloc->entries, list) {
+ if (unlikely(it == zone))
+ continue;
+
+ if (unlikely(it->priority != curr_node->priority))
+ break;
+
+ res = mlx4_bitmap_alloc_range(it->bitmap, count,
+ align, skip_mask);
+ if (res != (u32)-1) {
+ res += it->offset;
+ uid = it->uid;
+ goto out;
+ }
+ }
+ }
+
+ if (zone->flags & MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO) {
+ if (list_is_last(&curr_node->prio_list, &zone_alloc->prios))
+ goto out;
+
+ curr_node = list_first_entry(&curr_node->prio_list,
+ typeof(*curr_node),
+ prio_list);
+
+ list_for_each_entry_from(curr_node, &zone_alloc->entries, list) {
+ res = mlx4_bitmap_alloc_range(curr_node->bitmap, count,
+ align, skip_mask);
+ if (res != (u32)-1) {
+ res += curr_node->offset;
+ uid = curr_node->uid;
+ goto out;
+ }
+ }
+ }
+
+out:
+ if (NULL != puid && res != (u32)-1)
+ *puid = uid;
+ return res;
+}
+
+/* Should be called under a lock */
+static void __mlx4_free_from_zone(struct mlx4_zone_entry *zone, u32 obj,
+ u32 count)
+{
+ mlx4_bitmap_free_range(zone->bitmap, obj - zone->offset, count, zone->use_rr);
+}
+
+/* Should be called under a lock */
+static struct mlx4_zone_entry *__mlx4_find_zone_by_uid(
+ struct mlx4_zone_allocator *zones, u32 uid)
+{
+ struct mlx4_zone_entry *zone;
+
+ list_for_each_entry(zone, &zones->entries, list) {
+ if (zone->uid == uid)
+ return zone;
+ }
+
+ return NULL;
+}
+
+struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 uid)
+{
+ struct mlx4_zone_entry *zone;
+ struct mlx4_bitmap *bitmap;
+
+ spin_lock(&zones->lock);
+
+ zone = __mlx4_find_zone_by_uid(zones, uid);
+
+ bitmap = zone == NULL ? NULL : zone->bitmap;
+
+ spin_unlock(&zones->lock);
+
+ return bitmap;
+}
+
+int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid)
+{
+ struct mlx4_zone_entry *zone;
+ int res;
+
+ spin_lock(&zones->lock);
+
+ zone = __mlx4_find_zone_by_uid(zones, uid);
+
+ if (NULL == zone) {
+ res = -1;
+ goto out;
+ }
+
+ res = __mlx4_zone_remove_one_entry(zone);
+
+out:
+ spin_unlock(&zones->lock);
+ kfree(zone);
+
+ return res;
+}
+
+/* Should be called under a lock */
+static struct mlx4_zone_entry *__mlx4_find_zone_by_uid_unique(
+ struct mlx4_zone_allocator *zones, u32 obj)
+{
+ struct mlx4_zone_entry *zone, *zone_candidate = NULL;
+ u32 dist = (u32)-1;
+
+ /* Search for the smallest zone that this obj could be
+ * allocated from. This is done in order to handle
+ * situations when small bitmaps are allocated from bigger
+ * bitmaps (and the allocated space is marked as reserved in
+ * the bigger bitmap.
+ */
+ list_for_each_entry(zone, &zones->entries, list) {
+ if (obj >= zone->offset) {
+ u32 mobj = (obj - zone->offset) & zones->mask;
+
+ if (mobj < zone->bitmap->max) {
+ u32 curr_dist = zone->bitmap->effective_len;
+
+ if (curr_dist < dist) {
+ dist = curr_dist;
+ zone_candidate = zone;
+ }
+ }
+ }
+ }
+
+ return zone_candidate;
+}
+
+u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count,
+ int align, u32 skip_mask, u32 *puid)
+{
+ struct mlx4_zone_entry *zone;
+ int res = -1;
+
+ spin_lock(&zones->lock);
+
+ zone = __mlx4_find_zone_by_uid(zones, uid);
+
+ if (NULL == zone)
+ goto out;
+
+ res = __mlx4_alloc_from_zone(zone, count, align, skip_mask, puid);
+
+out:
+ spin_unlock(&zones->lock);
+
+ return res;
+}
+
+u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones, u32 uid, u32 obj, u32 count)
+{
+ struct mlx4_zone_entry *zone;
+ int res = 0;
+
+ spin_lock(&zones->lock);
+
+ zone = __mlx4_find_zone_by_uid(zones, uid);
+
+ if (NULL == zone) {
+ res = -1;
+ goto out;
+ }
+
+ __mlx4_free_from_zone(zone, obj, count);
+
+out:
+ spin_unlock(&zones->lock);
+
+ return res;
+}
+
+u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count)
+{
+ struct mlx4_zone_entry *zone;
+ int res;
+
+ if (!(zones->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP))
+ return -EFAULT;
+
+ spin_lock(&zones->lock);
+
+ zone = __mlx4_find_zone_by_uid_unique(zones, obj);
+
+ if (NULL == zone) {
+ res = -1;
+ goto out;
+ }
+
+ __mlx4_free_from_zone(zone, obj, count);
+ res = 0;
+
+out:
+ spin_unlock(&zones->lock);
+
+ return res;
+}
/*
* Handling for queue buffers -- we allocate a bunch of memory and
* register it in a memory region at HCA virtual address 0. If the