net/mlx5: DR, Limit STE hash table enlarge based on bytemask
author Alex Vesker <valex@mellanox.com>
Sun, 10 Nov 2019 13:39:36 +0000 (15:39 +0200)
committer Saeed Mahameed <saeedm@mellanox.com>
Wed, 20 Nov 2019 20:33:05 +0000 (12:33 -0800)
When an STE hash table has too many collisions, we enlarge it
into a bigger hash table (rehash). How much rehashing reduces
collisions depends on the bytemask value: the more bits set in the
bytemask, the better the entries spread across the table.

Without this fix, tables can grow in size without providing any
improvement, which can lead to memory depletion and failures.

This patch limits table rehash to reduce memory usage and improve
performance.
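
To illustrate the heuristic, the standalone sketch below (not part of
the patch) shows the limit check in isolation. The popcount mirrors
dr_get_bits_per_mask() from the diff, while rehash_can_help(), the
example mask values and the assumption that chunk_size encodes the
log2 of the table size are made up here for demonstration.

#include <stdio.h>

#define BITS_PER_BYTE 8

/* Count the set bits in the byte mask (Kernighan's method),
 * mirroring dr_get_bits_per_mask() in the patch.
 */
static unsigned int bits_per_mask(unsigned short byte_mask)
{
	unsigned int bits = 0;

	while (byte_mask) {
		byte_mask &= byte_mask - 1;
		bits++;
	}

	return bits;
}

/* Hypothetical helper: once the table's log2 size reaches the number
 * of bits the mask can distinguish, a bigger table cannot spread the
 * entries any further, so rehashing only wastes memory.
 */
static int rehash_can_help(unsigned short byte_mask, unsigned int chunk_size)
{
	return bits_per_mask(byte_mask) * BITS_PER_BYTE > chunk_size;
}

int main(void)
{
	/* One byte set in the mask -> only 8 hashable bits, so growing
	 * past 2^8 buckets cannot reduce collisions.
	 */
	printf("mask 0x0001, chunk_size 4:  grow? %d\n", rehash_can_help(0x0001, 4));
	printf("mask 0x0001, chunk_size 8:  grow? %d\n", rehash_can_help(0x0001, 8));
	printf("mask 0x0003, chunk_size 8:  grow? %d\n", rehash_can_help(0x0003, 8));

	return 0;
}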

Fixes: 41d07074154c ("net/mlx5: DR, Expose steering rule functionality")
Signed-off-by: Alex Vesker <valex@mellanox.com>
Reviewed-by: Erez Shitrit <erezsh@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c

index 5dcb8baf491a5dce2937626f55453690b655765b..bd1699e62142665b6c715f3517d1ddc84757ca4a 100644 (file)
@@ -595,6 +595,18 @@ static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
        }
 }
 
+static u16 dr_get_bits_per_mask(u16 byte_mask)
+{
+       u16 bits = 0;
+
+       while (byte_mask) {
+               byte_mask = byte_mask & (byte_mask - 1);
+               bits++;
+       }
+
+       return bits;
+}
+
 static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
                                      struct mlx5dr_domain *dmn,
                                      struct mlx5dr_domain_rx_tx *nic_dmn)
@@ -607,6 +619,9 @@ static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
        if (!ctrl->may_grow)
                return false;
 
+       if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size)
+               return false;
+
        if (ctrl->num_of_collisions >= ctrl->increase_threshold &&
            (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= ctrl->increase_threshold)
                return true;
index 80680765d59cfeaa0d36253e2bdb460b180779fc..3cbf74b44d1f45e61314c2f37742eae146e1c07c 100644 (file)
@@ -560,18 +560,6 @@ bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
        return !refcount_read(&ste->refcount);
 }
 
-static u16 get_bits_per_mask(u16 byte_mask)
-{
-       u16 bits = 0;
-
-       while (byte_mask) {
-               byte_mask = byte_mask & (byte_mask - 1);
-               bits++;
-       }
-
-       return bits;
-}
-
 /* Init one ste as a pattern for ste data array */
 void mlx5dr_ste_set_formatted_ste(u16 gvmi,
                                  struct mlx5dr_domain_rx_tx *nic_dmn,
@@ -620,20 +608,12 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
        struct mlx5dr_ste_htbl *next_htbl;
 
        if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
-               u32 bits_in_mask;
                u8 next_lu_type;
                u16 byte_mask;
 
                next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type);
                byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask);
 
-               /* Don't allocate table more than required,
-                * the size of the table defined via the byte_mask, so no need
-                * to allocate more than that.
-                */
-               bits_in_mask = get_bits_per_mask(byte_mask) * BITS_PER_BYTE;
-               log_table_size = min(log_table_size, bits_in_mask);
-
                next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
                                                  log_table_size,
                                                  next_lu_type,