if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- new_rx_count = max(ring->rx_pending, (u32)IXGBE_MIN_RXD);
- new_rx_count = min(new_rx_count, (u32)IXGBE_MAX_RXD);
+ new_rx_count = max_t(u32, ring->rx_pending, IXGBE_MIN_RXD);
+ new_rx_count = min_t(u32, new_rx_count, IXGBE_MAX_RXD);
new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
- new_tx_count = max(ring->tx_pending, (u32)IXGBE_MIN_TXD);
- new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
+ new_tx_count = max_t(u32, ring->tx_pending, IXGBE_MIN_TXD);
+ new_tx_count = min_t(u32, new_tx_count, IXGBE_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
if ((new_tx_count == adapter->tx_ring[0]->count) &&
    (new_rx_count == adapter->rx_ring[0]->count)) {
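
The two hunks above appear to come from ixgbe_set_ringparam(): a user-requested descriptor count is clamped to the supported range and then rounded up to the hardware's required multiple. Below is a minimal userspace sketch of that clamp-and-align pipeline. The macro bodies are simplified stand-ins (the kernel versions use statement expressions to avoid double evaluation), the IXGBE_* values are illustrative placeholders rather than quotes from ixgbe.h, and the main() harness is mine.

/*
 * Minimal userspace sketch of the clamp-and-align pipeline above.
 * Macros are simplified stand-ins for the kernel's min_t()/max_t()
 * and ALIGN(); all IXGBE_* values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

#define max_t(type, x, y) ((type)(x) > (type)(y) ? (type)(x) : (type)(y))
#define min_t(type, x, y) ((type)(x) < (type)(y) ? (type)(x) : (type)(y))
#define ALIGN(x, a)       (((x) + ((a) - 1)) & ~((a) - 1)) /* a: power of two */

#define IXGBE_MIN_RXD                     64 /* illustrative */
#define IXGBE_MAX_RXD                   4096 /* illustrative */
#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE   8 /* illustrative */

int main(void)
{
	u32 rx_pending = 100; /* e.g. a count requested via ethtool -G */

	/* Same three steps as the hunk: clamp low, clamp high, round up. */
	u32 new_rx_count = max_t(u32, rx_pending, IXGBE_MIN_RXD);
	new_rx_count = min_t(u32, new_rx_count, IXGBE_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	printf("%u -> %u\n", rx_pending, new_rx_count); /* prints: 100 -> 104 */
	return 0;
}

With these values, a request for 100 descriptors is within range but not a multiple of 8, so ALIGN() rounds it up to 104, mirroring what the driver ends up programming.
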
if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
return false;
- f->indices = min((int)num_online_cpus(), f->indices);
+ f->indices = min_t(int, num_online_cpus(), f->indices);
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
return false;
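
This hunk looks like the FCoE queue-count setup, and it shows the motivation for the whole patch: min_t()/max_t() name the comparison type once, whereas the kernel's plain min()/max() reject mixed types at compile time and so forced a cast at every call site. The sketch below uses the classic include/linux/kernel.h definitions of that era; num_online_cpus() is stubbed for userspace and fcoe_indices() is a hypothetical wrapper, not driver code.

/*
 * Why min_t() beats min()-plus-cast: plain min() trips a compile-time
 * type check when its arguments differ in type. These definitions
 * match the classic include/linux/kernel.h versions (GCC statement
 * expressions and typeof).
 */
#define min(x, y) ({				\
	typeof(x) _min1 = (x);			\
	typeof(y) _min2 = (y);			\
	(void) (&_min1 == &_min2); /* warns when types differ */ \
	_min1 < _min2 ? _min1 : _min2; })

#define min_t(type, x, y) ({			\
	type __min1 = (x);			\
	type __min2 = (y);			\
	__min1 < __min2 ? __min1 : __min2; })

/* Stand-in for the kernel's num_online_cpus(), which returns unsigned int. */
static unsigned int num_online_cpus(void) { return 8; }

int fcoe_indices(int indices)	/* hypothetical wrapper for illustration */
{
	/*
	 * min(num_online_cpus(), indices) would mix unsigned int with int
	 * and trip the &_min1 == &_min2 pointer comparison above
	 * (-Wcompare-distinct-pointer-types). min_t() states the
	 * comparison type instead of hiding it in a cast.
	 */
	return min_t(int, num_online_cpus(), indices);
}
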
/* Map queue offset and counts onto allocated tx queues */
- per_tc_q = min(dev->num_tx_queues / tcs, (unsigned int)DCB_QUEUE_CAP);
- q = min((int)num_online_cpus(), per_tc_q);
+ per_tc_q = min_t(unsigned int, dev->num_tx_queues / tcs, DCB_QUEUE_CAP);
+ q = min_t(int, num_online_cpus(), per_tc_q);
for (i = 0; i < tcs; i++) {
netdev_set_tc_queue(dev, i, q, offset);
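
The loop above hands q tx queues to each of the tcs traffic classes, with netdev_set_tc_queue(dev, tc, count, offset) recording each class's queue count and starting offset. Here is a standalone sketch of the same partitioning arithmetic; the queue counts and the DCB_QUEUE_CAP value are illustrative, and printf stands in for the netdev bookkeeping.

/*
 * Sketch of the per-traffic-class queue partitioning done by the
 * loop above. All values are illustrative.
 */
#include <stdio.h>

#define DCB_QUEUE_CAP 8	/* illustrative per-TC cap */

int main(void)
{
	unsigned int num_tx_queues = 64, tcs = 4, online_cpus = 4;

	/* per_tc_q = min_t(unsigned int, num_tx_queues / tcs, DCB_QUEUE_CAP) */
	unsigned int per_tc_q = num_tx_queues / tcs;		/* 16 */
	if (per_tc_q > DCB_QUEUE_CAP)
		per_tc_q = DCB_QUEUE_CAP;			/* capped at 8 */

	/* q = min_t(int, online_cpus, per_tc_q) */
	unsigned int q = online_cpus < per_tc_q ? online_cpus : per_tc_q;

	/* Each class gets q queues; offsets advance by q per class. */
	for (unsigned int i = 0, offset = 0; i < tcs; i++, offset += q)
		printf("tc %u: %u queues at offset %u\n", i, q, offset);

	return 0;
}

With 4 online CPUs the cap never bites here: each of the 4 classes gets 4 queues, at offsets 0, 4, 8, and 12.
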
hw->subsystem_device_id = pdev->subsystem_device;
/* Set capability flags */
- rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
+ rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
adapter->ring_feature[RING_F_RSS].indices = rss;
adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
switch (hw->mac.type) {