if (c->weight != w)
continue;
- for_each_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
+ for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
if (!test_bit(j, used_mask))
break;
}
inc_irq_stat(apic_perf_irqs);
ack = status;
- for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
+ for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[bit];
clear_bit(bit, (unsigned long *) &status);
}
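The perf hunk above iterates a u64 status word through an (unsigned long *) cast. That holds on x86 because the kernel builds with -fno-strict-aliasing and the architecture is little-endian, so the low-order bits of the u64 land in bitmap word 0 and bit numbering lines up; on a big-endian 32-bit target the same cast would scan the words in the wrong order. A minimal userspace illustration of the layout assumption (not kernel code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t status = 1ULL << 3;
	/* Mirrors the kernel's cast; relies on little-endian layout
	 * (and, in the kernel, on -fno-strict-aliasing). */
	unsigned long *words = (unsigned long *)&status;

	assert(words[0] & (1UL << 3));	/* bit 3 is in word 0 */
	return 0;
}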
attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
- for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
+ for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
chan = ioat_chan_by_index(instance, bit);
tasklet_schedule(&chan->cleanup_task);
}
if (pending == 0)
continue;
- for_each_bit(offset, &pending, PL061_GPIO_NR)
+ for_each_set_bit(offset, &pending, PL061_GPIO_NR)
generic_handle_irq(pl061_to_irq(&chip->gc, offset));
}
desc->chip->unmask(irq);
ipr = ioread32(tgpio->membase + TGPIO_IPR);
iowrite32(ipr, tgpio->membase + TGPIO_ICR);
- for_each_bit(offset, &ipr, tgpio->gpio.ngpio)
+ for_each_set_bit(offset, &ipr, tgpio->gpio.ngpio)
generic_handle_irq(timbgpio_to_irq(&tgpio->gpio, offset));
}
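The pl061 and timbgpio hunks above share the usual GPIO demux shape: latch the pending mask, ack it, then fan out one generic_handle_irq() per set bit. A hedged sketch of that shape, with struct gpio_demux, the register offsets, and irq_base all illustrative rather than either driver's real layout:

#include <linux/io.h>
#include <linux/irq.h>

struct gpio_demux {
	void __iomem *base;
	unsigned int irq_base;
	unsigned int ngpio;
};

static void gpio_demux_handler(struct gpio_demux *d)
{
	unsigned long pending = readl(d->base + 0x00);	/* status (assumed offset) */
	unsigned int offset;

	writel(pending, d->base + 0x04);		/* ack (assumed offset) */
	for_each_set_bit(offset, &pending, d->ngpio)
		generic_handle_irq(d->irq_base + offset);
}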
int i;
if (abort_source & DW_IC_TX_ABRT_NOACK) {
- for_each_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
+ for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
dev_dbg(dev->dev,
"%s: %s\n", __func__, abort_sources[i]);
return -EREMOTEIO;
}
- for_each_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
+ for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]);
if (abort_source & DW_IC_TX_ARB_LOST)
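The i2c-designware hunks depend on abort_sources[] being a string table indexed by bit position, so each set bit of abort_source selects a message and ARRAY_SIZE() bounds the walk. A hypothetical table of that shape (entries are illustrative, not the driver's actual strings):

static const char * const abort_sources_example[] = {
	[0] = "7-bit address phase not acknowledged",
	[1] = "10-bit address byte 1 not acknowledged",
	[3] = "transmitted data not acknowledged",
};

Designated initializers leave the gap slots NULL, so a table with holes would also need a NULL check inside the loop.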
ack_irqs(ei);
/* Process all set pins. */
readval &= ei->irqs_enabled;
- for_each_bit(irqpin, &readval, ei->nirqs) {
+ for_each_set_bit(irqpin, &readval, ei->nirqs) {
/* Run irq handler */
pr_debug("got IRQ %d\n", irqpin);
irq = ei->irq_start + irqpin;
if (skb->data[0] == 0xff) {
/* we are being asked to broadcast to all partitions */
- for_each_bit(dest_partid, xpnet_broadcast_partitions,
+ for_each_set_bit(dest_partid, xpnet_broadcast_partitions,
xp_max_npartitions) {
xpnet_send(skb, queued_msg, start_addr, end_addr,
}
/* Need to reverse the bit maps as bit_map's MSB is q0
- * but, for_each_bit parses from right to left, which
+ * but, for_each_set_bit parses from right to left, which
* basically reverses the queue numbers */
for (i = 0; i < priv->num_grps; i++) {
priv->gfargrp[i].tx_bit_map = reverse_bitmap(
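The comment in this hunk is the crux of the gianfar changes: for_each_set_bit() yields indices from bit 0 (the LSB) upward, while the hardware's queue registers put queue 0 at the MSB, so the driver mirrors the map before iterating. A sketch of what that reversal amounts to (reverse_bitmap() is gianfar's own helper; this version assumes a u32 map holding max_qs queues):

#include <linux/types.h>

static u32 reverse_bitmap_sketch(u32 bit_map, u32 max_qs)
{
	u32 new_bit_map = 0;
	u32 mask = 0x1 << (max_qs - 1);
	u32 i;

	/* Bit (max_qs - 1 - i) of the input becomes bit i of the
	 * output, so the MSB queue ends up at index 0. */
	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map |= 1U << i;
		mask >>= 1;
	}
	return new_bit_map;
}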
* also assign queues to groups */
for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
priv->gfargrp[grp_idx].num_rx_queues = 0x0;
- for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
+ for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
priv->num_rx_queues) {
priv->gfargrp[grp_idx].num_rx_queues++;
priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
}
priv->gfargrp[grp_idx].num_tx_queues = 0x0;
- for_each_bit (i, &priv->gfargrp[grp_idx].tx_bit_map,
+ for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
priv->num_tx_queues) {
priv->gfargrp[grp_idx].num_tx_queues++;
priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
if (priv->mode == MQ_MG_MODE) {
baddr = &regs->txic0;
- for_each_bit (i, &tx_mask, priv->num_tx_queues) {
+ for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
if (likely(priv->tx_queue[i]->txcoalescing)) {
gfar_write(baddr + i, 0);
gfar_write(baddr + i, priv->tx_queue[i]->txic);
}
baddr = &regs->rxic0;
- for_each_bit (i, &rx_mask, priv->num_rx_queues) {
+ for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
if (likely(priv->rx_queue[i]->rxcoalescing)) {
gfar_write(baddr + i, 0);
gfar_write(baddr + i, priv->rx_queue[i]->rxic);
budget_per_queue = left_over_budget/num_queues;
left_over_budget = 0;
- for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+ for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
if (test_bit(i, &serviced_queues))
continue;
rx_queue = priv->rx_queue[i];
*/
for (v_idx = 0; v_idx < q_vectors; v_idx++) {
q_vector = adapter->q_vector[v_idx];
- /* XXX for_each_bit(...) */
+ /* XXX for_each_set_bit(...) */
r_idx = find_first_bit(q_vector->rxr_idx,
adapter->num_rx_queues);
ieee80211_tx_status_irqsafe(ar->hw, skb);
}
- for_each_bit(i, &queue_bitmap, BITS_PER_BYTE) {
+ for_each_set_bit(i, &queue_bitmap, BITS_PER_BYTE) {
#ifdef AR9170_QUEUE_STOP_DEBUG
printk(KERN_DEBUG "%s: wake queue %d\n",
wiphy_name(ar->hw->wiphy), i);
for (i = 0; i < __IWM_DM_NR; i++)
iwm->dbg.dbg_module[i] = 0;
- for_each_bit(bit, &iwm->dbg.dbg_modules, __IWM_DM_NR)
+ for_each_set_bit(bit, &iwm->dbg.dbg_modules, __IWM_DM_NR)
iwm->dbg.dbg_module[bit] = iwm->dbg.dbg_level;
return 0;
return -EINVAL;
}
- for_each_bit(bit, (unsigned long *)&tid_msk, IWM_UMAC_TID_NR) {
+ for_each_set_bit(bit, (unsigned long *)&tid_msk, IWM_UMAC_TID_NR) {
tid_info = &sta_info->tid_info[bit];
mutex_lock(&tid_info->mutex);
break;
}
dchunk = (struct ocfs2_local_disk_chunk *)hbh->b_data;
- for_each_bit(bit, rchunk->rc_bitmap, ol_chunk_entries(sb)) {
+ for_each_set_bit(bit, rchunk->rc_bitmap, ol_chunk_entries(sb)) {
qbh = NULL;
status = ocfs2_read_quota_block(lqinode,
ol_dqblk_block(sb, chunk, bit),
*/
#include <asm/bitops.h>
-#define for_each_bit(bit, addr, size) \
+#define for_each_set_bit(bit, addr, size) \
for ((bit) = find_first_bit((addr), (size)); \
(bit) < (size); \
(bit) = find_next_bit((addr), (size), (bit) + 1))
+/* Temporary */
+#define for_each_bit(bit, addr, size) for_each_set_bit(bit, addr, size)
static __inline__ int get_bitmask_order(unsigned int count)
{
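Since the bitops.h hunk above is the heart of the patch, a self-contained userspace demonstration of the macro's semantics may help: bits come back lowest-first and the size argument caps the scan. The find_next_bit() stand-in below is a naive linear walk, present only so the example compiles outside the kernel (the real lib/ implementation works a word at a time):

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static unsigned long find_next_bit(const unsigned long *addr,
				   unsigned long size, unsigned long off)
{
	for (; off < size; off++)
		if (addr[off / BITS_PER_LONG] & (1UL << (off % BITS_PER_LONG)))
			return off;
	return size;
}

#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)

#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size)); \
	     (bit) < (size); \
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

int main(void)
{
	unsigned long map = 0x29;	/* bits 0, 3 and 5 set */
	unsigned long bit;

	for_each_set_bit(bit, &map, BITS_PER_LONG)
		printf("bit %lu is set\n", bit);	/* 0, then 3, then 5 */
	return 0;
}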
}
#define for_each_cpupri_active(array, idx) \
- for_each_bit(idx, array, CPUPRI_NR_PRIORITIES)
+ for_each_set_bit(idx, array, CPUPRI_NR_PRIORITIES)
/**
* cpupri_find - find the best (lowest-pri) CPU in the system
{
int bit, reg;
- for_each_bit(bit, &uda1380_cache_dirty, UDA1380_CACHEREGNUM - 0x10) {
+ for_each_set_bit(bit, &uda1380_cache_dirty, UDA1380_CACHEREGNUM - 0x10) {
reg = 0x10 + bit;
pr_debug("uda1380: flush reg %x val %x:\n", reg,
uda1380_read_reg_cache(uda1380_codec, reg));
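The uda1380 hunk closes the set with the dirty-register-cache idiom: writers flag one bit per touched register, and resume flushes only the flagged ones. A generic sketch of the idiom under assumed names (cache_dirty, mark_dirty and flush_dirty are illustrative, not the codec driver's API):

#include <linux/bitops.h>

static unsigned long cache_dirty;	/* one bit per cached register */

static void mark_dirty(unsigned int reg)
{
	__set_bit(reg, &cache_dirty);
}

static void flush_dirty(void (*write_reg)(unsigned int reg))
{
	unsigned int bit;

	for_each_set_bit(bit, &cache_dirty, BITS_PER_LONG) {
		write_reg(bit);
		__clear_bit(bit, &cache_dirty);
	}
}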