block: Rename blk_queue_max_sectors to blk_queue_max_hw_sectors
author    Martin K. Petersen <martin.petersen@oracle.com>
Fri, 26 Feb 2010 05:20:38 +0000 (00:20 -0500)
committer Jens Axboe <jens.axboe@oracle.com>
Fri, 26 Feb 2010 12:58:08 +0000 (13:58 +0100)
The block layer calling convention is blk_queue_<limit name>.
blk_queue_max_sectors predates this practice, leading to some confusion.
Rename the function to appropriately reflect that its intended use is to
set max_hw_sectors.

Also introduce a temporary wrapper for backwards compatibility.  This can
be removed after the merge window is closed.
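
As a minimal sketch of the conversion (a hypothetical call site, not code from this patch), the new function takes exactly the same arguments as the old one:

	#include <linux/blkdev.h>

	/* Hypothetical driver setup path -- illustrates the rename only. */
	static void example_setup_queue(struct request_queue *q)
	{
		/* Before this patch: sets the hardware limit, despite the name. */
		blk_queue_max_sectors(q, 128);

		/* After this patch: same behaviour, name matches the limit it sets. */
		blk_queue_max_hw_sectors(q, 128);
	}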

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
41 files changed:
arch/um/drivers/ubd_kern.c
block/blk-settings.c
drivers/ata/libata-scsi.c
drivers/block/DAC960.c
drivers/block/brd.c
drivers/block/cciss.c
drivers/block/drbd/drbd_nl.c
drivers/block/floppy.c
drivers/block/hd.c
drivers/block/mg_disk.c
drivers/block/paride/pd.c
drivers/block/pktcdvd.c
drivers/block/ps3disk.c
drivers/block/ps3vram.c
drivers/block/sunvdc.c
drivers/block/ub.c
drivers/block/viodasd.c
drivers/block/xd.c
drivers/block/xen-blkfront.c
drivers/cdrom/viocd.c
drivers/firewire/sbp2.c
drivers/ide/ide-disk.c
drivers/ide/ide-floppy.c
drivers/ide/ide-probe.c
drivers/ieee1394/sbp2.c
drivers/md/linear.c
drivers/md/multipath.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/memstick/core/mspro_block.c
drivers/message/i2o/i2o_block.c
drivers/mmc/card/queue.c
drivers/s390/block/dasd.c
drivers/s390/char/tape_block.c
drivers/scsi/ipr.c
drivers/scsi/pmcraid.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_scan.c
drivers/usb/storage/scsiglue.c
include/linux/blkdev.h

diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 5ff554677f4019ad4435bb6d8277b9ec72907d97..c2051b0737cbdc43815842d035d2c2aa8f38dadf 100644
@@ -747,7 +747,7 @@ static int ubd_open_dev(struct ubd *ubd_dev)
        ubd_dev->fd = fd;
 
        if(ubd_dev->cow.file != NULL){
-               blk_queue_max_sectors(ubd_dev->queue, 8 * sizeof(long));
+               blk_queue_max_hw_sectors(ubd_dev->queue, 8 * sizeof(long));
 
                err = -ENOMEM;
                ubd_dev->cow.bitmap = vmalloc(ubd_dev->cow.bitmap_len);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 3c53b0beb8dd7cc4a6834f8f969ebe6fdbbde461..61afae9dbc6d373c93f866382187dabd136b4655 100644
@@ -154,7 +154,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
        q->unplug_timer.data = (unsigned long)q;
 
        blk_set_default_limits(&q->limits);
-       blk_queue_max_sectors(q, BLK_SAFE_MAX_SECTORS);
+       blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
 
        /*
         * If the caller didn't supply a lock, fall back to our embedded
@@ -210,7 +210,7 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
- * blk_queue_max_sectors - set max sectors for a request for this queue
+ * blk_queue_max_hw_sectors - set max sectors for a request for this queue
  * @q:  the request queue for the device
  * @max_hw_sectors:  max hardware sectors in the usual 512b unit
  *
@@ -225,7 +225,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
  *    The soft limit can not exceed max_hw_sectors.
  **/
-void blk_queue_max_sectors(struct request_queue *q, unsigned int max_hw_sectors)
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
 {
        if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
                max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
@@ -237,7 +237,7 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_hw_sectors)
        q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
                                      BLK_DEF_MAX_SECTORS);
 }
-EXPORT_SYMBOL(blk_queue_max_sectors);
+EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
 /**
  * blk_queue_max_discard_sectors - set max sectors for a single discard
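
The blk_queue_max_hw_sectors() documentation above distinguishes the hard limit (max_hw_sectors, what the device and driver can handle) from the soft limit (max_sectors, what the block layer actually builds per request). A minimal sketch of the resulting behaviour, assuming a hypothetical queue whose hardware accepts 2048-sector (1 MiB) requests:

	/* Hypothetical call site -- not part of this patch. */
	blk_queue_max_hw_sectors(q, 2048);

	/*
	 * q->limits.max_hw_sectors is now 2048, while the soft limit is
	 * clamped to BLK_DEF_MAX_SECTORS (1024 at the time), so
	 * queue_max_sectors(q) returns 1024.  Userspace can tune the soft
	 * limit through /sys/block/<device>/queue/max_sectors_kb, but
	 * never above the hardware limit.
	 */
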
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d096fbcbc771a61a0537e28842d04f1149dcc7a7..bea003a24d277268fb7cd9337e9851269e0c809c 100644
@@ -1097,7 +1097,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
                dev->flags |= ATA_DFLAG_NO_UNLOAD;
 
        /* configure max sectors */
-       blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
+       blk_queue_max_hw_sectors(sdev->request_queue, dev->max_sectors);
 
        if (dev->class == ATA_DEV_ATAPI) {
                struct request_queue *q = sdev->request_queue;
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 7412b5d4f5f3e2d5ee0dd6e3f07d9362535705e5..1c0cd35e1913daa4614ac3928d27d65ace3cf46e 100644
@@ -2535,7 +2535,7 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
        RequestQueue->queuedata = Controller;
        blk_queue_max_hw_segments(RequestQueue, Controller->DriverScatterGatherLimit);
        blk_queue_max_phys_segments(RequestQueue, Controller->DriverScatterGatherLimit);
-       blk_queue_max_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
+       blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
        disk->queue = RequestQueue;
        sprintf(disk->disk_name, "rd/c%dd%d", Controller->ControllerNumber, n);
        disk->major = MajorNumber;
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 4f688434daf1a220c95050f82bd4a5c9e1a5d277..c6ddeacb77fdaa2541f55c294ec25dfa0dd7004b 100644
@@ -434,7 +434,7 @@ static struct brd_device *brd_alloc(int i)
                goto out_free_dev;
        blk_queue_make_request(brd->brd_queue, brd_make_request);
        blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG, NULL);
-       blk_queue_max_sectors(brd->brd_queue, 1024);
+       blk_queue_max_hw_sectors(brd->brd_queue, 1024);
        blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
 
        disk = brd->brd_disk = alloc_disk(1 << part_shift);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 86acdca5d0cea88366faaae2f3a7ef8e4fa682e3..030e52d722545b8b3f069d0386a56094154b6836 100644
@@ -1802,7 +1802,7 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
        /* This is a limit in the driver and could be eliminated. */
        blk_queue_max_phys_segments(disk->queue, h->maxsgentries);
 
-       blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
+       blk_queue_max_hw_sectors(disk->queue, h->cciss_max_sectors);
 
        blk_queue_softirq_done(disk->queue, cciss_softirq_done);
 
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 1292e062066337d40708d4449c560f9af61bb384..9b55e64196fcad92d241946cdd5c657af9275e1d 100644
@@ -709,7 +709,7 @@ void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __mu
 
        max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);
 
-       blk_queue_max_sectors(q, max_seg_s >> 9);
+       blk_queue_max_hw_sectors(q, max_seg_s >> 9);
        blk_queue_max_phys_segments(q, max_segments ? max_segments : MAX_PHYS_SEGMENTS);
        blk_queue_max_hw_segments(q, max_segments ? max_segments : MAX_HW_SEGMENTS);
        blk_queue_max_segment_size(q, max_seg_s);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 3266b4f65daa37dec3110ff2220f8d4b7c1bfb42..b9b117059b624973b7d089009f5cb33632d8b02d 100644
@@ -4234,7 +4234,7 @@ static int __init floppy_init(void)
                err = -ENOMEM;
                goto out_unreg_driver;
        }
-       blk_queue_max_sectors(floppy_queue, 64);
+       blk_queue_max_hw_sectors(floppy_queue, 64);
 
        blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE,
                            floppy_find, NULL, NULL);
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index d5cdce08ffd2765aa1f5b3cc83b399093bbf09e7..5116c65c07cb71e9384372faaf7a7d9fa8c29d89 100644
@@ -719,7 +719,7 @@ static int __init hd_init(void)
                return -ENOMEM;
        }
 
-       blk_queue_max_sectors(hd_queue, 255);
+       blk_queue_max_hw_sectors(hd_queue, 255);
        init_timer(&device_timer);
        device_timer.function = hd_times_out;
        blk_queue_logical_block_size(hd_queue, 512);
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 02b2583df7fc225184dbf704a28aade8d24128ec..5416c9a606e43694b59dc0dec09075b8257bf964 100644
@@ -980,7 +980,7 @@ static int mg_probe(struct platform_device *plat_dev)
                                __func__, __LINE__);
                goto probe_err_6;
        }
-       blk_queue_max_sectors(host->breq, MG_MAX_SECTS);
+       blk_queue_max_hw_sectors(host->breq, MG_MAX_SECTS);
        blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE);
 
        init_timer(&host->timer);
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 569e39e8f1143fd4c896344fa25e3e653f8e6a89..e712cd51af1543c75ca13092c1662e551df256f4 100644
@@ -906,7 +906,7 @@ static int __init pd_init(void)
        if (!pd_queue)
                goto out1;
 
-       blk_queue_max_sectors(pd_queue, cluster);
+       blk_queue_max_hw_sectors(pd_queue, cluster);
 
        if (register_blkdev(major, name))
                goto out2;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 7cd2973ebb7b4a4ad9582916bd57eab32c9e12bc..6e1daa24da2fea7bbec0463a84bdb3a2037b2918 100644
@@ -2312,7 +2312,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
                 * even if the size is a multiple of the packet size.
                 */
                spin_lock_irq(q->queue_lock);
-               blk_queue_max_sectors(q, pd->settings.size);
+               blk_queue_max_hw_sectors(q, pd->settings.size);
                spin_unlock_irq(q->queue_lock);
                set_bit(PACKET_WRITABLE, &pd->flags);
        } else {
@@ -2613,7 +2613,7 @@ static void pkt_init_queue(struct pktcdvd_device *pd)
 
        blk_queue_make_request(q, pkt_make_request);
        blk_queue_logical_block_size(q, CD_FRAMESIZE);
-       blk_queue_max_sectors(q, PACKET_MAX_SECTORS);
+       blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
        blk_queue_merge_bvec(q, pkt_merge_bvec);
        q->queuedata = pd;
 }
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 03a130dca8ab541ad81bbb7db2095da6a5a26551..9cd1a4a542b89bc46d7141fd3c8adb0a9e7076e8 100644
@@ -474,7 +474,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
 
        blk_queue_bounce_limit(queue, BLK_BOUNCE_HIGH);
 
-       blk_queue_max_sectors(queue, dev->bounce_size >> 9);
+       blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9);
        blk_queue_segment_boundary(queue, -1UL);
        blk_queue_dma_alignment(queue, dev->blk_size-1);
        blk_queue_logical_block_size(queue, dev->blk_size);
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index a7ecb43b16abc15859195d2817f47e30ca7bb299..6416b262934bf248d29b2130e0d6213b29bb2836 100644
@@ -754,7 +754,7 @@ static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
        blk_queue_max_phys_segments(queue, MAX_PHYS_SEGMENTS);
        blk_queue_max_hw_segments(queue, MAX_HW_SEGMENTS);
        blk_queue_max_segment_size(queue, BLK_MAX_SEGMENT_SIZE);
-       blk_queue_max_sectors(queue, BLK_SAFE_MAX_SECTORS);
+       blk_queue_max_hw_sectors(queue, BLK_SAFE_MAX_SECTORS);
 
        gendisk = alloc_disk(1);
        if (!gendisk) {
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 411f064760b41db69a4ffae999b92f24b89fba3b..dd30cddd0f7f0d4c5072fb85fb5491cc9a85da54 100644
@@ -693,7 +693,7 @@ static int probe_disk(struct vdc_port *port)
 
        blk_queue_max_hw_segments(q, port->ring_cookies);
        blk_queue_max_phys_segments(q, port->ring_cookies);
-       blk_queue_max_sectors(q, port->max_xfer_size);
+       blk_queue_max_hw_sectors(q, port->max_xfer_size);
        g->major = vdc_major;
        g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
        strcpy(g->disk_name, port->disk_name);
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index d86d1357ccefea9fa127ae55ee7d4d8b12564888..352ea24d66e87636ce0e485f218a59a233399eea 100644
@@ -2323,7 +2323,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
        blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
        blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
        blk_queue_segment_boundary(q, 0xffffffff);      /* Dubious. */
-       blk_queue_max_sectors(q, UB_MAX_SECTORS);
+       blk_queue_max_hw_sectors(q, UB_MAX_SECTORS);
        blk_queue_logical_block_size(q, lun->capacity.bsize);
 
        lun->disk = disk;
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index a8c8b56b275e546e02819d7889801b5545dbafb7..d44ece7d7b7c0370d8468f21dd650ea6291dc818 100644
@@ -473,7 +473,7 @@ retry:
        d->disk = g;
        blk_queue_max_hw_segments(q, VIOMAXBLOCKDMA);
        blk_queue_max_phys_segments(q, VIOMAXBLOCKDMA);
-       blk_queue_max_sectors(q, VIODASD_MAXSECTORS);
+       blk_queue_max_hw_sectors(q, VIODASD_MAXSECTORS);
        g->major = VIODASD_MAJOR;
        g->first_minor = dev_no << PARTITION_SHIFT;
        if (dev_no >= 26)
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index d1fd032e751494cb7f452c470383f14d45ab3ec7..1a325fb05c9282745013d53e541e13b7b748d1a8 100644
@@ -242,7 +242,7 @@ static int __init xd_init(void)
        }
 
        /* xd_maxsectors depends on controller - so set after detection */
-       blk_queue_max_sectors(xd_queue, xd_maxsectors);
+       blk_queue_max_hw_sectors(xd_queue, xd_maxsectors);
 
        for (i = 0; i < xd_drives; i++)
                add_disk(xd_gendisk[i]);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index a84702d1a35ec2f47b2a64f32ac75074f95624f8..f9861aaa1fefccdde412ecb88d938baac2ad60f7 100644
@@ -346,7 +346,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
 
        /* Hard sector size and max sectors impersonate the equiv. hardware. */
        blk_queue_logical_block_size(rq, sector_size);
-       blk_queue_max_sectors(rq, 512);
+       blk_queue_max_hw_sectors(rq, 512);
 
        /* Each segment in a request is up to an aligned page in size. */
        blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 57ca69e0ac553a086ec1751441188c6415f4fe56..b1dfd23eb8328f3cda37ff1343f445c80f52c41f 100644
@@ -618,7 +618,7 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
                        sizeof(gendisk->disk_name));
        blk_queue_max_hw_segments(q, 1);
        blk_queue_max_phys_segments(q, 1);
-       blk_queue_max_sectors(q, 4096 / 512);
+       blk_queue_max_hw_sectors(q, 4096 / 512);
        gendisk->queue = q;
        gendisk->fops = &viocd_fops;
        gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE;
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index d485cdd8cbacf0a16410e50fb2b195c0107537f2..70fef40cd22f0f0d0d9c46add51943a51e6cea52 100644
@@ -1571,7 +1571,7 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
                sdev->start_stop_pwr_cond = 1;
 
        if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
-               blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
+               blk_queue_max_hw_sectors(sdev->request_queue, 128 * 1024 / 512);
 
        blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);
 
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 7f878017b736fcd913cfaea1e3939f10bb6cdf82..3b128dce9c3a15a203d3d7e13cb08bc9a14f283f 100644
@@ -679,7 +679,7 @@ static void ide_disk_setup(ide_drive_t *drive)
                if (max_s > hwif->rqsize)
                        max_s = hwif->rqsize;
 
-               blk_queue_max_sectors(q, max_s);
+               blk_queue_max_hw_sectors(q, max_s);
        }
 
        printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index fefbdfc8db068868afe0dadb0914a948219ea824..efd907623469fbc92a5e372fda19a4d6e260574d 100644
@@ -486,7 +486,7 @@ static void ide_floppy_setup(ide_drive_t *drive)
                drive->atapi_flags |= IDE_AFLAG_ZIP_DRIVE;
                /* This value will be visible in the /proc/ide/hdx/settings */
                drive->pc_delay = IDEFLOPPY_PC_DELAY;
-               blk_queue_max_sectors(drive->queue, 64);
+               blk_queue_max_hw_sectors(drive->queue, 64);
        }
 
        /*
@@ -494,7 +494,7 @@ static void ide_floppy_setup(ide_drive_t *drive)
         * nasty clicking noises without it, so please don't remove this.
         */
        if (strncmp((char *)&id[ATA_ID_PROD], "IOMEGA Clik!", 11) == 0) {
-               blk_queue_max_sectors(drive->queue, 64);
+               blk_queue_max_hw_sectors(drive->queue, 64);
                drive->atapi_flags |= IDE_AFLAG_CLIK_DRIVE;
                /* IOMEGA Clik! drives do not support lock/unlock commands */
                drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 4d76ba473097ac2ad89295181f9c8adb5433156a..1ec8b31277bdd98104f77ec10d8ddbd2c4d14139 100644
@@ -774,7 +774,7 @@ static int ide_init_queue(ide_drive_t *drive)
 
        if (hwif->rqsize < max_sectors)
                max_sectors = hwif->rqsize;
-       blk_queue_max_sectors(q, max_sectors);
+       blk_queue_max_hw_sectors(q, max_sectors);
 
 #ifdef CONFIG_PCI
        /* When we have an IOMMU, we may have a problem where pci_map_sg()
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index f199896c41135c7b3117ef96b571dff01f72394c..c88696a6cf8aa8a5d79b726f3b22ac3790837f67 100644
@@ -2020,7 +2020,7 @@ static int sbp2scsi_slave_configure(struct scsi_device *sdev)
        if (lu->workarounds & SBP2_WORKAROUND_POWER_CONDITION)
                sdev->start_stop_pwr_cond = 1;
        if (lu->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
-               blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
+               blk_queue_max_hw_sectors(sdev->request_queue, 128 * 1024 / 512);
 
        blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);
        return 0;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 00435bd206990416c21cad98b549483b123a99c8..af2d39d603c74446c3c792d6bdc3ee334067d639 100644
@@ -177,7 +177,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
                 */
                if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
                    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-                       blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+                       blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
 
                conf->array_sectors += rdev->sectors;
                cnt++;
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 32a662fc55c95d5f08de09362f85d0d6ab0917e3..4b323f45ad74a357432c6115822f4cc5347ef0c3 100644
@@ -308,7 +308,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
                 */
                        if (q->merge_bvec_fn &&
                            queue_max_sectors(q) > (PAGE_SIZE>>9))
-                               blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+                               blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
 
                        conf->working_disks++;
                        mddev->degraded--;
@@ -478,7 +478,7 @@ static int multipath_run (mddev_t *mddev)
                 * a merge_bvec_fn to be involved in multipath */
                if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
                    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-                       blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+                       blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
 
                if (!test_bit(Faulty, &rdev->flags))
                        conf->working_disks++;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 77605cdceaf179ce1a5ce15d9bf50d4239efb91e..a1f7147b757fd6f79231ef52721b587fbbfe4432 100644
@@ -182,7 +182,7 @@ static int create_strip_zones(mddev_t *mddev)
 
                if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
                    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-                       blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+                       blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
 
                if (!smallest || (rdev1->sectors < smallest->sectors))
                        smallest = rdev1;
@@ -325,7 +325,7 @@ static int raid0_run(mddev_t *mddev)
        }
        if (md_check_no_bitmap(mddev))
                return -EINVAL;
-       blk_queue_max_sectors(mddev->queue, mddev->chunk_sectors);
+       blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
        mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 
        ret = create_strip_zones(mddev);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 859bd3ffe4351bad9204aa04e906e751a788c64a..5a06122abd3bbd9d49a121675e37dd95bd5ba4ff 100644
@@ -1158,7 +1158,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
                         */
                        if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
                            queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-                               blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+                               blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
 
                        p->head_position = 0;
                        rdev->raid_disk = mirror;
@@ -2103,7 +2103,7 @@ static int run(mddev_t *mddev)
                 */
                if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
                    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-                       blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+                       blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
        }
 
        mddev->degraded = 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d119b7b75e71ffabe5863b0b3463b7a66304f831..7584f9ab9bcfedf159bf94fd1238f9321d14e866 100644
@@ -1161,7 +1161,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
                         */
                        if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
                            queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-                               blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+                               blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
 
                        p->head_position = 0;
                        rdev->raid_disk = mirror;
@@ -2260,7 +2260,7 @@ static int run(mddev_t *mddev)
                 */
                if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
                    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-                       blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+                       blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
 
                disk->head_position = 0;
        }
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index bd83fa0a4970fb13a5ddef788d1c68a5a7b21300..44d4178c4c1577a7be0084ceb8765fcea1c7d6d0 100644
@@ -1226,7 +1226,7 @@ static int mspro_block_init_disk(struct memstick_dev *card)
        blk_queue_prep_rq(msb->queue, mspro_block_prepare_req);
 
        blk_queue_bounce_limit(msb->queue, limit);
-       blk_queue_max_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES);
+       blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES);
        blk_queue_max_phys_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
        blk_queue_max_hw_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
        blk_queue_max_segment_size(msb->queue,
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index e39986a7827399dbe607ebe70d140fc615d12af6..d033cfdb516f81052e2dcb0d69b83bcaa8e552fa 100644
@@ -1066,7 +1066,7 @@ static int i2o_block_probe(struct device *dev)
        queue->queuedata = i2o_blk_dev;
 
        blk_queue_max_phys_segments(queue, I2O_MAX_PHYS_SEGMENTS);
-       blk_queue_max_sectors(queue, max_sectors);
+       blk_queue_max_hw_sectors(queue, max_sectors);
        blk_queue_max_hw_segments(queue, i2o_sg_tablesize(c, body_size));
 
        osm_debug("max sectors = %d\n", queue->max_sectors);
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index c5a7a855f4b1c5f0a8ad7472a8f7896127d03ea8..09b633d5657b706aab8bc81e3cdc23c6e9015cac 100644
@@ -154,7 +154,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 
                if (mq->bounce_buf) {
                        blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
-                       blk_queue_max_sectors(mq->queue, bouncesz / 512);
+                       blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
                        blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
                        blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
                        blk_queue_max_segment_size(mq->queue, bouncesz);
@@ -180,7 +180,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 
        if (!mq->bounce_buf) {
                blk_queue_bounce_limit(mq->queue, limit);
-               blk_queue_max_sectors(mq->queue,
+               blk_queue_max_hw_sectors(mq->queue,
                        min(host->max_blk_count, host->max_req_size / 512));
                blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
                blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 5905936c7c60909a30bd539e301dd5c49d7c5014..14b1e25b9dcf05efb8d978f3fd7a3ddac3700362 100644
@@ -2129,7 +2129,7 @@ static void dasd_setup_queue(struct dasd_block *block)
 
        blk_queue_logical_block_size(block->request_queue, block->bp_block);
        max = block->base->discipline->max_blocks << block->s2b_shift;
-       blk_queue_max_sectors(block->request_queue, max);
+       blk_queue_max_hw_sectors(block->request_queue, max);
        blk_queue_max_phys_segments(block->request_queue, -1L);
        blk_queue_max_hw_segments(block->request_queue, -1L);
        /* with page sized segments we can translate each segement into
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 8d3d720737da20879f1ac85b1f9da6a7432e99cd..509ed056fdddc51daccdb1d35174344ccb701206 100644
@@ -222,7 +222,7 @@ tapeblock_setup_device(struct tape_device * device)
                goto cleanup_queue;
 
        blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
-       blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
+       blk_queue_max_hw_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
        blk_queue_max_phys_segments(blkdat->request_queue, -1L);
        blk_queue_max_hw_segments(blkdat->request_queue, -1L);
        blk_queue_max_segment_size(blkdat->request_queue, -1L);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 9e52d16c7c3908319e97eff73d85de1102ac8859..032f0d0e6cb454695dffaec23b4ddfc0a9fa279f 100644
@@ -3674,7 +3674,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
                if (ipr_is_vset_device(res)) {
                        blk_queue_rq_timeout(sdev->request_queue,
                                             IPR_VSET_RW_TIMEOUT);
-                       blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
+                       blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
                }
                if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
                        sdev->allow_restart = 1;
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index b6f1ef954af1b469a75ca7ab96c1112b8034146c..9b1c1433c26bd1b538833397bd8e38d5f619a6da 100644
@@ -235,7 +235,7 @@ static int pmcraid_slave_configure(struct scsi_device *scsi_dev)
                scsi_dev->allow_restart = 1;
                blk_queue_rq_timeout(scsi_dev->request_queue,
                                     PMCRAID_VSET_IO_TIMEOUT);
-               blk_queue_max_sectors(scsi_dev->request_queue,
+               blk_queue_max_hw_sectors(scsi_dev->request_queue,
                                      PMCRAID_VSET_MAX_SECTORS);
        }
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index c6642423cc6758ee5b07349cfffd830d7e45e33f..ac3cca74bdfb787660e3614bdf878ab9d6dc3d65 100644
@@ -1627,7 +1627,7 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
        blk_queue_max_hw_segments(q, shost->sg_tablesize);
        blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
 
-       blk_queue_max_sectors(q, shost->max_sectors);
+       blk_queue_max_hw_sectors(q, shost->max_sectors);
        blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
        blk_queue_segment_boundary(q, shost->dma_boundary);
        dma_set_seg_boundary(dev, shost->dma_boundary);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 012f73a96880e8b67546caeab4211c0a8c65f523..5d9b5130d8c8fca1109b1044c418ef66ba09f2ad 100644
@@ -879,7 +879,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
         * broken RA4x00 Compaq Disk Array
         */
        if (*bflags & BLIST_MAX_512)
-               blk_queue_max_sectors(sdev->request_queue, 512);
+               blk_queue_max_hw_sectors(sdev->request_queue, 512);
 
        /*
         * Some devices may not want to have a start command automatically
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index e5e6df39e73707989711ea0284a4109d56409b44..aadc16b5eed79c8d6ee0801c948c3172f1e4a6e9 100644
@@ -134,14 +134,14 @@ static int slave_configure(struct scsi_device *sdev)
                if (us->fflags & US_FL_MAX_SECTORS_MIN)
                        max_sectors = PAGE_CACHE_SIZE >> 9;
                if (queue_max_sectors(sdev->request_queue) > max_sectors)
-                       blk_queue_max_sectors(sdev->request_queue,
+                       blk_queue_max_hw_sectors(sdev->request_queue,
                                              max_sectors);
        } else if (sdev->type == TYPE_TAPE) {
                /* Tapes need much higher max_sector limits, so just
                 * raise it to the maximum possible (4 GB / 512) and
                 * let the queue segment size sort out the real limit.
                 */
-               blk_queue_max_sectors(sdev->request_queue, 0x7FFFFF);
+               blk_queue_max_hw_sectors(sdev->request_queue, 0x7FFFFF);
        }
 
        /* Some USB host controllers can't do DMA; they have to use PIO.
@@ -495,7 +495,7 @@ static ssize_t store_max_sectors(struct device *dev, struct device_attribute *at
        unsigned short ms;
 
        if (sscanf(buf, "%hu", &ms) > 0 && ms <= SCSI_DEFAULT_MAX_SECTORS) {
-               blk_queue_max_sectors(sdev->request_queue, ms);
+               blk_queue_max_hw_sectors(sdev->request_queue, ms);
                return strlen(buf);
        }
        return -EINVAL; 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5d378627f4469dd7fd5582f0cb5d49453aa83e1a..57bc48446e924df5f2a7dd027be6187768805ffd 100644
@@ -921,7 +921,14 @@ extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
-extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
+
+/* Temporary compatibility wrapper */
+static inline void blk_queue_max_sectors(struct request_queue *q, unsigned int max)
+{
+       blk_queue_max_hw_sectors(q, max);
+}
+
 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
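
Since the wrapper above is a static inline with the same signature, callers that have not yet been converted (including out-of-tree drivers) keep compiling unchanged during the transition; a hypothetical leftover call simply resolves to the new name:

	/* Unconverted caller -- goes through the compatibility wrapper. */
	blk_queue_max_sectors(q, 256);	/* equivalent to blk_queue_max_hw_sectors(q, 256) */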