Diffstat (limited to 'drivers')
48 files changed, 81 insertions, 110 deletions
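The change applied across these drivers follows one pattern: the per-request size cap moves from blk_queue_max_sectors() to blk_queue_max_hw_sectors(), and the separate blk_queue_max_phys_segments()/blk_queue_max_hw_segments() limits collapse into a single blk_queue_max_segments() call. A minimal before/after sketch of a driver's queue setup; the example_* names and numeric limits are hypothetical, chosen only for illustration and not taken from any file in the diff below:

#include <linux/blkdev.h>

/* Hypothetical limits, for illustration only. */
#define EXAMPLE_MAX_HW_SECTORS	256	/* largest request, in 512-byte sectors */
#define EXAMPLE_MAX_SEGMENTS	64	/* scatter-gather entries the hardware handles */

static void example_setup_queue(struct request_queue *q)
{
	/*
	 * Old-style setup, as removed throughout this diff:
	 *
	 *	blk_queue_max_sectors(q, EXAMPLE_MAX_HW_SECTORS);
	 *	blk_queue_max_phys_segments(q, EXAMPLE_MAX_SEGMENTS);
	 *	blk_queue_max_hw_segments(q, EXAMPLE_MAX_SEGMENTS);
	 */

	/* Renamed setter: the sector cap is a hardware limit. */
	blk_queue_max_hw_sectors(q, EXAMPLE_MAX_HW_SECTORS);

	/* The phys/hw segment limits are now a single segment limit. */
	blk_queue_max_segments(q, EXAMPLE_MAX_SEGMENTS);
}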
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d096fbc..bea003a 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1097,7 +1097,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
         dev->flags |= ATA_DFLAG_NO_UNLOAD;
     /* configure max sectors */
-    blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
+    blk_queue_max_hw_sectors(sdev->request_queue, dev->max_sectors);
     if (dev->class == ATA_DEV_ATAPI) {
         struct request_queue *q = sdev->request_queue;
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 0c82d33..684fe04 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -772,7 +772,7 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
     }
     blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
-    blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
+    blk_queue_max_segments(sdev->request_queue, sg_tablesize);
     ata_port_printk(ap, KERN_INFO,
         "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
         (unsigned long long)*ap->host->dev->dma_mask,
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 7412b5d..459f1bc 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -2534,8 +2534,8 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
     blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit);
     RequestQueue->queuedata = Controller;
     blk_queue_max_hw_segments(RequestQueue, Controller->DriverScatterGatherLimit);
-    blk_queue_max_phys_segments(RequestQueue, Controller->DriverScatterGatherLimit);
-    blk_queue_max_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
+    blk_queue_max_segments(RequestQueue, Controller->DriverScatterGatherLimit);
+    blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
     disk->queue = RequestQueue;
     sprintf(disk->disk_name, "rd/c%dd%d", Controller->ControllerNumber, n);
     disk->major = MajorNumber;
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 4f68843..c6ddeac 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -434,7 +434,7 @@ static struct brd_device *brd_alloc(int i)
         goto out_free_dev;
     blk_queue_make_request(brd->brd_queue, brd_make_request);
     blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG, NULL);
-    blk_queue_max_sectors(brd->brd_queue, 1024);
+    blk_queue_max_hw_sectors(brd->brd_queue, 1024);
     blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
     disk = brd->brd_disk = alloc_disk(1 << part_shift);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 86acdca5..a29e694 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1797,12 +1797,9 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
     blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
     /* This is a hardware imposed limit. */
-    blk_queue_max_hw_segments(disk->queue, h->maxsgentries);
+    blk_queue_max_segments(disk->queue, h->maxsgentries);
-    /* This is a limit in the driver and could be eliminated. */
-    blk_queue_max_phys_segments(disk->queue, h->maxsgentries);
-
-    blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
+    blk_queue_max_hw_sectors(disk->queue, h->cciss_max_sectors);
     blk_queue_softirq_done(disk->queue, cciss_softirq_done);
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 6422651..91d1163 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -448,11 +448,8 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
     blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
     /* This is a hardware imposed limit. */
-    blk_queue_max_hw_segments(q, SG_MAX);
+    blk_queue_max_segments(q, SG_MAX);
-    /* This is a driver limit and could be eliminated. */
-    blk_queue_max_phys_segments(q, SG_MAX);
-
     init_timer(&hba[i]->timer);
     hba[i]->timer.expires = jiffies + IDA_TIMER;
     hba[i]->timer.data = (unsigned long)hba[i];
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 1292e06..4df3b40 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -709,9 +709,8 @@ void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __mu
         max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);
-    blk_queue_max_sectors(q, max_seg_s >> 9);
-    blk_queue_max_phys_segments(q, max_segments ? max_segments : MAX_PHYS_SEGMENTS);
-    blk_queue_max_hw_segments(q, max_segments ? max_segments : MAX_HW_SEGMENTS);
+    blk_queue_max_hw_sectors(q, max_seg_s >> 9);
+    blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
     blk_queue_max_segment_size(q, max_seg_s);
     blk_queue_logical_block_size(q, 512);
     blk_queue_segment_boundary(q, PAGE_SIZE-1);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 3266b4f..b9b1170 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4234,7 +4234,7 @@ static int __init floppy_init(void)
         err = -ENOMEM;
         goto out_unreg_driver;
     }
-    blk_queue_max_sectors(floppy_queue, 64);
+    blk_queue_max_hw_sectors(floppy_queue, 64);
     blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE,
                 floppy_find, NULL, NULL);
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index d5cdce0..5116c65 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -719,7 +719,7 @@ static int __init hd_init(void)
         return -ENOMEM;
     }
-    blk_queue_max_sectors(hd_queue, 255);
+    blk_queue_max_hw_sectors(hd_queue, 255);
     init_timer(&device_timer);
     device_timer.function = hd_times_out;
     blk_queue_logical_block_size(hd_queue, 512);
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 02b2583..5416c9a 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -980,7 +980,7 @@ static int mg_probe(struct platform_device *plat_dev)
                 __func__, __LINE__);
         goto probe_err_6;
     }
-    blk_queue_max_sectors(host->breq, MG_MAX_SECTS);
+    blk_queue_max_hw_sectors(host->breq, MG_MAX_SECTS);
     blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE);
     init_timer(&host->timer);
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 569e39e..e712cd5 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -906,7 +906,7 @@ static int __init pd_init(void)
     if (!pd_queue)
         goto out1;
-    blk_queue_max_sectors(pd_queue, cluster);
+    blk_queue_max_hw_sectors(pd_queue, cluster);
     if (register_blkdev(major, name))
         goto out2;
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index ea54ea3..ddb4f9a 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -956,8 +956,7 @@ static int __init pf_init(void)
         return -ENOMEM;
     }
-    blk_queue_max_phys_segments(pf_queue, cluster);
-    blk_queue_max_hw_segments(pf_queue, cluster);
+    blk_queue_max_segments(pf_queue, cluster);
     for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
         struct gendisk *disk = pf->disk;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 7cd2973..b72935b 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -950,14 +950,14 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
 static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
 {
     if ((pd->settings.size << 9) / CD_FRAMESIZE
-        <= queue_max_phys_segments(q)) {
+        <= queue_max_segments(q)) {
         /*
          * The cdrom device can handle one segment/frame
          */
         clear_bit(PACKET_MERGE_SEGS, &pd->flags);
         return 0;
     } else if ((pd->settings.size << 9) / PAGE_SIZE
-           <= queue_max_phys_segments(q)) {
+           <= queue_max_segments(q)) {
         /*
          * We can handle this case at the expense of some extra memory
          * copies during write operations
@@ -2312,7 +2312,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
          * even if the size is a multiple of the packet size.
          */
         spin_lock_irq(q->queue_lock);
-        blk_queue_max_sectors(q, pd->settings.size);
+        blk_queue_max_hw_sectors(q, pd->settings.size);
         spin_unlock_irq(q->queue_lock);
         set_bit(PACKET_WRITABLE, &pd->flags);
     } else {
@@ -2613,7 +2613,7 @@ static void pkt_init_queue(struct pktcdvd_device *pd)
     blk_queue_make_request(q, pkt_make_request);
     blk_queue_logical_block_size(q, CD_FRAMESIZE);
-    blk_queue_max_sectors(q, PACKET_MAX_SECTORS);
+    blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
     blk_queue_merge_bvec(q, pkt_merge_bvec);
     q->queuedata = pd;
 }
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 03a130d..bc95469 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -474,7 +474,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
     blk_queue_bounce_limit(queue, BLK_BOUNCE_HIGH);
-    blk_queue_max_sectors(queue, dev->bounce_size >> 9);
+    blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9);
     blk_queue_segment_boundary(queue, -1UL);
     blk_queue_dma_alignment(queue, dev->blk_size-1);
     blk_queue_logical_block_size(queue, dev->blk_size);
@@ -482,8 +482,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
     blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH, ps3disk_prepare_flush);
-    blk_queue_max_phys_segments(queue, -1);
-    blk_queue_max_hw_segments(queue, -1);
+    blk_queue_max_segments(queue, -1);
     blk_queue_max_segment_size(queue, dev->bounce_size);
     gendisk = alloc_disk(PS3DISK_MINORS);
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 1fb6c31..83ebb39 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -751,10 +751,9 @@ static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
     priv->queue = queue;
     queue->queuedata = dev;
     blk_queue_make_request(queue, ps3vram_make_request);
-    blk_queue_max_phys_segments(queue, MAX_PHYS_SEGMENTS);
-    blk_queue_max_hw_segments(queue, MAX_HW_SEGMENTS);
-    blk_queue_max_segment_size(queue, MAX_SEGMENT_SIZE);
-    blk_queue_max_sectors(queue, SAFE_MAX_SECTORS);
+    blk_queue_max_segments(queue, BLK_MAX_HW_SEGMENTS);
+    blk_queue_max_segment_size(queue, BLK_MAX_SEGMENT_SIZE);
+    blk_queue_max_hw_sectors(queue, BLK_SAFE_MAX_SECTORS);
     gendisk = alloc_disk(1);
     if (!gendisk) {
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 411f064..48e8fee 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -691,9 +691,8 @@ static int probe_disk(struct vdc_port *port)
     port->disk = g;
-    blk_queue_max_hw_segments(q, port->ring_cookies);
-    blk_queue_max_phys_segments(q, port->ring_cookies);
-    blk_queue_max_sectors(q, port->max_xfer_size);
+    blk_queue_max_segments(q, port->ring_cookies);
+    blk_queue_max_hw_sectors(q, port->max_xfer_size);
     g->major = vdc_major;
     g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
     strcpy(g->disk_name, port->disk_name);
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 7bd7b2f..b70f0fc 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -1518,8 +1518,7 @@ static int carm_init_disks(struct carm_host *host)
             break;
         }
         disk->queue = q;
-        blk_queue_max_hw_segments(q, CARM_MAX_REQ_SG);
-        blk_queue_max_phys_segments(q, CARM_MAX_REQ_SG);
+        blk_queue_max_segments(q, CARM_MAX_REQ_SG);
         blk_queue_segment_boundary(q, CARM_SG_BOUNDARY);
         q->queuedata = port;
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index d86d135..2e88983 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -2320,10 +2320,9 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
     disk->queue = q;
     blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
-    blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
-    blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
+    blk_queue_max_segments(q, UB_MAX_REQ_SG);
     blk_queue_segment_boundary(q, 0xffffffff);    /* Dubious. */
-    blk_queue_max_sectors(q, UB_MAX_SECTORS);
+    blk_queue_max_hw_sectors(q, UB_MAX_SECTORS);
     blk_queue_logical_block_size(q, lun->capacity.bsize);
     lun->disk = disk;
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index a8c8b56..c12b313 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -471,9 +471,8 @@ retry:
     }
     d->disk = g;
-    blk_queue_max_hw_segments(q, VIOMAXBLOCKDMA);
-    blk_queue_max_phys_segments(q, VIOMAXBLOCKDMA);
-    blk_queue_max_sectors(q, VIODASD_MAXSECTORS);
+    blk_queue_max_segments(q, VIOMAXBLOCKDMA);
+    blk_queue_max_hw_sectors(q, VIODASD_MAXSECTORS);
     g->major = VIODASD_MAJOR;
     g->first_minor = dev_no << PARTITION_SHIFT;
     if (dev_no >= 26)
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index d1fd032..1a325fb 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -242,7 +242,7 @@ static int __init xd_init(void)
     }
     /* xd_maxsectors depends on controller - so set after detection */
-    blk_queue_max_sectors(xd_queue, xd_maxsectors);
+    blk_queue_max_hw_sectors(xd_queue, xd_maxsectors);
     for (i = 0; i < xd_drives; i++)
         add_disk(xd_gendisk[i]);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index a84702d..9c09694 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -346,15 +346,14 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
     /* Hard sector size and max sectors impersonate the equiv. hardware. */
     blk_queue_logical_block_size(rq, sector_size);
-    blk_queue_max_sectors(rq, 512);
+    blk_queue_max_hw_sectors(rq, 512);
     /* Each segment in a request is up to an aligned page in size. */
     blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
     blk_queue_max_segment_size(rq, PAGE_SIZE);
     /* Ensure a merged request will fit in a single I/O ring slot. */
-    blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
-    blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+    blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
     /* Make sure buffer addresses are sector-aligned. */
     blk_queue_dma_alignment(rq, 511);
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index e789e6c..03c71f7 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -741,7 +741,7 @@ static int __devinit probe_gdrom_setupqueue(void)
 {
     blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
     /* using DMA so memory will need to be contiguous */
-    blk_queue_max_hw_segments(gd.gdrom_rq, 1);
+    blk_queue_max_segments(gd.gdrom_rq, 1);
     /* set a large max size to get most from DMA */
     blk_queue_max_segment_size(gd.gdrom_rq, 0x40000);
     gd.disk->queue = gd.gdrom_rq;
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 57ca69e..cc435be 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -616,9 +616,8 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
     gendisk->first_minor = deviceno;
     strncpy(gendisk->disk_name, c->name, sizeof(gendisk->disk_name));
-    blk_queue_max_hw_segments(q, 1);
-    blk_queue_max_phys_segments(q, 1);
-    blk_queue_max_sectors(q, 4096 / 512);
+    blk_queue_max_segments(q, 1);
+    blk_queue_max_hw_sectors(q, 4096 / 512);
     gendisk->queue = q;
     gendisk->fops = &viocd_fops;
     gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE;
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index d485cdd..70fef40 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1571,7 +1571,7 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
         sdev->start_stop_pwr_cond = 1;
     if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
-        blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
+        blk_queue_max_hw_sectors(sdev->request_queue, 128 * 1024 / 512);
     blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 7f87801..3b128dc 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -679,7 +679,7 @@ static void ide_disk_setup(ide_drive_t *drive)
         if (max_s > hwif->rqsize)
             max_s = hwif->rqsize;
-        blk_queue_max_sectors(q, max_s);
+        blk_queue_max_hw_sectors(q, max_s);
     }
     printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index fefbdfc..efd9076 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -486,7 +486,7 @@ static void ide_floppy_setup(ide_drive_t *drive)
         drive->atapi_flags |= IDE_AFLAG_ZIP_DRIVE;
         /* This value will be visible in the /proc/ide/hdx/settings */
         drive->pc_delay = IDEFLOPPY_PC_DELAY;
-        blk_queue_max_sectors(drive->queue, 64);
+        blk_queue_max_hw_sectors(drive->queue, 64);
     }
     /*
@@ -494,7 +494,7 @@ static void ide_floppy_setup(ide_drive_t *drive)
      * nasty clicking noises without it, so please don't remove this.
      */
     if (strncmp((char *)&id[ATA_ID_PROD], "IOMEGA Clik!", 11) == 0) {
-        blk_queue_max_sectors(drive->queue, 64);
+        blk_queue_max_hw_sectors(drive->queue, 64);
         drive->atapi_flags |= IDE_AFLAG_CLIK_DRIVE;
         /* IOMEGA Clik! drives do not support lock/unlock commands */
         drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 4d76ba4..f8c1ae6 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -774,7 +774,7 @@ static int ide_init_queue(ide_drive_t *drive)
     if (hwif->rqsize < max_sectors)
         max_sectors = hwif->rqsize;
-    blk_queue_max_sectors(q, max_sectors);
+    blk_queue_max_hw_sectors(q, max_sectors);
 #ifdef CONFIG_PCI
     /* When we have an IOMMU, we may have a problem where pci_map_sg()
@@ -790,8 +790,7 @@ static int ide_init_queue(ide_drive_t *drive)
         max_sg_entries >>= 1;
 #endif /* CONFIG_PCI */
-    blk_queue_max_hw_segments(q, max_sg_entries);
-    blk_queue_max_phys_segments(q, max_sg_entries);
+    blk_queue_max_segments(q, max_sg_entries);
     /* assign drive queue */
     drive->queue = q;
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index f199896..c88696a 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -2020,7 +2020,7 @@ static int sbp2scsi_slave_configure(struct scsi_device *sdev)
     if (lu->workarounds & SBP2_WORKAROUND_POWER_CONDITION)
         sdev->start_stop_pwr_cond = 1;
     if (lu->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
-        blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
+        blk_queue_max_hw_sectors(sdev->request_queue, 128 * 1024 / 512);
     blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);
     return 0;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 00435bd..af2d39d 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -177,7 +177,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
         */
        if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
            queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-            blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+            blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
        conf->array_sectors += rdev->sectors;
        cnt++;
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 32a662f..4b323f4 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -308,7 +308,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
             */
            if (q->merge_bvec_fn &&
                queue_max_sectors(q) > (PAGE_SIZE>>9))
-                blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+                blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
            conf->working_disks++;
            mddev->degraded--;
@@ -478,7 +478,7 @@ static int multipath_run (mddev_t *mddev)
         * a merge_bvec_fn to be involved in multipath */
        if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
            queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-            blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+            blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
        if (!test_bit(Faulty, &rdev->flags))
            conf->working_disks++;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 77605cd..a1f7147 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -182,7 +182,7 @@ static int create_strip_zones(mddev_t *mddev)
        if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
            queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-            blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+            blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
        if (!smallest || (rdev1->sectors < smallest->sectors))
            smallest = rdev1;
@@ -325,7 +325,7 @@ static int raid0_run(mddev_t *mddev)
    }
    if (md_check_no_bitmap(mddev))
        return -EINVAL;
-    blk_queue_max_sectors(mddev->queue, mddev->chunk_sectors);
+    blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
    mddev->queue->queue_lock = &mddev->queue->__queue_lock;
    ret = create_strip_zones(mddev);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 859bd3f..5a06122 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1158,7 +1158,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
             */
            if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
                queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-                blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+                blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
            p->head_position = 0;
            rdev->raid_disk = mirror;
@@ -2103,7 +2103,7 @@ static int run(mddev_t *mddev)
         */
        if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
            queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-            blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+            blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
    }
    mddev->degraded = 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d119b7b..7584f9a 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1161,7 +1161,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
             */
            if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
                queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-                blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+                blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
            p->head_position = 0;
            rdev->raid_disk = mirror;
@@ -2260,7 +2260,7 @@ static int run(mddev_t *mddev)
         */
        if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
            queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-            blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
+            blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
        disk->head_position = 0;
    }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index ceb24af..509c8f3 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3739,7 +3739,7 @@ static int bio_fits_rdev(struct bio *bi)
    if ((bi->bi_size>>9) > queue_max_sectors(q))
        return 0;
    blk_recount_segments(q, bi);
-    if (bi->bi_phys_segments > queue_max_phys_segments(q))
+    if (bi->bi_phys_segments > queue_max_segments(q))
        return 0;
    if (q->merge_bvec_fn)
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index bd83fa0..972b870 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1226,9 +1226,8 @@ static int mspro_block_init_disk(struct memstick_dev *card)
    blk_queue_prep_rq(msb->queue, mspro_block_prepare_req);
    blk_queue_bounce_limit(msb->queue, limit);
-    blk_queue_max_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES);
-    blk_queue_max_phys_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
-    blk_queue_max_hw_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
+    blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES);
+    blk_queue_max_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
    blk_queue_max_segment_size(msb->queue, MSPRO_BLOCK_MAX_PAGES * msb->page_size);
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index e39986a..2658b14 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -1065,9 +1065,8 @@ static int i2o_block_probe(struct device *dev)
    queue = gd->queue;
    queue->queuedata = i2o_blk_dev;
-    blk_queue_max_phys_segments(queue, I2O_MAX_PHYS_SEGMENTS);
-    blk_queue_max_sectors(queue, max_sectors);
-    blk_queue_max_hw_segments(queue, i2o_sg_tablesize(c, body_size));
+    blk_queue_max_hw_sectors(queue, max_sectors);
+    blk_queue_max_segments(queue, i2o_sg_tablesize(c, body_size));
    osm_debug("max sectors = %d\n", queue->max_sectors);
    osm_debug("phys segments = %d\n", queue->max_phys_segments);
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index c5a7a85..381fe03 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -154,9 +154,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
        if (mq->bounce_buf) {
            blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
-            blk_queue_max_sectors(mq->queue, bouncesz / 512);
-            blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
-            blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
+            blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
+            blk_queue_max_segments(mq->queue, bouncesz / 512);
            blk_queue_max_segment_size(mq->queue, bouncesz);
            mq->sg = kmalloc(sizeof(struct scatterlist),
@@ -180,10 +179,9 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
    if (!mq->bounce_buf) {
        blk_queue_bounce_limit(mq->queue, limit);
-        blk_queue_max_sectors(mq->queue,
+        blk_queue_max_hw_sectors(mq->queue,
            min(host->max_blk_count, host->max_req_size / 512));
-        blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
-        blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
+        blk_queue_max_segments(mq->queue, host->max_hw_segs);
        blk_queue_max_segment_size(mq->queue, host->max_seg_size);
        mq->sg = kmalloc(sizeof(struct scatterlist) *
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 5905936..8831e93 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2129,9 +2129,8 @@ static void dasd_setup_queue(struct dasd_block *block)
    blk_queue_logical_block_size(block->request_queue, block->bp_block);
    max = block->base->discipline->max_blocks << block->s2b_shift;
-    blk_queue_max_sectors(block->request_queue, max);
-    blk_queue_max_phys_segments(block->request_queue, -1L);
-    blk_queue_max_hw_segments(block->request_queue, -1L);
+    blk_queue_max_hw_sectors(block->request_queue, max);
+    blk_queue_max_segments(block->request_queue, -1L);
    /* with page sized segments we can translate each segement into
     * one idaw/tidaw */
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 8d3d720..097da8c 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -222,9 +222,8 @@ tapeblock_setup_device(struct tape_device * device)
        goto cleanup_queue;
    blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
-    blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
-    blk_queue_max_phys_segments(blkdat->request_queue, -1L);
-    blk_queue_max_hw_segments(blkdat->request_queue, -1L);
+    blk_queue_max_hw_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
+    blk_queue_max_segments(blkdat->request_queue, -1L);
    blk_queue_max_segment_size(blkdat->request_queue, -1L);
    blk_queue_segment_boundary(blkdat->request_queue, -1L);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 87b536a..732f6d3 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -4195,7 +4195,7 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
        if (tgt->service_parms.class3_parms[0] & 0x80000000)
            rport->supported_classes |= FC_COS_CLASS3;
        if (rport->rqst_q)
-            blk_queue_max_hw_segments(rport->rqst_q, 1);
+            blk_queue_max_segments(rport->rqst_q, 1);
    } else
        tgt_dbg(tgt, "rport add failed\n");
    spin_unlock_irqrestore(vhost->host->host_lock, flags);
@@ -4669,7 +4669,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
    }
    if (shost_to_fc_host(shost)->rqst_q)
-        blk_queue_max_hw_segments(shost_to_fc_host(shost)->rqst_q, 1);
+        blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
    dev_set_drvdata(dev, vhost);
    spin_lock(&ibmvfc_driver_lock);
    list_add_tail(&vhost->queue, &ibmvfc_head);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 9e52d16..032f0d0 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3674,7 +3674,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
        if (ipr_is_vset_device(res)) {
            blk_queue_rq_timeout(sdev->request_queue, IPR_VSET_RW_TIMEOUT);
-            blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
+            blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
        }
        if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
            sdev->allow_restart = 1;
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index b6f1ef9..9b1c143 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -235,7 +235,7 @@ static int pmcraid_slave_configure(struct scsi_device *scsi_dev)
        scsi_dev->allow_restart = 1;
        blk_queue_rq_timeout(scsi_dev->request_queue, PMCRAID_VSET_IO_TIMEOUT);
-        blk_queue_max_sectors(scsi_dev->request_queue,
+        blk_queue_max_hw_sectors(scsi_dev->request_queue,
                      PMCRAID_VSET_MAX_SECTORS);
    }
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index c664242..f8fbf47 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1624,10 +1624,10 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
    /*
     * this limit is imposed by hardware restrictions
     */
-    blk_queue_max_hw_segments(q, shost->sg_tablesize);
-    blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
+    blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
+                    SCSI_MAX_SG_CHAIN_SEGMENTS));
-    blk_queue_max_sectors(q, shost->max_sectors);
+    blk_queue_max_hw_sectors(q, shost->max_sectors);
    blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
    blk_queue_segment_boundary(q, shost->dma_boundary);
    dma_set_seg_boundary(dev, shost->dma_boundary);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 012f73a..5d9b513 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -879,7 +879,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
     * broken RA4x00 Compaq Disk Array
     */
    if (*bflags & BLIST_MAX_512)
-        blk_queue_max_sectors(sdev->request_queue, 512);
+        blk_queue_max_hw_sectors(sdev->request_queue, 512);
    /*
     * Some devices may not want to have a start command automatically
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 040f751..c996d98 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -287,8 +287,7 @@ sg_open(struct inode *inode, struct file *filp)
    if (list_empty(&sdp->sfds)) { /* no existing opens on this device */
        sdp->sgdebug = 0;
        q = sdp->device->request_queue;
-        sdp->sg_tablesize = min(queue_max_hw_segments(q),
-                    queue_max_phys_segments(q));
+        sdp->sg_tablesize = queue_max_segments(q);
    }
    if ((sfp = sg_add_sfp(sdp, dev)))
        filp->private_data = sfp;
@@ -1376,8 +1375,7 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
    sdp->device = scsidp;
    INIT_LIST_HEAD(&sdp->sfds);
    init_waitqueue_head(&sdp->o_excl_wait);
-    sdp->sg_tablesize = min(queue_max_hw_segments(q),
-                queue_max_phys_segments(q));
+    sdp->sg_tablesize = queue_max_segments(q);
    sdp->index = k;
    kref_init(&sdp->d_ref);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index d04ea9a..f67d1a1 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3983,8 +3983,7 @@ static int st_probe(struct device *dev)
        return -ENODEV;
    }
-    i = min(queue_max_hw_segments(SDp->request_queue),
-        queue_max_phys_segments(SDp->request_queue));
+    i = queue_max_segments(SDp->request_queue);
    if (st_max_sg_segs < i)
        i = st_max_sg_segs;
    buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
index 62b2828..45d9081 100644
--- a/drivers/staging/hv/blkvsc_drv.c
+++ b/drivers/staging/hv/blkvsc_drv.c
@@ -363,10 +363,7 @@ static int blkvsc_probe(struct device *device)
    blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);
    blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
-    blk_queue_max_phys_segments(blkdev->gd->queue,
-                    MAX_MULTIPAGE_BUFFER_COUNT);
-    blk_queue_max_hw_segments(blkdev->gd->queue,
-                  MAX_MULTIPAGE_BUFFER_COUNT);
+    blk_queue_max_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
    blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1);
    blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
    blk_queue_dma_alignment(blkdev->gd->queue, 511);
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index e5e6df3..aadc16b 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -134,14 +134,14 @@ static int slave_configure(struct scsi_device *sdev)
        if (us->fflags & US_FL_MAX_SECTORS_MIN)
            max_sectors = PAGE_CACHE_SIZE >> 9;
        if (queue_max_sectors(sdev->request_queue) > max_sectors)
-            blk_queue_max_sectors(sdev->request_queue,
+            blk_queue_max_hw_sectors(sdev->request_queue,
                          max_sectors);
    } else if (sdev->type == TYPE_TAPE) {
        /* Tapes need much higher max_sector limits, so just
         * raise it to the maximum possible (4 GB / 512) and
         * let the queue segment size sort out the real limit.
         */
-        blk_queue_max_sectors(sdev->request_queue, 0x7FFFFF);
+        blk_queue_max_hw_sectors(sdev->request_queue, 0x7FFFFF);
    }
    /* Some USB host controllers can't do DMA; they have to use PIO.
@@ -495,7 +495,7 @@ static ssize_t store_max_sectors(struct device *dev, struct device_attribute *at
    unsigned short ms;
    if (sscanf(buf, "%hu", &ms) > 0 && ms <= SCSI_DEFAULT_MAX_SECTORS) {
-        blk_queue_max_sectors(sdev->request_queue, ms);
+        blk_queue_max_hw_sectors(sdev->request_queue, ms);
        return strlen(buf);
    }
    return -EINVAL;
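On the read side the same consolidation shows up in sg, st and raid5 above: code that used to take the minimum of queue_max_hw_segments() and queue_max_phys_segments() now queries queue_max_segments() directly. A small sketch of that consumer pattern; the example_sg_tablesize() helper name is hypothetical, used only to illustrate the before/after:

#include <linux/blkdev.h>

/* Hypothetical helper: size a scatter-gather table from the queue limit. */
static unsigned short example_sg_tablesize(struct request_queue *q)
{
	/*
	 * Previously two limits had to be combined:
	 *
	 *	return min(queue_max_hw_segments(q),
	 *		   queue_max_phys_segments(q));
	 */
	return queue_max_segments(q);
}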