* Copyright (C) 1994, Karl Keyte: Added support for disk statistics
* Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
* Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
- * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> - July2000
+ * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
+ * - July 2000
* bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
*/
/*
* For queue allocation
*/
-struct kmem_cache *blk_requestq_cachep = NULL;
+struct kmem_cache *blk_requestq_cachep;
/*
* Controlling structure to kblockd
error = -EIO;
if (unlikely(nbytes > bio->bi_size)) {
- printk("%s: want %u bytes done, only %u left\n",
+ printk(KERN_ERR "%s: want %u bytes done, %u left\n",
__FUNCTION__, nbytes, bio->bi_size);
nbytes = bio->bi_size;
}
{
int bit;
- printk("%s: dev %s: type=%x, flags=%x\n", msg,
+ printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
rq->cmd_flags);
- printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
- rq->nr_sectors,
- rq->current_nr_sectors);
- printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
+ printk(KERN_INFO " sector %llu, nr/cnr %lu/%u\n",
+ (unsigned long long)rq->sector,
+ rq->nr_sectors,
+ rq->current_nr_sectors);
+ printk(KERN_INFO " bio %p, biotail %p, buffer %p, data %p, len %u\n",
+ rq->bio, rq->biotail,
+ rq->buffer, rq->data,
+ rq->data_len);
if (blk_pc_request(rq)) {
- printk("cdb: ");
+ printk(KERN_INFO " cdb: ");
for (bit = 0; bit < sizeof(rq->cmd); bit++)
printk("%02x ", rq->cmd[bit]);
printk("\n");
}
}
-
EXPORT_SYMBOL(blk_dump_rq_flags);
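For reference, the KERN_* levels added throughout this patch are plain string
literals (KERN_ERR is "<3>" in this era), so the compiler concatenates them
onto the format string; note there is no comma between the level and the
format. A minimal sketch, with report_short_io as a hypothetical helper:

#include <linux/kernel.h>

static void report_short_io(unsigned int want, unsigned int left)
{
	/* KERN_ERR and the format are adjacent literals, merged at compile time */
	printk(KERN_ERR "%s: want %u bytes done, %u left\n",
	       __FUNCTION__, want, left);
}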
/*
blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
}
}
-
EXPORT_SYMBOL(blk_plug_device);
/*
del_timer(&q->unplug_timer);
return 1;
}
-
EXPORT_SYMBOL(blk_remove_plug);
/*
kblockd_schedule_work(&q->unplug_work);
}
}
-
EXPORT_SYMBOL(blk_start_queue);
/**
}
EXPORT_SYMBOL(blk_put_queue);
-void blk_cleanup_queue(struct request_queue * q)
+void blk_cleanup_queue(struct request_queue *q)
{
mutex_lock(&q->sysfs_lock);
set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
blk_put_queue(q);
}
-
EXPORT_SYMBOL(blk_cleanup_queue);
static int blk_init_free_list(struct request_queue *q)
return 1;
}
-
EXPORT_SYMBOL(blk_get_queue);
static inline void blk_free_request(struct request_queue *q, struct request *rq)
*/
if (ioc_batching(q, ioc))
ioc->nr_batch_requests--;
-
+
rq_init(q, rq);
blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
elv_requeue_request(q, rq);
}
-
EXPORT_SYMBOL(blk_requeue_request);
/**
blk_start_queueing(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
-
EXPORT_SYMBOL(blk_insert_request);
/*
* queue lock is held and interrupts disabled, as we muck with the
* request queue list.
*/
-static inline void add_request(struct request_queue * q, struct request * req)
+static inline void add_request(struct request_queue *q, struct request *req)
{
drive_stat_acct(req, 1);
*/
__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
}
-
+
/*
* disk_round_stats() - Round off the performance stats on a struct
* disk_stats.
}
disk->stamp = now;
}
-
EXPORT_SYMBOL_GPL(disk_round_stats);
/*
freed_request(q, rw, priv);
}
}
-
EXPORT_SYMBOL_GPL(__blk_put_request);
void blk_put_request(struct request *req)
spin_unlock_irqrestore(q->queue_lock, flags);
}
}
-
EXPORT_SYMBOL(blk_put_request);
void init_request_from_bio(struct request *req, struct bio *bio)
el_ret = elv_merge(q, &req, bio);
switch (el_ret) {
- case ELEVATOR_BACK_MERGE:
- BUG_ON(!rq_mergeable(req));
+ case ELEVATOR_BACK_MERGE:
+ BUG_ON(!rq_mergeable(req));
- if (!ll_back_merge_fn(q, req, bio))
- break;
+ if (!ll_back_merge_fn(q, req, bio))
+ break;
- blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+ blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
- req->biotail->bi_next = bio;
- req->biotail = bio;
- req->nr_sectors = req->hard_nr_sectors += nr_sectors;
- req->ioprio = ioprio_best(req->ioprio, prio);
- drive_stat_acct(req, 0);
- if (!attempt_back_merge(q, req))
- elv_merged_request(q, req, el_ret);
- goto out;
+ req->biotail->bi_next = bio;
+ req->biotail = bio;
+ req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+ req->ioprio = ioprio_best(req->ioprio, prio);
+ drive_stat_acct(req, 0);
+ if (!attempt_back_merge(q, req))
+ elv_merged_request(q, req, el_ret);
+ goto out;
- case ELEVATOR_FRONT_MERGE:
- BUG_ON(!rq_mergeable(req));
+ case ELEVATOR_FRONT_MERGE:
+ BUG_ON(!rq_mergeable(req));
- if (!ll_front_merge_fn(q, req, bio))
- break;
+ if (!ll_front_merge_fn(q, req, bio))
+ break;
- blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+ blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
- bio->bi_next = req->bio;
- req->bio = bio;
+ bio->bi_next = req->bio;
+ req->bio = bio;
- /*
- * may not be valid. if the low level driver said
- * it didn't need a bounce buffer then it better
- * not touch req->buffer either...
- */
- req->buffer = bio_data(bio);
- req->current_nr_sectors = bio_cur_sectors(bio);
- req->hard_cur_sectors = req->current_nr_sectors;
- req->sector = req->hard_sector = bio->bi_sector;
- req->nr_sectors = req->hard_nr_sectors += nr_sectors;
- req->ioprio = ioprio_best(req->ioprio, prio);
- drive_stat_acct(req, 0);
- if (!attempt_front_merge(q, req))
- elv_merged_request(q, req, el_ret);
- goto out;
-
- /* ELV_NO_MERGE: elevator says don't/can't merge. */
- default:
- ;
+ /*
+ * may not be valid. if the low level driver said
+ * it didn't need a bounce buffer then it better
+ * not touch req->buffer either...
+ */
+ req->buffer = bio_data(bio);
+ req->current_nr_sectors = bio_cur_sectors(bio);
+ req->hard_cur_sectors = req->current_nr_sectors;
+ req->sector = req->hard_sector = bio->bi_sector;
+ req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+ req->ioprio = ioprio_best(req->ioprio, prio);
+ drive_stat_acct(req, 0);
+ if (!attempt_front_merge(q, req))
+ elv_merged_request(q, req, el_ret);
+ goto out;
+
+ /* ELV_NO_MERGE: elevator says don't/can't merge. */
+ default:
+ ;
}
get_rq:
}
if (unlikely(nr_sectors > q->max_hw_sectors)) {
- printk("bio too big device %s (%u > %u)\n",
+ printk(KERN_ERR "bio too big device %s (%u > %u)\n",
bdevname(bio->bi_bdev, b),
bio_sectors(bio),
q->max_hw_sectors);
} while (bio);
current->bio_tail = NULL; /* deactivate */
}
-
EXPORT_SYMBOL(generic_make_request);
/**
current->comm, task_pid_nr(current),
(rw & WRITE) ? "WRITE" : "READ",
(unsigned long long)bio->bi_sector,
- bdevname(bio->bi_bdev,b));
+ bdevname(bio->bi_bdev, b));
}
}
generic_make_request(bio);
}
-
EXPORT_SYMBOL(submit_bio);
/**
if (!blk_pc_request(req))
req->errors = 0;
- if (error) {
- if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
- printk("end_request: I/O error, dev %s, sector %llu\n",
+ if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
+ printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
req->rq_disk ? req->rq_disk->disk_name : "?",
(unsigned long long)req->sector);
}
if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
blk_dump_rq_flags(req, "__end_that");
- printk("%s: bio idx %d >= vcnt %d\n",
- __FUNCTION__,
- bio->bi_idx, bio->bi_vcnt);
+ printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
+ __FUNCTION__, bio->bi_idx,
+ bio->bi_vcnt);
break;
}
total_bytes += nbytes;
nr_bytes -= nbytes;
- if ((bio = req->bio)) {
+ bio = req->bio;
+ if (bio) {
/*
* end more in this run, or just return 'not-done'
*/
local_irq_enable();
while (!list_empty(&local_list)) {
- struct request *rq = list_entry(local_list.next, struct request, donelist);
+ struct request *rq;
+ rq = list_entry(local_list.next, struct request, donelist);
list_del_init(&rq->donelist);
rq->q->softirq_done_fn(rq);
}
}
-static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action,
- void *hcpu)
+static int __cpuinit blk_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
{
/*
* If a CPU goes away, splice its entries to the current CPU
unsigned long flags;
BUG_ON(!req->q->softirq_done_fn);
-
+
local_irq_save(flags);
cpu_list = &__get_cpu_var(blk_cpu_done);
local_irq_restore(flags);
}
-
EXPORT_SYMBOL(blk_complete_request);
-
+
/*
* queue lock must be held
*/
{
return queue_work(kblockd_workqueue, work);
}
-
EXPORT_SYMBOL(kblockd_schedule_work);
void kblockd_flush_work(struct work_struct *work)
#include "blk.h"
-unsigned long blk_max_low_pfn, blk_max_pfn;
+unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);
+
+unsigned long blk_max_pfn;
EXPORT_SYMBOL(blk_max_pfn);
/**
{
q->prep_rq_fn = pfn;
}
-
EXPORT_SYMBOL(blk_queue_prep_rq);
/**
{
q->merge_bvec_fn = mbfn;
}
-
EXPORT_SYMBOL(blk_queue_merge_bvec);
void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
q->softirq_done_fn = fn;
}
-
EXPORT_SYMBOL(blk_queue_softirq_done);
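A minimal sketch of how a driver pairs this hook with blk_complete_request(),
assuming the 2.6.24-era API; all mydrv_* names are hypothetical:

#include <linux/blkdev.h>
#include <linux/interrupt.h>

/* Runs later in softirq context, off the per-CPU blk_cpu_done list. */
static void mydrv_softirq_done(struct request *rq)
{
	mydrv_finish(rq);	/* hypothetical: the heavy completion work */
}

static void mydrv_init_queue(struct request_queue *q)
{
	blk_queue_softirq_done(q, mydrv_softirq_done);
}

static irqreturn_t mydrv_isr(int irq, void *dev_id)
{
	struct request *rq = mydrv_fetch_done(dev_id);	/* hypothetical */

	blk_complete_request(rq);	/* defer completion out of hard-IRQ context */
	return IRQ_HANDLED;
}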
/**
* __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
* blk_queue_bounce() to create a buffer in normal memory.
**/
-void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
+void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
/*
* set defaults
blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
q->make_request_fn = mfn;
- q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+ q->backing_dev_info.ra_pages =
+ (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
q->backing_dev_info.state = 0;
q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
*/
blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
-
EXPORT_SYMBOL(blk_queue_make_request);
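A minimal sketch of a bio-based (stacking/ramdisk-style) driver using this
hook, assuming the int-returning make_request_fn and two-argument bio_endio()
of this era; the mydrv_* names are hypothetical:

#include <linux/blkdev.h>
#include <linux/bio.h>

static int mydrv_make_request(struct request_queue *q, struct bio *bio)
{
	mydrv_transfer(bio);	/* hypothetical: move the data */
	bio_endio(bio, 0);	/* complete the bio without error */
	return 0;		/* bio consumed, nothing to resubmit */
}

static void mydrv_register(struct request_queue *q)
{
	blk_queue_make_request(q, mydrv_make_request);
}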
/**
**/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
{
- unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
+ unsigned long b_pfn = dma_addr >> PAGE_SHIFT;
int dma = 0;
q->bounce_gfp = GFP_NOIO;
/* Assume anything <= 4GB can be handled by IOMMU.
Actually some IOMMUs can handle everything, but I don't
know of a way to test this here. */
- if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+ if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
dma = 1;
q->bounce_pfn = max_low_pfn;
#else
- if (bounce_pfn < blk_max_low_pfn)
+ if (b_pfn < blk_max_low_pfn)
dma = 1;
- q->bounce_pfn = bounce_pfn;
+ q->bounce_pfn = b_pfn;
#endif
if (dma) {
init_emergency_isa_pool();
q->bounce_gfp = GFP_NOIO | GFP_DMA;
- q->bounce_pfn = bounce_pfn;
+ q->bounce_pfn = b_pfn;
}
}
-
EXPORT_SYMBOL(blk_queue_bounce_limit);
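For illustration, a hypothetical probe path picking a bounce limit;
BLK_BOUNCE_HIGH and BLK_BOUNCE_ISA are the stock constants of this era:

#include <linux/blkdev.h>

static void mydrv_set_bounce(struct request_queue *q)
{
	/* default: only highmem pages need bouncing */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);

	/* an ISA-style engine limited to low memory would instead use */
	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
}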
/**
{
if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
- printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
+ printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+ max_sectors);
}
if (BLK_DEF_MAX_SECTORS > max_sectors)
q->max_hw_sectors = max_sectors;
}
}
-
EXPORT_SYMBOL(blk_queue_max_sectors);
/**
{
if (!max_segments) {
max_segments = 1;
- printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+ printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+ max_segments);
}
q->max_phys_segments = max_segments;
}
-
EXPORT_SYMBOL(blk_queue_max_phys_segments);
/**
{
if (!max_segments) {
max_segments = 1;
- printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+ printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+ max_segments);
}
q->max_hw_segments = max_segments;
}
-
EXPORT_SYMBOL(blk_queue_max_hw_segments);
/**
{
if (max_size < PAGE_CACHE_SIZE) {
max_size = PAGE_CACHE_SIZE;
- printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
+ printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+ max_size);
}
q->max_segment_size = max_size;
}
-
EXPORT_SYMBOL(blk_queue_max_segment_size);
/**
{
q->hardsect_size = size;
}
-
EXPORT_SYMBOL(blk_queue_hardsect_size);
/*
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
/* zero is "infinity" */
- t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
- t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
+ t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
+ t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
- t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
- t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
- t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
- t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
+ t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
+ t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
+ t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
+ t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
}
-
EXPORT_SYMBOL(blk_queue_stack_limits);
/**
return 0;
}
-
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
/**
{
if (mask < PAGE_CACHE_SIZE - 1) {
mask = PAGE_CACHE_SIZE - 1;
- printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
+ printk(KERN_INFO "%s: set to minimum %lx\n", __FUNCTION__,
+ mask);
}
q->seg_boundary_mask = mask;
}
-
EXPORT_SYMBOL(blk_queue_segment_boundary);
/**
{
q->dma_alignment = mask;
}
-
EXPORT_SYMBOL(blk_queue_dma_alignment);
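dma_alignment follows the mask convention (alignment minus one, 511 by
default); a sketch of a hypothetical controller requiring 512-byte aligned
buffers:

#include <linux/blkdev.h>

static void mydrv_set_alignment(struct request_queue *q)
{
	/* 512-byte alignment is expressed as the mask 511 */
	blk_queue_dma_alignment(q, 512 - 1);
}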
/**
if (mask > q->dma_alignment)
q->dma_alignment = mask;
}
-
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
int __init blk_settings_init(void)