
Commit 7eaceac

Author and committer: Jens Axboe
block: remove per-queue plugging
Code has been converted over to the new explicit on-stack plugging, and delay users have been converted to use the new API for that. So let's kill off the old plugging along with aops->sync_page().

Signed-off-by: Jens Axboe <[email protected]>
1 parent 73c1010 commit 7eaceac
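
For context, the "new explicit on-stack plugging" the message refers to was introduced in the parent commit 73c1010. A minimal sketch of how a submitter is expected to use it, assuming the 2.6.39-era blk_start_plug()/blk_finish_plug() API; the submit_all_my_bios() helper below is hypothetical:

        struct blk_plug plug;

        blk_start_plug(&plug);     /* bios submitted from here queue up on the task's plug list */
        submit_all_my_bios();      /* hypothetical helper issuing submit_bio() calls */
        blk_finish_plug(&plug);    /* flush the plug list: requests go to the queues and dispatch starts */

blk_flush_plug(current), which the new queue_sync_plugs() calls in the diff below, flushes the same per-task list without ending the plug.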


119 files changed: +151, -1269 lines

Documentation/block/biodoc.txt  (-5)

@@ -963,11 +963,6 @@ elevator_dispatch_fn* fills the dispatch queue with ready requests.
 
 elevator_add_req_fn*            called to add a new request into the scheduler
 
-elevator_queue_empty_fn         returns true if the merge queue is empty.
-                                Drivers shouldn't use this, but rather check
-                                if elv_next_request is NULL (without losing the
-                                request if one exists!)
-
 elevator_former_req_fn
 elevator_latter_req_fn          These return the request before or after the
                                 one specified in disk sort order. Used by the

block/blk-core.c  (+20, -153)

@@ -198,6 +198,19 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 }
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
+/*
+ * Make sure that plugs that were pending when this function was entered,
+ * are now complete and requests pushed to the queue.
+ */
+static inline void queue_sync_plugs(struct request_queue *q)
+{
+        /*
+         * If the current process is plugged and has barriers submitted,
+         * we will livelock if we don't unplug first.
+         */
+        blk_flush_plug(current);
+}
+
 static void blk_delay_work(struct work_struct *work)
 {
         struct request_queue *q;
@@ -224,137 +237,6 @@ void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 }
 EXPORT_SYMBOL(blk_delay_queue);
 
-/*
- * "plug" the device if there are no outstanding requests: this will
- * force the transfer to start only after we have put all the requests
- * on the list.
- *
- * This is called with interrupts off and no requests on the queue and
- * with the queue lock held.
- */
-void blk_plug_device(struct request_queue *q)
-{
-        WARN_ON(!irqs_disabled());
-
-        /*
-         * don't plug a stopped queue, it must be paired with blk_start_queue()
-         * which will restart the queueing
-         */
-        if (blk_queue_stopped(q))
-                return;
-
-        if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
-                mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
-                trace_block_plug(q);
-        }
-}
-EXPORT_SYMBOL(blk_plug_device);
-
-/**
- * blk_plug_device_unlocked - plug a device without queue lock held
- * @q: The &struct request_queue to plug
- *
- * Description:
- *   Like @blk_plug_device(), but grabs the queue lock and disables
- *   interrupts.
- **/
-void blk_plug_device_unlocked(struct request_queue *q)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(q->queue_lock, flags);
-        blk_plug_device(q);
-        spin_unlock_irqrestore(q->queue_lock, flags);
-}
-EXPORT_SYMBOL(blk_plug_device_unlocked);
-
-/*
- * remove the queue from the plugged list, if present. called with
- * queue lock held and interrupts disabled.
- */
-int blk_remove_plug(struct request_queue *q)
-{
-        WARN_ON(!irqs_disabled());
-
-        if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
-                return 0;
-
-        del_timer(&q->unplug_timer);
-        return 1;
-}
-EXPORT_SYMBOL(blk_remove_plug);
-
-/*
- * remove the plug and let it rip..
- */
-void __generic_unplug_device(struct request_queue *q)
-{
-        if (unlikely(blk_queue_stopped(q)))
-                return;
-        if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
-                return;
-
-        q->request_fn(q);
-}
-
-/**
- * generic_unplug_device - fire a request queue
- * @q: The &struct request_queue in question
- *
- * Description:
- *   Linux uses plugging to build bigger requests queues before letting
- *   the device have at them. If a queue is plugged, the I/O scheduler
- *   is still adding and merging requests on the queue. Once the queue
- *   gets unplugged, the request_fn defined for the queue is invoked and
- *   transfers started.
- **/
-void generic_unplug_device(struct request_queue *q)
-{
-        if (blk_queue_plugged(q)) {
-                spin_lock_irq(q->queue_lock);
-                __generic_unplug_device(q);
-                spin_unlock_irq(q->queue_lock);
-        }
-}
-EXPORT_SYMBOL(generic_unplug_device);
-
-static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
-                                   struct page *page)
-{
-        struct request_queue *q = bdi->unplug_io_data;
-
-        blk_unplug(q);
-}
-
-void blk_unplug_work(struct work_struct *work)
-{
-        struct request_queue *q =
-                container_of(work, struct request_queue, unplug_work);
-
-        trace_block_unplug_io(q);
-        q->unplug_fn(q);
-}
-
-void blk_unplug_timeout(unsigned long data)
-{
-        struct request_queue *q = (struct request_queue *)data;
-
-        trace_block_unplug_timer(q);
-        kblockd_schedule_work(q, &q->unplug_work);
-}
-
-void blk_unplug(struct request_queue *q)
-{
-        /*
-         * devices don't necessarily have an ->unplug_fn defined
-         */
-        if (q->unplug_fn) {
-                trace_block_unplug_io(q);
-                q->unplug_fn(q);
-        }
-}
-EXPORT_SYMBOL(blk_unplug);
-
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q: The &struct request_queue in question
@@ -389,7 +271,6 @@ EXPORT_SYMBOL(blk_start_queue);
  **/
 void blk_stop_queue(struct request_queue *q)
 {
-        blk_remove_plug(q);
         cancel_delayed_work(&q->delay_work);
         queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
@@ -411,11 +292,10 @@ EXPORT_SYMBOL(blk_stop_queue);
  */
 void blk_sync_queue(struct request_queue *q)
 {
-        del_timer_sync(&q->unplug_timer);
         del_timer_sync(&q->timeout);
-        cancel_work_sync(&q->unplug_work);
         throtl_shutdown_timer_wq(q);
         cancel_delayed_work_sync(&q->delay_work);
+        queue_sync_plugs(q);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -430,25 +310,18 @@ EXPORT_SYMBOL(blk_sync_queue);
  */
 void __blk_run_queue(struct request_queue *q)
 {
-        blk_remove_plug(q);
-
         if (unlikely(blk_queue_stopped(q)))
                 return;
 
-        if (elv_queue_empty(q))
-                return;
-
         /*
          * Only recurse once to avoid overrunning the stack, let the unplug
          * handling reinvoke the handler shortly if we already got there.
          */
         if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
                 q->request_fn(q);
                 queue_flag_clear(QUEUE_FLAG_REENTER, q);
-        } else {
-                queue_flag_set(QUEUE_FLAG_PLUGGED, q);
-                kblockd_schedule_work(q, &q->unplug_work);
-        }
+        } else
+                queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
@@ -535,8 +408,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
         if (!q)
                 return NULL;
 
-        q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
-        q->backing_dev_info.unplug_io_data = q;
         q->backing_dev_info.ra_pages =
                         (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
         q->backing_dev_info.state = 0;
@@ -556,13 +427,11 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
         setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
                     laptop_mode_timer_fn, (unsigned long) q);
-        init_timer(&q->unplug_timer);
         setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
         INIT_LIST_HEAD(&q->timeout_list);
         INIT_LIST_HEAD(&q->flush_queue[0]);
         INIT_LIST_HEAD(&q->flush_queue[1]);
         INIT_LIST_HEAD(&q->flush_data_in_flight);
-        INIT_WORK(&q->unplug_work, blk_unplug_work);
         INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
 
         kobject_init(&q->kobj, &blk_queue_ktype);
@@ -652,7 +521,6 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
         q->request_fn = rfn;
         q->prep_rq_fn = NULL;
         q->unprep_rq_fn = NULL;
-        q->unplug_fn = generic_unplug_device;
         q->queue_flags = QUEUE_FLAG_DEFAULT;
         q->queue_lock = lock;
 
@@ -910,8 +778,8 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 }
 
 /*
- * No available requests for this queue, unplug the device and wait for some
- * requests to become available.
+ * No available requests for this queue, wait for some requests to become
+ * available.
  *
  * Called with q->queue_lock held, and returns with it unlocked.
  */
@@ -932,7 +800,6 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 
                 trace_block_sleeprq(q, bio, rw_flags & 1);
 
-                __generic_unplug_device(q);
                 spin_unlock_irq(q->queue_lock);
                 io_schedule();
 
@@ -1058,7 +925,7 @@ static void add_acct_request(struct request_queue *q, struct request *rq,
                             int where)
 {
         drive_stat_acct(rq, 1);
-        __elv_add_request(q, rq, where, 0);
+        __elv_add_request(q, rq, where);
 }
 
 /**
@@ -2798,7 +2665,7 @@ static void flush_plug_list(struct blk_plug *plug)
                 /*
                  * rq is already accounted, so use raw insert
                  */
-                __elv_add_request(q, rq, ELEVATOR_INSERT_SORT, 0);
+                __elv_add_request(q, rq, ELEVATOR_INSERT_SORT);
         }
 
         if (q) {
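
Note on the __blk_run_queue() hunk above: the old QUEUE_FLAG_PLUGGED plus unplug_work fallback is replaced by the queue's delay_work, and drivers that used the unplug timer for a deferred kick are expected to use the delay API instead. A minimal sketch, assuming the blk_delay_queue() helper added earlier in this series; my_driver_defer_dispatch() and the 3 ms value are illustrative only:

        static void my_driver_defer_dispatch(struct request_queue *q)
        {
                /* previously: blk_plug_device(q) and wait for the unplug timer */
                blk_delay_queue(q, 3);  /* re-run the queue's request_fn after ~3 ms */
        }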

block/blk-exec.c  (+2, -2)

@@ -54,8 +54,8 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
         rq->end_io = done;
         WARN_ON(irqs_disabled());
         spin_lock_irq(q->queue_lock);
-        __elv_add_request(q, rq, where, 1);
-        __generic_unplug_device(q);
+        __elv_add_request(q, rq, where);
+        __blk_run_queue(q);
         /* the queue is stopped so it won't be plugged+unplugged */
         if (rq->cmd_type == REQ_TYPE_PM_RESUME)
                 q->request_fn(q);

block/blk-flush.c  (+1, -2)

@@ -194,7 +194,6 @@ static void flush_end_io(struct request *flush_rq, int error)
 {
         struct request_queue *q = flush_rq->q;
         struct list_head *running = &q->flush_queue[q->flush_running_idx];
-        bool was_empty = elv_queue_empty(q);
         bool queued = false;
         struct request *rq, *n;
 
@@ -213,7 +212,7 @@ static void flush_end_io(struct request *flush_rq, int error)
         }
 
         /* after populating an empty queue, kick it to avoid stall */
-        if (queued && was_empty)
+        if (queued)
                 __blk_run_queue(q);
 }

block/blk-settings.c  (-8)

@@ -164,14 +164,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
         blk_queue_congestion_threshold(q);
         q->nr_batching = BLK_BATCH_REQ;
 
-        q->unplug_thresh = 4;           /* hmm */
-        q->unplug_delay = msecs_to_jiffies(3);  /* 3 milliseconds */
-        if (q->unplug_delay == 0)
-                q->unplug_delay = 1;
-
-        q->unplug_timer.function = blk_unplug_timeout;
-        q->unplug_timer.data = (unsigned long)q;
-
         blk_set_default_limits(&q->limits);
         blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);

block/blk-throttle.c  (-1)

@@ -800,7 +800,6 @@ static int throtl_dispatch(struct request_queue *q)
         if (nr_disp) {
                 while((bio = bio_list_pop(&bio_list_on_stack)))
                         generic_make_request(bio);
-                blk_unplug(q);
         }
         return nr_disp;
 }

block/blk.h  (-2)

@@ -18,8 +18,6 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 
-void blk_unplug_work(struct work_struct *work);
-void blk_unplug_timeout(unsigned long data);
 void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);

block/cfq-iosched.c  (-8)

@@ -499,13 +499,6 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
         }
 }
 
-static int cfq_queue_empty(struct request_queue *q)
-{
-        struct cfq_data *cfqd = q->elevator->elevator_data;
-
-        return !cfqd->rq_queued;
-}
-
 /*
  * Scale schedule slice based on io priority. Use the sync time slice only
  * if a queue is marked sync and has sync io queued. A sync queue with async
@@ -4061,7 +4054,6 @@ static struct elevator_type iosched_cfq = {
                 .elevator_add_req_fn =          cfq_insert_request,
                 .elevator_activate_req_fn =     cfq_activate_request,
                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
-                .elevator_queue_empty_fn =      cfq_queue_empty,
                 .elevator_completed_req_fn =    cfq_completed_request,
                 .elevator_former_req_fn =       elv_rb_former_request,
                 .elevator_latter_req_fn =       elv_rb_latter_request,

block/deadline-iosched.c  (-9)

@@ -326,14 +326,6 @@ static int deadline_dispatch_requests(struct request_queue *q, int force)
         return 1;
 }
 
-static int deadline_queue_empty(struct request_queue *q)
-{
-        struct deadline_data *dd = q->elevator->elevator_data;
-
-        return list_empty(&dd->fifo_list[WRITE])
-                && list_empty(&dd->fifo_list[READ]);
-}
-
 static void deadline_exit_queue(struct elevator_queue *e)
 {
         struct deadline_data *dd = e->elevator_data;
@@ -445,7 +437,6 @@ static struct elevator_type iosched_deadline = {
                 .elevator_merge_req_fn =        deadline_merged_requests,
                 .elevator_dispatch_fn =         deadline_dispatch_requests,
                 .elevator_add_req_fn =          deadline_add_request,
-                .elevator_queue_empty_fn =      deadline_queue_empty,
                 .elevator_former_req_fn =       elv_rb_former_request,
                 .elevator_latter_req_fn =       elv_rb_latter_request,
                 .elevator_init_fn =             deadline_init_queue,
