Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions block/blk-core.c
Original file line number Diff line number Diff line change
Expand Up @@ -434,6 +434,7 @@ struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
mutex_init(&q->limits_lock);
mutex_init(&q->rq_qos_mutex);
spin_lock_init(&q->queue_lock);
atomic_set(&q->quiesce_depth, 0);

init_waitqueue_head(&q->mq_freeze_wq);
mutex_init(&q->mq_freeze_lock);
Expand Down
1 change: 0 additions & 1 deletion block/blk-mq-debugfs.c
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,6 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(INIT_DONE),
QUEUE_FLAG_NAME(STATS),
QUEUE_FLAG_NAME(REGISTERED),
QUEUE_FLAG_NAME(QUIESCED),
QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
QUEUE_FLAG_NAME(HCTX_ACTIVE),
QUEUE_FLAG_NAME(SQ_SCHED),
Expand Down
53 changes: 27 additions & 26 deletions block/blk-mq.c
Original file line number Diff line number Diff line change
Expand Up @@ -260,12 +260,13 @@ EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue_non_owner);
*/
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	/*
	 * Nestable quiesce: bump the depth; any positive depth means the
	 * queue is quiesced (see blk_queue_quiesced()).
	 */
	atomic_inc(&q->quiesce_depth);
	/*
	 * atomic_inc() is not ordered on its own, so a barrier is required.
	 * Pairs with smp_rmb() in blk_mq_run_hw_queue(): make the
	 * incremented quiesce_depth observable to readers re-checking
	 * the quiesce state, so they don't dispatch on a quiesced queue.
	 */
	smp_mb__after_atomic();
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

Expand Down Expand Up @@ -314,21 +315,23 @@ EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
*/
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	int depth;

	/*
	 * Only decrement while positive: a return of -1 means the queue
	 * was not quiesced, i.e. an unbalanced unquiesce by the caller.
	 * Warn and bail rather than driving the depth negative.
	 */
	depth = atomic_dec_if_positive(&q->quiesce_depth);
	if (WARN_ON_ONCE(depth < 0))
		return;

	if (depth == 0) {
		/*
		 * Pairs with smp_rmb() in blk_mq_run_hw_queue(): make the
		 * decrement of quiesce_depth observable before we kick the
		 * hw queues, so a concurrent blk_mq_run_hw_queue() that
		 * re-checks the state sees the queue as no longer quiesced.
		 * NOTE(review): a successful atomic_dec_if_positive() is
		 * already fully ordered per Documentation/atomic_t.txt, so
		 * this barrier is likely redundant — kept for clarity;
		 * confirm before removing.
		 */
		smp_mb__after_atomic();
		/* dispatch requests which are inserted during quiescing */
		blk_mq_run_hw_queues(q, true);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);

Expand Down Expand Up @@ -2362,17 +2365,15 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)

need_run = blk_mq_hw_queue_need_run(hctx);
if (!need_run) {
unsigned long flags;

/*
* Synchronize with blk_mq_unquiesce_queue(), because we check
* if hw queue is quiesced locklessly above, we need the use
* ->queue_lock to make sure we see the up-to-date status to
* not miss rerunning the hw queue.
* Re-check the quiesce state after a read barrier. Pairs with
* smp_mb__after_atomic() in blk_mq_quiesce_queue_nowait() and
* blk_mq_unquiesce_queue() so we don't miss rerunning the hw
* queue when a concurrent unquiesce has just dropped the
* quiesce_depth to zero.
*/
spin_lock_irqsave(&hctx->queue->queue_lock, flags);
smp_rmb();
need_run = blk_mq_hw_queue_need_run(hctx);
spin_unlock_irqrestore(&hctx->queue->queue_lock, flags);

if (!need_run)
return;
Expand Down
9 changes: 6 additions & 3 deletions include/linux/blkdev.h
Original file line number Diff line number Diff line change
Expand Up @@ -521,7 +521,8 @@ struct request_queue {

spinlock_t queue_lock;

int quiesce_depth;
/* Atomic quiesce depth - also serves as quiesced indicator (depth > 0) */
atomic_t quiesce_depth;

struct gendisk *disk;

Expand Down Expand Up @@ -666,7 +667,6 @@ enum {
QUEUE_FLAG_INIT_DONE, /* queue is initialized */
QUEUE_FLAG_STATS, /* track IO start and completion times */
QUEUE_FLAG_REGISTERED, /* queue has been registered to a disk */
QUEUE_FLAG_QUIESCED, /* queue has been quiesced */
QUEUE_FLAG_RQ_ALLOC_TIME, /* record rq->alloc_time_ns */
QUEUE_FLAG_HCTX_ACTIVE, /* at least one blk-mq hctx is active */
QUEUE_FLAG_SQ_SCHED, /* single queue style io dispatch */
Expand Down Expand Up @@ -704,7 +704,10 @@ void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
#define blk_noretry_request(rq) \
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
static inline bool blk_queue_quiesced(struct request_queue *q)
{
return atomic_read(&q->quiesce_depth) > 0;
}
#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_sq_sched(q) test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
Expand Down