Commit 044f1daa authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes and updates from Jens Axboe:
 "Some fixes and followup features/changes that should go in, in this
  merge window. This contains:

   - Two fixes for lightnvm from Javier, fixing problems in the new code
     merged earlier in this merge window.

   - A fix from Jan for the backing device changes, fixing an issue in
     NFS that causes a failure to mount on certain setups.

   - A change from Christoph, cleaning up the blk-mq init and exit
     request paths; the new ->init_request() signature is sketched right
     after this message.

   - Remove elevator_change(), which is now unused. From Bart.

   - A fix for queue operation invocation on a dead queue, from Bart.

   - A series fixing up mtip32xx for blk-mq scheduling, removing a
     bandaid we previously had in place for this. From me.

   - A regression fix for this series, fixing a case where we wait on
     workqueue flushing from an invalid (non-blocking) context. From me.

   - A fix/optimization from Ming, ensuring that we don't both quiesce
     and freeze a queue at the same time.

   - A fix from Peter on lock ordering for CPU hotplug. Not a real
     problem right now, but will be once the CPU hotplug rework goes in.

   - A series from Omar, cleaning up our blk-mq debugfs support and
     adding support for exporting info from schedulers in debugfs as
     well. This is really useful when debugging stalls or livelocks; a
     minimal attribute sketch follows the new debugfs header below"

* 'for-linus' of git://git.kernel.dk/linux-block: (28 commits)
  mq-deadline: add debugfs attributes
  kyber: add debugfs attributes
  blk-mq-debugfs: allow schedulers to register debugfs attributes
  blk-mq: untangle debugfs and sysfs
  blk-mq: move debugfs declarations to a separate header file
  blk-mq: Do not invoke queue operations on a dead queue
  blk-mq-debugfs: get rid of a bunch of boilerplate
  blk-mq-debugfs: rename hw queue directories from <n> to hctx<n>
  blk-mq-debugfs: don't open code strstrip()
  blk-mq-debugfs: error on long write to queue "state" file
  blk-mq-debugfs: clean up flag definitions
  blk-mq-debugfs: separate flags with |
  nfs: Fix bdi handling for cloned superblocks
  block/mq: Cure cpu hotplug lock inversion
  lightnvm: fix bad back free on error path
  lightnvm: create cmd before allocating request
  blk-mq: don't use sync workqueue flushing from drivers
  mtip32xx: convert internal commands to regular block infrastructure
  mtip32xx: cleanup internal tag assumptions
  block: don't call blk_mq_quiesce_queue() after queue is frozen
  ...
parents d557d1b5 daaadb3e
@@ -561,13 +561,9 @@ void blk_cleanup_queue(struct request_queue *q)
 	 * prevent that q->request_fn() gets invoked after draining finished.
 	 */
 	blk_freeze_queue(q);
-	if (!q->mq_ops) {
-		spin_lock_irq(lock);
+	spin_lock_irq(lock);
+	if (!q->mq_ops)
 		__blk_drain_queue(q, true);
-	} else {
-		blk_mq_debugfs_unregister_mq(q);
-		spin_lock_irq(lock);
-	}
 	queue_flag_set(QUEUE_FLAG_DEAD, q);
 	spin_unlock_irq(lock);
This diff is collapsed.
#ifndef INT_BLK_MQ_DEBUGFS_H
#define INT_BLK_MQ_DEBUGFS_H

#ifdef CONFIG_BLK_DEBUG_FS

#include <linux/seq_file.h>

struct blk_mq_debugfs_attr {
	const char *name;
	umode_t mode;
	int (*show)(void *, struct seq_file *);
	ssize_t (*write)(void *, const char __user *, size_t, loff_t *);
	/* Set either .show or .seq_ops. */
	const struct seq_operations *seq_ops;
};

int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq);
int blk_mq_debugfs_rq_show(struct seq_file *m, void *v);

int blk_mq_debugfs_register(struct request_queue *q);
void blk_mq_debugfs_unregister(struct request_queue *q);
int blk_mq_debugfs_register_hctx(struct request_queue *q,
				 struct blk_mq_hw_ctx *hctx);
void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
int blk_mq_debugfs_register_hctxs(struct request_queue *q);
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);

int blk_mq_debugfs_register_sched(struct request_queue *q);
void blk_mq_debugfs_unregister_sched(struct request_queue *q);
int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
				       struct blk_mq_hw_ctx *hctx);
void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
#else
static inline int blk_mq_debugfs_register(struct request_queue *q)
{
	return 0;
}

static inline void blk_mq_debugfs_unregister(struct request_queue *q)
{
}

static inline int blk_mq_debugfs_register_hctx(struct request_queue *q,
					       struct blk_mq_hw_ctx *hctx)
{
	return 0;
}

static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
}

static inline int blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	return 0;
}

static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
}

static inline int blk_mq_debugfs_register_sched(struct request_queue *q)
{
	return 0;
}

static inline void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
}

static inline int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
						     struct blk_mq_hw_ctx *hctx)
{
	return 0;
}

static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
}
#endif

#endif
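
The attr struct above is the hook used by the kyber and mq-deadline hunks
further down: a scheduler fills a {}-terminated array of attributes, each
supplying either a plain .show callback or full .seq_ops for walking a
request list, and points the new queue_debugfs_attrs/hctx_debugfs_attrs
fields of its elevator_type at those arrays. A minimal sketch (all
myiosched_* names are hypothetical; the real pattern is in the scheduler
diffs below):

struct myiosched_data {			/* hypothetical per-queue data */
	unsigned int depth;
};

static int myiosched_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;	/* queue-level attrs get the queue */
	struct myiosched_data *md = q->elevator->elevator_data;

	seq_printf(m, "%u\n", md->depth);
	return 0;
}

/* {}-terminated; mode 0400 makes the file readable by root only. */
static const struct blk_mq_debugfs_attr myiosched_queue_debugfs_attrs[] = {
	{"depth", 0400, myiosched_depth_show},
	{},
};

static struct elevator_type myiosched_sched = {
	/* ... .ops.mq, .elevator_name, etc. elided ... */
#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = myiosched_queue_debugfs_attrs,
#endif
};
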
@@ -11,6 +11,7 @@
 #include "blk.h"
 #include "blk-mq.h"
+#include "blk-mq-debugfs.h"
 #include "blk-mq-sched.h"
 #include "blk-mq-tag.h"
 #include "blk-wbt.h"
@@ -82,11 +83,7 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
 	if (likely(!data->hctx))
 		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
 
-	/*
-	 * For a reserved tag, allocate a normal request since we might
-	 * have driver dependencies on the value of the internal tag.
-	 */
-	if (e && !(data->flags & BLK_MQ_REQ_RESERVED)) {
+	if (e) {
 		data->flags |= BLK_MQ_REQ_INTERNAL;
 
 		/*
@@ -476,6 +473,8 @@ int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
 		}
 	}
 
+	blk_mq_debugfs_register_sched_hctx(q, hctx);
+
 	return 0;
 }
@@ -487,6 +486,8 @@ void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
 	if (!e)
 		return;
 
+	blk_mq_debugfs_unregister_sched_hctx(hctx);
+
 	if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
 		e->type->ops.mq.exit_hctx(hctx, hctx_idx);
 		hctx->sched_data = NULL;
@@ -523,8 +524,10 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 	if (ret)
 		goto err;
 
-	if (e->ops.mq.init_hctx) {
-		queue_for_each_hw_ctx(q, hctx, i) {
+	blk_mq_debugfs_register_sched(q);
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		if (e->ops.mq.init_hctx) {
 			ret = e->ops.mq.init_hctx(hctx, i);
 			if (ret) {
 				eq = q->elevator;
@@ -533,6 +536,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 				return ret;
 			}
 		}
+		blk_mq_debugfs_register_sched_hctx(q, hctx);
 	}
 
 	return 0;
@@ -548,14 +552,14 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
 	struct blk_mq_hw_ctx *hctx;
 	unsigned int i;
 
-	if (e->type->ops.mq.exit_hctx) {
-		queue_for_each_hw_ctx(q, hctx, i) {
-			if (hctx->sched_data) {
-				e->type->ops.mq.exit_hctx(hctx, i);
-				hctx->sched_data = NULL;
-			}
+	queue_for_each_hw_ctx(q, hctx, i) {
+		blk_mq_debugfs_unregister_sched_hctx(hctx);
+		if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
+			e->type->ops.mq.exit_hctx(hctx, i);
+			hctx->sched_data = NULL;
 		}
 	}
 
+	blk_mq_debugfs_unregister_sched(q);
+
 	if (e->type->ops.mq.exit_sched)
 		e->type->ops.mq.exit_sched(e);
 	blk_mq_sched_tags_teardown(q);
@@ -258,8 +258,6 @@ static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
 	queue_for_each_hw_ctx(q, hctx, i)
 		blk_mq_unregister_hctx(hctx);
 
-	blk_mq_debugfs_unregister_mq(q);
-
 	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
 	kobject_del(&q->mq_kobj);
 	kobject_put(&dev->kobj);
@@ -318,8 +316,6 @@ int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
 	kobject_uevent(&q->mq_kobj, KOBJ_ADD);
 
-	blk_mq_debugfs_register(q);
-
 	queue_for_each_hw_ctx(q, hctx, i) {
 		ret = blk_mq_register_hctx(hctx);
 		if (ret)
@@ -335,8 +331,6 @@ int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
 	while (--i >= 0)
 		blk_mq_unregister_hctx(q->queue_hw_ctx[i]);
 
-	blk_mq_debugfs_unregister_mq(q);
-
 	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
 	kobject_del(&q->mq_kobj);
 	kobject_put(&dev->kobj);
@@ -364,8 +358,6 @@ void blk_mq_sysfs_unregister(struct request_queue *q)
 	if (!q->mq_sysfs_init_done)
 		goto unlock;
 
-	blk_mq_debugfs_unregister_mq(q);
-
 	queue_for_each_hw_ctx(q, hctx, i)
 		blk_mq_unregister_hctx(hctx);
@@ -382,8 +374,6 @@ int blk_mq_sysfs_register(struct request_queue *q)
 	if (!q->mq_sysfs_init_done)
 		goto unlock;
 
-	blk_mq_debugfs_register_mq(q);
-
 	queue_for_each_hw_ctx(q, hctx, i) {
 		ret = blk_mq_register_hctx(hctx);
 		if (ret)
@@ -31,6 +31,7 @@
 #include <linux/blk-mq.h>
 #include "blk.h"
 #include "blk-mq.h"
+#include "blk-mq-debugfs.h"
 #include "blk-mq-tag.h"
 #include "blk-stat.h"
 #include "blk-wbt.h"
@@ -41,6 +42,7 @@ static LIST_HEAD(all_q_list);
 
 static void blk_mq_poll_stats_start(struct request_queue *q);
 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
+static void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync);
 
 static int blk_mq_poll_stats_bkt(const struct request *rq)
 {
@@ -166,7 +168,7 @@ void blk_mq_quiesce_queue(struct request_queue *q)
 	unsigned int i;
 	bool rcu = false;
 
-	blk_mq_stop_hw_queues(q);
+	__blk_mq_stop_hw_queues(q, true);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (hctx->flags & BLK_MQ_F_BLOCKING)
@@ -1218,20 +1220,34 @@ bool blk_mq_queue_stopped(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_mq_queue_stopped);
 
-void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
+static void __blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx, bool sync)
 {
-	cancel_delayed_work_sync(&hctx->run_work);
+	if (sync)
+		cancel_delayed_work_sync(&hctx->run_work);
+	else
+		cancel_delayed_work(&hctx->run_work);
+
 	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
 }
+
+void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
+{
+	__blk_mq_stop_hw_queue(hctx, false);
+}
 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
 
-void blk_mq_stop_hw_queues(struct request_queue *q)
+void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync)
 {
 	struct blk_mq_hw_ctx *hctx;
 	int i;
 
 	queue_for_each_hw_ctx(q, hctx, i)
-		blk_mq_stop_hw_queue(hctx);
+		__blk_mq_stop_hw_queue(hctx, sync);
+}
+
+void blk_mq_stop_hw_queues(struct request_queue *q)
+{
+	__blk_mq_stop_hw_queues(q, false);
 }
 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
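
The net effect of the split above: the exported blk_mq_stop_hw_queue(s)
helpers that drivers call now do only an asynchronous cancel_delayed_work()
plus setting the STOPPED bit, so they are safe from non-blocking context,
while blk_mq_quiesce_queue() (the @@ -166 hunk above) passes sync=true and
keeps the synchronous flush it actually needs. A hypothetical driver error
path this makes legal again (mydrv is made up):

struct mydrv {				/* hypothetical driver state */
	struct request_queue *queue;
	struct work_struct reset_work;
};

/* Sketch only: called under a spinlock or from IRQ context, where the
 * old synchronous work flush could have slept. */
static void mydrv_handle_fatal_error(struct mydrv *dev)
{
	blk_mq_stop_hw_queues(dev->queue);	/* async cancel, no sleeping */
	schedule_work(&dev->reset_work);	/* recover in process context */
}
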
@@ -1655,8 +1671,7 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 		if (!rq)
 			continue;
-		set->ops->exit_request(set->driver_data, rq,
-				       hctx_idx, i);
+		set->ops->exit_request(set, rq, hctx_idx);
 		tags->static_rqs[i] = NULL;
 	}
 }
@@ -1787,8 +1802,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 		tags->static_rqs[i] = rq;
 		if (set->ops->init_request) {
-			if (set->ops->init_request(set->driver_data,
-					rq, hctx_idx, i,
+			if (set->ops->init_request(set, rq, hctx_idx,
 					node)) {
 				tags->static_rqs[i] = NULL;
 				goto fail;
@@ -1849,14 +1863,12 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 		struct blk_mq_tag_set *set,
 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 {
-	unsigned flush_start_tag = set->queue_depth;
+	blk_mq_debugfs_unregister_hctx(hctx);
 
 	blk_mq_tag_idle(hctx);
 
 	if (set->ops->exit_request)
-		set->ops->exit_request(set->driver_data,
-				       hctx->fq->flush_rq, hctx_idx,
-				       flush_start_tag + hctx_idx);
+		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
 
 	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
@@ -1889,7 +1901,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
 		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
 {
 	int node;
-	unsigned flush_start_tag = set->queue_depth;
 
 	node = hctx->numa_node;
 	if (node == NUMA_NO_NODE)
@@ -1933,14 +1944,15 @@ static int blk_mq_init_hctx(struct request_queue *q,
 		goto sched_exit_hctx;
 
 	if (set->ops->init_request &&
-	    set->ops->init_request(set->driver_data,
-				   hctx->fq->flush_rq, hctx_idx,
-				   flush_start_tag + hctx_idx, node))
+	    set->ops->init_request(set, hctx->fq->flush_rq, hctx_idx,
+				   node))
 		goto free_fq;
 
 	if (hctx->flags & BLK_MQ_F_BLOCKING)
 		init_srcu_struct(&hctx->queue_rq_srcu);
 
+	blk_mq_debugfs_register_hctx(q, hctx);
+
 	return 0;
 
 free_fq:
@@ -2329,15 +2341,15 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
+	get_online_cpus();
 	mutex_lock(&all_q_mutex);
-	get_online_cpus();
 
 	list_add_tail(&q->all_q_node, &all_q_list);
 	blk_mq_add_queue_tag_set(set, q);
 	blk_mq_map_swqueue(q, cpu_online_mask);
 
+	mutex_unlock(&all_q_mutex);
 	put_online_cpus();
-	mutex_unlock(&all_q_mutex);
 
 	if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
 		int ret;
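
The reordering above is Peter's hotplug-lock fix from the merge message:
the CPU hotplug read lock must now always be taken before all_q_mutex and
released after it, matching the nesting on the hotplug-notifier side so the
inversion cannot deadlock once the CPU hotplug rework lands. Schematically:

	get_online_cpus();		/* outer: CPU hotplug read lock */
	mutex_lock(&all_q_mutex);	/* inner: protects all_q_list */

	/* ... add the queue to all_q_list, map software queues ... */

	mutex_unlock(&all_q_mutex);
	put_online_cpus();
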
@@ -2378,6 +2390,7 @@ static void blk_mq_queue_reinit(struct request_queue *q,
 {
 	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
 
+	blk_mq_debugfs_unregister_hctxs(q);
 	blk_mq_sysfs_unregister(q);
 
 	/*
@@ -2389,6 +2402,7 @@ static void blk_mq_queue_reinit(struct request_queue *q,
 	blk_mq_map_swqueue(q, online_mask);
 
 	blk_mq_sysfs_register(q);
+	blk_mq_debugfs_register_hctxs(q);
 }
 
 /*
@@ -2617,7 +2631,6 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 		return -EINVAL;
 
 	blk_mq_freeze_queue(q);
-	blk_mq_quiesce_queue(q);
 
 	ret = 0;
 	queue_for_each_hw_ctx(q, hctx, i) {
@@ -2643,7 +2656,6 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 	q->nr_requests = nr;
 
 	blk_mq_unfreeze_queue(q);
-	blk_mq_start_stopped_hw_queues(q, true);
 
 	return ret;
 }
@@ -83,34 +83,6 @@ extern int blk_mq_sysfs_register(struct request_queue *q);
 extern void blk_mq_sysfs_unregister(struct request_queue *q);
 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
 
-/*
- * debugfs helpers
- */
-#ifdef CONFIG_BLK_DEBUG_FS
-int blk_mq_debugfs_register(struct request_queue *q);
-void blk_mq_debugfs_unregister(struct request_queue *q);
-int blk_mq_debugfs_register_mq(struct request_queue *q);
-void blk_mq_debugfs_unregister_mq(struct request_queue *q);
-#else
-static inline int blk_mq_debugfs_register(struct request_queue *q)
-{
-	return 0;
-}
-static inline void blk_mq_debugfs_unregister(struct request_queue *q)
-{
-}
-static inline int blk_mq_debugfs_register_mq(struct request_queue *q)
-{
-	return 0;
-}
-static inline void blk_mq_debugfs_unregister_mq(struct request_queue *q)
-{
-}
-#endif
-
 extern void blk_mq_rq_timed_out(struct request *req, bool reserved);
 
 void blk_mq_release(struct request_queue *q);
@@ -13,6 +13,7 @@
 
 #include "blk.h"
 #include "blk-mq.h"
+#include "blk-mq-debugfs.h"
 #include "blk-wbt.h"
 
 struct queue_sysfs_entry {
@@ -889,6 +890,8 @@ int blk_register_queue(struct gendisk *disk)
 	if (q->mq_ops)
 		__blk_mq_register_dev(dev, q);
 
+	blk_mq_debugfs_register(q);
+
 	kobject_uevent(&q->kobj, KOBJ_ADD);
 
 	wbt_enable_default(q);
@@ -950,7 +950,6 @@ static int elevator_switch_mq(struct request_queue *q,
 	int ret;
 
 	blk_mq_freeze_queue(q);
-	blk_mq_quiesce_queue(q);
 
 	if (q->elevator) {
 		if (q->elevator->registered)
@@ -978,9 +977,7 @@ static int elevator_switch_mq(struct request_queue *q,
 out:
 	blk_mq_unfreeze_queue(q);
-	blk_mq_start_stopped_hw_queues(q, true);
-
 	return ret;
 }
 
 /*
@@ -1088,19 +1085,6 @@ static int __elevator_change(struct request_queue *q, const char *name)
 	return elevator_switch(q, e);
 }
 
-int elevator_change(struct request_queue *q, const char *name)
-{
-	int ret;
-
-	/* Protect q->elevator from elevator_init() */
-	mutex_lock(&q->sysfs_lock);
-	ret = __elevator_change(q, name);
-	mutex_unlock(&q->sysfs_lock);
-
-	return ret;
-}
-EXPORT_SYMBOL(elevator_change);
-
 static inline bool elv_support_iosched(struct request_queue *q)
 {
 	if (q->mq_ops && q->tag_set && (q->tag_set->flags &
@@ -26,6 +26,7 @@
 
 #include "blk.h"
 #include "blk-mq.h"
+#include "blk-mq-debugfs.h"
 #include "blk-mq-sched.h"
 #include "blk-mq-tag.h"
 #include "blk-stat.h"
@@ -683,6 +684,131 @@ static struct elv_fs_entry kyber_sched_attrs[] = {
 };
 #undef KYBER_LAT_ATTR
 
+#ifdef CONFIG_BLK_DEBUG_FS
+#define KYBER_DEBUGFS_DOMAIN_ATTRS(domain, name) \
+static int kyber_##name##_tokens_show(void *data, struct seq_file *m) \
+{ \
+	struct request_queue *q = data; \
+	struct kyber_queue_data *kqd = q->elevator->elevator_data; \
+ \
+	sbitmap_queue_show(&kqd->domain_tokens[domain], m); \
+	return 0; \
+} \
+ \
+static void *kyber_##name##_rqs_start(struct seq_file *m, loff_t *pos) \
+	__acquires(&khd->lock) \
+{ \
+	struct blk_mq_hw_ctx *hctx = m->private; \
+	struct kyber_hctx_data *khd = hctx->sched_data; \
+ \
+	spin_lock(&khd->lock); \
+	return seq_list_start(&khd->rqs[domain], *pos); \
+} \
+ \
+static void *kyber_##name##_rqs_next(struct seq_file *m, void *v, \
+				     loff_t *pos) \
+{ \
+	struct blk_mq_hw_ctx *hctx = m->private; \
+	struct kyber_hctx_data *khd = hctx->sched_data; \
+ \
+	return seq_list_next(v, &khd->rqs[domain], pos); \
+} \
+ \
+static void kyber_##name##_rqs_stop(struct seq_file *m, void *v) \
+	__releases(&khd->lock) \
+{ \
+	struct blk_mq_hw_ctx *hctx = m->private; \
+	struct kyber_hctx_data *khd = hctx->sched_data; \
+ \
+	spin_unlock(&khd->lock); \
+} \
+ \
+static const struct seq_operations kyber_##name##_rqs_seq_ops = { \
+	.start	= kyber_##name##_rqs_start, \
+	.next	= kyber_##name##_rqs_next, \
+	.stop	= kyber_##name##_rqs_stop, \
+	.show	= blk_mq_debugfs_rq_show, \
+}; \
+ \
+static int kyber_##name##_waiting_show(void *data, struct seq_file *m) \
+{ \
+	struct blk_mq_hw_ctx *hctx = data; \
+	struct kyber_hctx_data *khd = hctx->sched_data; \
+	wait_queue_t *wait = &khd->domain_wait[domain]; \
+ \
+	seq_printf(m, "%d\n", !list_empty_careful(&wait->task_list)); \
+	return 0; \
+}
+KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
+KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_SYNC_WRITE, sync_write)
+KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
+#undef KYBER_DEBUGFS_DOMAIN_ATTRS
+
+static int kyber_async_depth_show(void *data, struct seq_file *m)
+{
+	struct request_queue *q = data;
+	struct kyber_queue_data *kqd = q->elevator->elevator_data;
+
+	seq_printf(m, "%u\n", kqd->async_depth);
+	return 0;
+}
+
+static int kyber_cur_domain_show(void *data, struct seq_file *m)
+{
+	struct blk_mq_hw_ctx *hctx = data;
+	struct kyber_hctx_data *khd = hctx->sched_data;
+
+	switch (khd->cur_domain) {
+	case KYBER_READ:
+		seq_puts(m, "READ\n");
+		break;
+	case KYBER_SYNC_WRITE:
+		seq_puts(m, "SYNC_WRITE\n");
+		break;
+	case KYBER_OTHER:
+		seq_puts(m, "OTHER\n");
+		break;
+	default:
+		seq_printf(m, "%u\n", khd->cur_domain);
+		break;
+	}
+	return 0;
+}
+
+static int kyber_batching_show(void *data, struct seq_file *m)
+{
+	struct blk_mq_hw_ctx *hctx = data;
+	struct kyber_hctx_data *khd = hctx->sched_data;
+
+	seq_printf(m, "%u\n", khd->batching);
+	return 0;
+}
+
+#define KYBER_QUEUE_DOMAIN_ATTRS(name) \
+	{#name "_tokens", 0400, kyber_##name##_tokens_show}
+static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
+	KYBER_QUEUE_DOMAIN_ATTRS(read),
+	KYBER_QUEUE_DOMAIN_ATTRS(sync_write),
+	KYBER_QUEUE_DOMAIN_ATTRS(other),
+	{"async_depth", 0400, kyber_async_depth_show},
+	{},
+};
+#undef KYBER_QUEUE_DOMAIN_ATTRS
+
+#define KYBER_HCTX_DOMAIN_ATTRS(name) \
+	{#name "_rqs", 0400, .seq_ops = &kyber_##name##_rqs_seq_ops}, \
+	{#name "_waiting", 0400, kyber_##name##_waiting_show}
+static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
+	KYBER_HCTX_DOMAIN_ATTRS(read),
+	KYBER_HCTX_DOMAIN_ATTRS(sync_write),
+	KYBER_HCTX_DOMAIN_ATTRS(other),
+	{"cur_domain", 0400, kyber_cur_domain_show},
+	{"batching", 0400, kyber_batching_show},
+	{},
+};
+#undef KYBER_HCTX_DOMAIN_ATTRS
+#endif
+
 static struct elevator_type kyber_sched = {
 	.ops.mq = {
 		.init_sched = kyber_init_sched,
@@ -696,6 +822,10 @@ static struct elevator_type kyber_sched = {
 		.has_work = kyber_has_work,
 	},
 	.uses_mq = true,
+#ifdef CONFIG_BLK_DEBUG_FS
+	.queue_debugfs_attrs = kyber_queue_debugfs_attrs,
+	.hctx_debugfs_attrs = kyber_hctx_debugfs_attrs,
+#endif
 	.elevator_attrs = kyber_sched_attrs,
 	.elevator_name = "kyber",
 	.elevator_owner = THIS_MODULE,
@@ -19,6 +19,7 @@
 
 #include "blk.h"
 #include "blk-mq.h"
+#include "blk-mq-debugfs.h"
 #include "blk-mq-tag.h"
 #include "blk-mq-sched.h"
@@ -517,6 +518,125 @@ static struct elv_fs_entry deadline_attrs[] = {
 	__ATTR_NULL
 };
 
+#ifdef CONFIG_BLK_DEBUG_FS
+#define DEADLINE_DEBUGFS_DDIR_ATTRS(ddir, name) \
+static void *deadline_##name##_fifo_start(struct seq_file *m, \
+					  loff_t *pos) \
+	__acquires(&dd->lock) \
+{ \
+	struct request_queue *q = m->private; \
+	struct deadline_data *dd = q->elevator->elevator_data; \
+ \
+	spin_lock(&dd->lock); \
+	return seq_list_start(&dd->fifo_list[ddir], *pos); \
+} \
+ \
+static void *deadline_##name##_fifo_next(struct seq_file *m, void *v, \
+					 loff_t *pos) \
+{ \
+	struct request_queue *q = m->private; \
+	struct deadline_data *dd = q->elevator->elevator_data; \
+ \
+	return seq_list_next(v, &dd->fifo_list[ddir], pos); \
+} \
+ \
+static void deadline_##name##_fifo_stop(struct seq_file *m, void *v) \
+	__releases(&dd->lock) \
+{ \
+	struct request_queue *q = m->private; \
+	struct deadline_data *dd = q->elevator->elevator_data; \
+ \
+	spin_unlock(&dd->lock); \
+} \
+ \
+static const struct seq_operations deadline_##name##_fifo_seq_ops = { \
+	.start	= deadline_##name##_fifo_start, \
+	.next	= deadline_##name##_fifo_next, \
+	.stop	= deadline_##name##_fifo_stop, \
+	.show	= blk_mq_debugfs_rq_show, \
+}; \
+ \
+static int deadline_##name##_next_rq_show(void *data, \
+					  struct seq_file *m) \
+{ \
+	struct request_queue *q = data; \
+	struct deadline_data *dd = q->elevator->elevator_data; \
+	struct request *rq = dd->next_rq[ddir]; \
+ \
+	if (rq) \
+		__blk_mq_debugfs_rq_show(m, rq); \
+	return 0; \
+}
+DEADLINE_DEBUGFS_DDIR_ATTRS(READ, read)
+DEADLINE_DEBUGFS_DDIR_ATTRS(WRITE, write)
+#undef DEADLINE_DEBUGFS_DDIR_ATTRS
+
+static int deadline_batching_show(void *data, struct seq_file *m)
+{
+	struct request_queue *q = data;
+	struct deadline_data *dd = q->elevator->elevator_data;
+
+	seq_printf(m, "%u\n", dd->batching);
+	return 0;
+}
+
+static int deadline_starved_show(void *data, struct seq_file *m)
+{
+	struct request_queue *q = data;
+	struct deadline_data *dd = q->elevator->elevator_data;
+
+	seq_printf(m, "%u\n", dd->starved);
+	return 0;
+}
+
+static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos)
+	__acquires(&dd->lock)
+{
+	struct request_queue *q = m->private;
+	struct deadline_data *dd = q->elevator->elevator_data;
+
+	spin_lock(&dd->lock);
+	return seq_list_start(&dd->dispatch, *pos);
+}
+
+static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct request_queue *q = m->private;
+	struct deadline_data *dd = q->elevator->elevator_data;
+
+	return seq_list_next(v, &dd->dispatch, pos);
+}
+
+static void deadline_dispatch_stop(struct seq_file *m, void *v)
+	__releases(&dd->lock)
+{
+	struct request_queue *q = m->private;
+	struct deadline_data *dd = q->elevator->elevator_data;
+
+	spin_unlock(&dd->lock);
+}
+
+static const struct seq_operations deadline_dispatch_seq_ops = {
+	.start	= deadline_dispatch_start,
+	.next	= deadline_dispatch_next,
+	.stop	= deadline_dispatch_stop,
+	.show	= blk_mq_debugfs_rq_show,
+};
+
+#define DEADLINE_QUEUE_DDIR_ATTRS(name) \
+	{#name "_fifo_list", 0400, .seq_ops = &deadline_##name##_fifo_seq_ops}, \
+	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
+static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
+	DEADLINE_QUEUE_DDIR_ATTRS(read),
+	DEADLINE_QUEUE_DDIR_ATTRS(write),
+	{"batching", 0400, deadline_batching_show},
+	{"starved", 0400, deadline_starved_show},
+	{"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops},
+	{},
+};
+#undef DEADLINE_QUEUE_DDIR_ATTRS
+#endif
+
 static struct elevator_type mq_deadline = {
 	.ops.mq = {
 		.insert_requests = dd_insert_requests,
@@ -533,6 +653,9 @@ static struct elevator_type mq_deadline = {
 	},
 	.uses_mq = true,
+#ifdef CONFIG_BLK_DEBUG_FS
+	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
+#endif
 	.elevator_attrs = deadline_attrs,
 	.elevator_name = "mq-deadline",
 	.elevator_owner = THIS_MODULE,
@@ -1697,9 +1697,8 @@ static void loop_queue_work(struct kthread_work *work)
 	loop_handle_cmd(cmd);
 }
 
-static int loop_init_request(void *data, struct request *rq,
-		unsigned int hctx_idx, unsigned int request_idx,
-		unsigned int numa_node)
+static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
+		unsigned int hctx_idx, unsigned int numa_node)
 {
 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
This diff is collapsed.
@@ -333,16 +333,6 @@ struct mtip_cmd {
 	dma_addr_t command_dma; /* corresponding physical address */
 
 	void *comp_data; /* data passed to completion function comp_func() */
 
-	/*
-	 * Completion function called by the ISR upon completion of
-	 * a command.
-	 */
-	void (*comp_func)(struct mtip_port *port,
-			  int tag,
-			  struct mtip_cmd *cmd,
-			  int status);
-
 	int scatter_ents; /* Number of scatter list entries used */
 
 	int unaligned; /* command is unaligned on 4k boundary */
@@ -1396,12 +1396,11 @@ static void nbd_dbg_close(void)
 
 #endif
 
-static int nbd_init_request(void *data, struct request *rq,
-			    unsigned int hctx_idx, unsigned int request_idx,
-			    unsigned int numa_node)
+static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
+			    unsigned int hctx_idx, unsigned int numa_node)
 {
 	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
-	cmd->nbd = data;
+	cmd->nbd = set->driver_data;
 	return 0;
 }
@@ -4307,9 +4307,8 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev)
 	return ret;
 }
 
-static int rbd_init_request(void *data, struct request *rq,
-		unsigned int hctx_idx, unsigned int request_idx,
-		unsigned int numa_node)
+static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
+		unsigned int hctx_idx, unsigned int numa_node)
 {
 	struct work_struct *work = blk_mq_rq_to_pdu(rq);
@@ -573,11 +573,10 @@ static const struct device_attribute dev_attr_cache_type_rw =
 	__ATTR(cache_type, S_IRUGO|S_IWUSR,
 	       virtblk_cache_type_show, virtblk_cache_type_store);
 
-static int virtblk_init_request(void *data, struct request *rq,
-		unsigned int hctx_idx, unsigned int request_idx,
-		unsigned int numa_node)
+static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
+		unsigned int hctx_idx, unsigned int numa_node)
 {
-	struct virtio_blk *vblk = data;
+	struct virtio_blk *vblk = set->driver_data;
 	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
 
 #ifdef CONFIG_VIRTIO_BLK_SCSI
@@ -74,7 +74,7 @@ static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
 	return 0;
 err:
-	while (--i > lun_begin)
+	while (--i >= lun_begin)
 		clear_bit(i, dev->lun_map);
 
 	return -EBUSY;
@@ -211,7 +211,7 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
 	return tgt_dev;
 err_ch:
-	while (--i > 0)
+	while (--i >= 0)
 		kfree(dev_map->chnls[i].lun_offs);
 	kfree(luns);
 err_luns:
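
Both lightnvm fixes above correct the same off-by-one in an error-unwind
loop: with a pre-decrement, "while (--i > begin)" never undoes index
'begin' itself, leaking the first successfully claimed element, while ">="
also releases it. The general idiom, as a standalone sketch (claim_one,
release_one and struct myres are hypothetical):

struct myres;
int claim_one(struct myres *res, int i);	/* hypothetical */
void release_one(struct myres *res, int i);	/* hypothetical */

/* Claim items [begin, end); on failure release exactly the ones taken. */
static int claim_range(struct myres *res, int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		if (claim_one(res, i))
			goto err;
	}
	return 0;
err:
	while (--i >= begin)	/* ">=" so index 'begin' is released too */
		release_one(res, i);
	return -EBUSY;
}
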
@@ -720,11 +720,10 @@ int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
 	return 0;
 }
 
-static int dm_mq_init_request(void *data, struct request *rq,
-		unsigned int hctx_idx, unsigned int request_idx,
-		unsigned int numa_node)
+static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
+		unsigned int hctx_idx, unsigned int numa_node)
 {
-	return __dm_rq_init_rq(data, rq);
+	return __dm_rq_init_rq(set->driver_data, rq);
 }
 
 static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -334,10 +334,9 @@ static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
 }
 
-static int ubiblock_init_request(void *data, struct request *req,
-			     unsigned int hctx_idx,
-			     unsigned int request_idx,
-			     unsigned int numa_node)
+static int ubiblock_init_request(struct blk_mq_tag_set *set,
+		struct request *req, unsigned int hctx_idx,
+		unsigned int numa_node)
 {
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);