// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring_types.h"
#include "io_uring.h"
#include "sqpoll.h"
#include "fdinfo.h"

#ifdef CONFIG_PROC_FS
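/*
 * Emit one "personality" credential entry: the id, then the Uid/Gid
 * quadruplets (real, effective, saved, fs), the supplementary groups and
 * the effective capability mask, in the same layout as /proc/<pid>/status.
 */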
static __cold int io_uring_show_cred(struct seq_file *m, unsigned int id,
				     const struct cred *cred)
{
	struct user_namespace *uns = seq_user_ns(m);
	struct group_info *gi;
	kernel_cap_t cap;
	unsigned __capi;
	int g;

	seq_printf(m, "%5d\n", id);
	seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
	seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
	seq_puts(m, "\n\tGroups:\t");
	gi = cred->group_info;
	for (g = 0; g < gi->ngroups; g++) {
		seq_put_decimal_ull(m, g ? " " : "",
				    from_kgid_munged(uns, gi->gid[g]));
	}
	seq_puts(m, "\n\tCapEff:\t");
	cap = cred->cap_effective;
	CAP_FOR_EACH_U32(__capi)
		seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
	seq_putc(m, '\n');
	return 0;
}

static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
					  struct seq_file *m)
{
	struct io_sq_data *sq = NULL;
	struct io_overflow_cqe *ocqe;
	struct io_rings *r = ctx->rings;
	unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;
	unsigned int sq_head = READ_ONCE(r->sq.head);
	unsigned int sq_tail = READ_ONCE(r->sq.tail);
	unsigned int cq_head = READ_ONCE(r->cq.head);
	unsigned int cq_tail = READ_ONCE(r->cq.tail);
	unsigned int cq_shift = 0;
	unsigned int sq_entries, cq_entries;
	bool has_lock;
	bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
	unsigned int i;

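	/*
	 * IORING_SETUP_CQE32 rings carry 32-byte CQEs, i.e. two struct
	 * io_uring_cqe slots per entry, so CQ indexing shifts left by one.
	 */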
	if (is_cqe32)
		cq_shift = 1;

	/*
	 * We may get imprecise sqe and cqe info if the ring is actively
	 * running, since we fetch cached_sq_head and cached_cq_tail without
	 * uring_lock, and sq_tail and cq_head are changed by userspace. But
	 * that's fine, since these values are mostly of interest when the
	 * ring is stuck anyway.
	 */
	seq_printf(m, "SqMask:\t0x%x\n", sq_mask);
	seq_printf(m, "SqHead:\t%u\n", sq_head);
	seq_printf(m, "SqTail:\t%u\n", sq_tail);
	seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head);
	seq_printf(m, "CqMask:\t0x%x\n", cq_mask);
	seq_printf(m, "CqHead:\t%u\n", cq_head);
	seq_printf(m, "CqTail:\t%u\n", cq_tail);
	seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail);
	seq_printf(m, "SQEs:\t%u\n", sq_tail - ctx->cached_sq_head);
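	/* Clamp to the ring size; sq_tail is in userspace-shared memory. */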
	sq_entries = min(sq_tail - sq_head, ctx->sq_entries);
	for (i = 0; i < sq_entries; i++) {
		unsigned int entry = i + sq_head;
		unsigned int sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
		struct io_uring_sqe *sqe;

		if (sq_idx > sq_mask)
			continue;
		sqe = &ctx->sq_sqes[sq_idx];
		seq_printf(m, "%5u: opcode:%d, fd:%d, flags:%x, user_data:%llu\n",
			   sq_idx, sqe->opcode, sqe->fd, sqe->flags,
			   sqe->user_data);
	}
	seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head);
	cq_entries = min(cq_tail - cq_head, ctx->cq_entries);
	for (i = 0; i < cq_entries; i++) {
		unsigned int entry = i + cq_head;
		struct io_uring_cqe *cqe = &r->cqes[(entry & cq_mask) << cq_shift];

		if (!is_cqe32) {
			seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x\n",
				   entry & cq_mask, cqe->user_data, cqe->res,
				   cqe->flags);
		} else {
			seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x, "
				   "extra1:%llu, extra2:%llu\n",
				   entry & cq_mask, cqe->user_data, cqe->res,
				   cqe->flags, cqe->big_cqe[0], cqe->big_cqe[1]);
		}
	}

	/*
	 * Avoid an ABBA deadlock between the seq lock and the io_uring mutex,
	 * since the fdinfo case grabs it in the opposite direction of normal
	 * use cases. If we fail to get the lock, we just don't iterate any
	 * structures that could be going away outside the io_uring mutex.
	 */
	has_lock = mutex_trylock(&ctx->uring_lock);

	if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
		sq = ctx->sq_data;
		if (!sq->thread)
			sq = NULL;
	}

	seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
	seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
	seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
	for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
		struct file *f = io_file_from_index(&ctx->file_table, i);

		if (f)
			seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
		else
			seq_printf(m, "%5u: <none>\n", i);
	}
	seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
	for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *buf = ctx->user_bufs[i];
		unsigned int len = buf->ubuf_end - buf->ubuf;

		seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
	}
	if (has_lock && !xa_empty(&ctx->personalities)) {
		unsigned long index;
		const struct cred *cred;

		seq_printf(m, "Personalities:\n");
		xa_for_each(&ctx->personalities, index, cred)
			io_uring_show_cred(m, index, cred);
	}
	if (has_lock)
		mutex_unlock(&ctx->uring_lock);

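	/*
	 * The cancel hash and the CQ overflow list are both protected by
	 * completion_lock, so they can be walked even without uring_lock.
	 */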
	seq_puts(m, "PollList:\n");
	spin_lock(&ctx->completion_lock);
	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
		struct hlist_head *list = &ctx->cancel_hash[i];
		struct io_kiocb *req;

		hlist_for_each_entry(req, list, hash_node)
			seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
				   task_work_pending(req->task));
	}

	seq_puts(m, "CqOverflowList:\n");
	list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) {
		struct io_uring_cqe *cqe = &ocqe->cqe;

		seq_printf(m, "  user_data=%llu, res=%d, flags=%x\n",
			   cqe->user_data, cqe->res, cqe->flags);
	}

	spin_unlock(&ctx->completion_lock);
}

__cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct io_ring_ctx *ctx = f->private_data;

	if (percpu_ref_tryget(&ctx->refs)) {
		__io_uring_show_fdinfo(ctx, m);
		percpu_ref_put(&ctx->refs);
	}
}
#endif
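
For reference, the text emitted above lands in /proc/<pid>/fdinfo/<fd> for any io_uring fd. A minimal userspace sketch of reading it back (illustrative only, not part of this commit; it assumes a kernel with CONFIG_PROC_FS and uses only io_uring_setup(2), with error handling trimmed):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

int main(void)
{
	struct io_uring_params p;
	char path[64], line[256];
	FILE *f;
	int fd;

	/* Create a small ring so the kernel has something to report. */
	memset(&p, 0, sizeof(p));
	fd = syscall(__NR_io_uring_setup, 8, &p);
	if (fd < 0)
		return 1;

	/* Dump the fdinfo text produced by io_uring_show_fdinfo(). */
	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
	f = fopen(path, "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* SqMask:, SqHead:, ..., PollList: */
	fclose(f);

	close(fd);
	return 0;
}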