diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c
index f5cc99a166c0..24b71c73d76b 100644
--- a/sys/kern/kern_condvar.c
+++ b/sys/kern/kern_condvar.c
@@ -70,6 +70,7 @@ static inline void cv_wakeup_one(kcondvar_t *);
 static inline void cv_wakeup_all(kcondvar_t *);
 
 syncobj_t cv_syncobj = {
+	.sobj_name	= "cv",
 	.sobj_flag	= SOBJ_SLEEPQ_SORTED,
 	.sobj_unsleep	= cv_unsleep,
 	.sobj_changepri	= sleepq_changepri,
diff --git a/sys/kern/kern_ktrace.c b/sys/kern/kern_ktrace.c
index a377001760b9..615e6bdb93ef 100644
--- a/sys/kern/kern_ktrace.c
+++ b/sys/kern/kern_ktrace.c
@@ -76,6 +76,7 @@ __KERNEL_RCSID(0, "$NetBSD: kern_ktrace.c,v 1.179 2022/03/12 17:45:53 riastradh
 #include
 #include
 #include
+#include <sys/cpu.h>
 #include
 #include
@@ -806,8 +807,38 @@ ktr_psig(int sig, sig_t action, const sigset_t *mask,
 	ktraddentry(l, kte, KTA_WAITOK);
 }
 
+#include <sys/ksyms.h>
+#include <sys/pserialize.h>
+
+static void
+formatsymbol(char *buf, size_t len, vaddr_t addr)
+{
+	unsigned long start;
+	char offset[32];
+	const char *mod, *sym;
+	int s;
+
+	s = pserialize_read_enter();
+	if (ksyms_getname(&mod, &sym, addr, KSYMS_PROC|KSYMS_CLOSEST) != 0 ||
+	    ksyms_getval(mod, sym, &start, KSYMS_ANY) != 0) {
+		snprintf(buf, len, "0x%"PRIxVADDR, addr);
+	} else {
+		if (addr < start) {	/* XXX ??? */
+			snprintf(offset, sizeof(offset), "-0x%zx",
+			    (size_t)(start - addr));
+		} else if (addr == start) {
+			offset[0] = '\0';
+		} else {
+			snprintf(offset, sizeof(offset), "+0x%zx",
+			    (size_t)(addr - start));
+		}
+		snprintf(buf, len, "%s:%s%s", mod, sym, offset);
+	}
+	pserialize_read_exit(s);
+}
+
 void
-ktr_csw(int out, int user)
+ktr_csw(int out, int user, const struct syncobj *syncobj)
 {
 	lwp_t *l = curlwp;
 	struct proc *p = l->l_proc;
@@ -819,10 +850,26 @@
 
 	/*
 	 * Don't record context switches resulting from blocking on
-	 * locks; it's too easy to get duff results.
+	 * locks; the results are not useful, and the mutex may be in a
+	 * softint, which would lead us to ktealloc in softint context,
+	 * which is forbidden.
 	 */
-	if (l->l_syncobj == &mutex_syncobj || l->l_syncobj == &rw_syncobj)
+	if (syncobj == &mutex_syncobj || syncobj == &rw_syncobj)
 		return;
+	if (__predict_false(cpu_intr_p() || cpu_softintr_p())) {
+		char syncobj_sym[128];
+		char name[16];
+
+		formatsymbol(syncobj_sym, sizeof(syncobj_sym),
+		    (vaddr_t)syncobj);
+		memcpy(name, syncobj->sobj_name, sizeof name);
+		name[15] = '\0';	/* paranoia */
+		KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(),
+		    "[%s] syncobj=%s@%p [%s] mutex_syncobj=%p rw_syncobj=%p",
+		    l->l_name ? l->l_name : p->p_comm,
+		    syncobj_sym, syncobj, name,
+		    &mutex_syncobj, &rw_syncobj);
+	}
 
 	/*
 	 * We can't sleep if we're already going to sleep (if original
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index f89242f549fd..ef6bab4dda8a 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -301,6 +301,7 @@ lockops_t mutex_adaptive_lockops = {
 };
 
 syncobj_t mutex_syncobj = {
+	.sobj_name	= "mutex",
 	.sobj_flag	= SOBJ_SLEEPQ_SORTED,
 	.sobj_unsleep	= turnstile_unsleep,
 	.sobj_changepri	= turnstile_changepri,
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 1dfbfb5b3521..b45bdaec7103 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -134,6 +134,7 @@ lockops_t rwlock_lockops = {
 };
 
 syncobj_t rw_syncobj = {
+	.sobj_name	= "rw",
 	.sobj_flag	= SOBJ_SLEEPQ_SORTED,
 	.sobj_unsleep	= turnstile_unsleep,
 	.sobj_changepri	= turnstile_changepri,
diff --git a/sys/kern/kern_sleepq.c b/sys/kern/kern_sleepq.c
index c5c86e5a755b..a2b8ffe34549 100644
--- a/sys/kern/kern_sleepq.c
+++ b/sys/kern/kern_sleepq.c
@@ -217,6 +217,9 @@
  * queue must already be locked, and any interlock (such as the kernel
  * lock) must have be released (see sleeptab_lookup(), sleepq_enter()).
  */
+#include <sys/ksyms.h>
+#include <sys/pserialize.h>
+#include <ddb/ddb.h>
 void
 sleepq_enqueue(sleepq_t *sq, wchan_t wchan, const char *wmesg, syncobj_t *sobj,
     bool catch_p)
@@ -228,6 +231,20 @@
 	KASSERT(l->l_wchan == NULL && l->l_sleepq == NULL);
 	KASSERT((l->l_flag & LW_SINTR) == 0);
 
+	{
+		int s, error;
+
+		s = pserialize_read_enter();
+		error = ksyms_getname(NULL, NULL, (vaddr_t)sobj, KSYMS_EXACT);
+		pserialize_read_exit(s);
+
+		if (error) {
+			printf("%s: unknown syncobj: %p [%s] (wchan=%p, wmesg=%s)\n",
+			    __func__, sobj, sobj->sobj_name, wchan, wmesg);
+			db_stacktrace();
+		}
+	}
+
 	l->l_syncobj = sobj;
 	l->l_wchan = wchan;
 	l->l_sleepq = sq;
@@ -309,8 +326,9 @@
 	lwp_t *l = curlwp;
 	bool early = false;
 	int biglocks = l->l_biglocks;
+	struct syncobj *syncobj = l->l_syncobj;
 
-	ktrcsw(1, 0);
+	ktrcsw(1, 0, syncobj);
 
 	/*
 	 * If sleeping interruptably, check for pending signals, exits or
@@ -397,7 +415,7 @@
 		}
 	}
 
-	ktrcsw(0, 0);
+	ktrcsw(0, 0, syncobj);
 	if (__predict_false(biglocks != 0)) {
 		KERNEL_LOCK(biglocks, NULL);
 	}
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 17646499fd51..6fe5b7570cb6 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -107,6 +107,7 @@ static void sched_changepri(struct lwp *, pri_t);
 static void sched_lendpri(struct lwp *, pri_t);
 
 syncobj_t sleep_syncobj = {
+	.sobj_name	= "sleep",
 	.sobj_flag	= SOBJ_SLEEPQ_SORTED,
 	.sobj_unsleep	= sleepq_unsleep,
 	.sobj_changepri	= sleepq_changepri,
@@ -115,6 +116,7 @@ syncobj_t sleep_syncobj = {
 };
 
 syncobj_t sched_syncobj = {
+	.sobj_name	= "sched",
 	.sobj_flag	= SOBJ_SLEEPQ_SORTED,
 	.sobj_unsleep	= sched_unsleep,
 	.sobj_changepri	= sched_changepri,
@@ -123,6 +125,7 @@ syncobj_t sched_syncobj = {
 };
 
 syncobj_t kpause_syncobj = {
+	.sobj_name	= "kpause",
 	.sobj_flag	= SOBJ_SLEEPQ_NULL,
 	.sobj_unsleep	= sleepq_unsleep,
 	.sobj_changepri	= sleepq_changepri,
diff --git a/sys/kern/kern_turnstile.c b/sys/kern/kern_turnstile.c
index dc8e666aa896..9fa8230f9439 100644
--- a/sys/kern/kern_turnstile.c
+++ b/sys/kern/kern_turnstile.c
@@ -383,6 +383,10 @@
 	tc = &turnstile_chains[hash];
 	lock = &turnstile_locks[hash].lock;
 
+	KASSERTMSG(sobj == &mutex_syncobj || sobj == &rw_syncobj,
+	    "sobj=%p[%s] mutex_syncobj=%p rw_syncobj=%p",
+	    sobj, sobj->sobj_name, &mutex_syncobj, &rw_syncobj);
+
 	KASSERT(q == TS_READER_Q || q == TS_WRITER_Q);
 	KASSERT(mutex_owned(lock));
 	KASSERT(l != NULL && l->l_ts != NULL);
@@ -426,6 +430,10 @@
 	if (obase < PRI_KTHREAD)
 		l->l_kpribase = PRI_KTHREAD;
 	sleepq_enqueue(sq, obj, "tstile", sobj, false);
+	KASSERTMSG(l->l_syncobj == sobj,
+	    "l->l_syncobj=%p[%s] sobj=%p[%s] mutex_syncobj=%p rw_syncobj=%p",
+	    l->l_syncobj, l->l_syncobj->sobj_name, sobj, sobj->sobj_name,
+	    &mutex_syncobj, &rw_syncobj);
 
 	/*
 	 * Disable preemption across this entire block, as we may drop
@@ -435,6 +443,10 @@
 	KPREEMPT_DISABLE(l);
 	KASSERT(lock == l->l_mutex);
 	turnstile_lendpri(l);
+	KASSERTMSG(l->l_syncobj == sobj,
+	    "l->l_syncobj=%p[%s] sobj=%p[%s] mutex_syncobj=%p rw_syncobj=%p",
+	    l->l_syncobj, l->l_syncobj->sobj_name, sobj, sobj->sobj_name,
+	    &mutex_syncobj, &rw_syncobj);
 	sleepq_block(0, false);
 	l->l_kpribase = obase;
 	KPREEMPT_ENABLE(l);
diff --git a/sys/kern/sys_lwp.c b/sys/kern/sys_lwp.c
index 6696f50d99c1..1cbe12d9fe7e 100644
--- a/sys/kern/sys_lwp.c
+++ b/sys/kern/sys_lwp.c
@@ -58,6 +58,7 @@ __KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.82 2020/05/23 23:42:43 ad Exp $");
 static const stack_t lwp_ss_init = SS_INIT;
 
 syncobj_t lwp_park_syncobj = {
+	.sobj_name	= "lwp_park",
 	.sobj_flag	= SOBJ_SLEEPQ_NULL,
 	.sobj_unsleep	= sleepq_unsleep,
 	.sobj_changepri	= sleepq_changepri,
diff --git a/sys/kern/sys_select.c b/sys/kern/sys_select.c
index e490f304d913..800b11f81ba6 100644
--- a/sys/kern/sys_select.c
+++ b/sys/kern/sys_select.c
@@ -143,6 +143,7 @@ static const int sel_flag[] = {
  * enqueue LWPs at all, unless subject to a collision.
*/ syncobj_t select_sobj = { + .sobj_name = "select", .sobj_flag = SOBJ_SLEEPQ_LIFO, .sobj_unsleep = sleepq_unsleep, .sobj_changepri = sleepq_changepri, diff --git a/sys/sys/ktrace.h b/sys/sys/ktrace.h index 0a74223b9dcb..18a0fea03380 100644 --- a/sys/sys/ktrace.h +++ b/sys/sys/ktrace.h @@ -297,6 +297,8 @@ __END_DECLS #else +struct syncobj; + void ktrinit(void); void ktrderef(struct proc *); void ktradref(struct proc *); @@ -307,7 +309,7 @@ extern int ktrace_on; int ktruser(const char *, void *, size_t, int); bool ktr_point(int); -void ktr_csw(int, int); +void ktr_csw(int, int, const struct syncobj *); void ktr_emul(void); void ktr_geniov(int, enum uio_rw, struct iovec *, size_t, int); void ktr_genio(int, enum uio_rw, const void *, size_t, int); @@ -349,10 +351,10 @@ ktrpoint(int fac) } static __inline void -ktrcsw(int a, int b) +ktrcsw(int a, int b, const struct syncobj *c) { if (__predict_false(ktrace_on)) - ktr_csw(a, b); + ktr_csw(a, b, c); } static __inline void diff --git a/sys/sys/syncobj.h b/sys/sys/syncobj.h index a6676d0b3ae2..5e19c4efdd5f 100644 --- a/sys/sys/syncobj.h +++ b/sys/sys/syncobj.h @@ -42,6 +42,7 @@ typedef volatile const void *wchan_t; * Synchronisation object operations set. */ typedef struct syncobj { + char sobj_name[16]; u_int sobj_flag; void (*sobj_unsleep)(struct lwp *, bool); void (*sobj_changepri)(struct lwp *, pri_t);