tsan: fix a bug in trace part switching

Callers of TraceSwitchPart expect that TraceAcquire will always succeed
after the call. However, the TryTraceFunc/TraceMutexLock calls inside
TraceSwitchPart that restore the current stack/mutexset can fill the trace
part exactly up to the TracePart::kAlignment gap, in which case the next
TraceAcquire won't succeed. Skip the alignment gap after writing the
initial stack/mutexset to avoid that.

Reviewed By: melver

Differential Revision: https://reviews.llvm.org/D129777
This commit is contained in:
Dmitry Vyukov 2022-07-14 16:58:07 +02:00
parent 58fec78231
commit ab02680b5a
2 changed files with 21 additions and 0 deletions

View file

@ -951,6 +951,15 @@ void TraceSwitchPartImpl(ThreadState* thr) {
TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
d.addr, d.stack_id);
}
// Callers of TraceSwitchPart expect that TraceAcquire will always succeed
// after the call. It's possible that TryTraceFunc/TraceMutexLock above
// filled the trace part exactly up to the TracePart::kAlignment gap
// and the next TraceAcquire won't succeed. Skip the gap to avoid that.
EventFunc *ev;
if (!TraceAcquire(thr, &ev)) {
CHECK(TraceSkipGap(thr));
CHECK(TraceAcquire(thr, &ev));
}
{
Lock lock(&ctx->slot_mtx);
// There is a small chance that the slot may be not queued at this point.

View file

@ -243,6 +243,18 @@ TRACE_TEST(Trace, MultiPart) {
CHECK_EQ(mset.Get(1).count, 1);
}
// Stress-tests deep/repeated trace part switching: each outer iteration
// pushes a function entry and then emits enough lock/unlock event pairs to
// overflow at least one TracePart, forcing TraceSwitchPart to run many
// times (including the kAlignment-gap edge case this commit fixes).
TRACE_TEST(Trace, DeepSwitch) {
  ThreadArray<1> thr;
  // Upper bound on the number of events a single part can hold; emitting
  // this many pairs per iteration guarantees a part switch each time.
  // Loop-invariant, so computed once outside the outer loop.
  const uptr kEvents = sizeof(TracePart) / sizeof(Event);
  for (int iter = 0; iter < 2000; iter++) {
    FuncEntry(thr, 0x1000);
    // Inner index renamed from 'i' to avoid shadowing the outer loop
    // variable (flagged by -Wshadow / clang-tidy).
    for (uptr ev = 0; ev < kEvents; ev++) {
      TraceMutexLock(thr, EventType::kLock, 0x4000, 0x5000, 0x6000);
      TraceMutexUnlock(thr, 0x5000);
    }
  }
}
void CheckTraceState(uptr count, uptr finished, uptr excess, uptr recycle) {
Lock l(&ctx->slot_mtx);
Printf("CheckTraceState(%zu/%zu, %zu/%zu, %zu/%zu, %zu/%zu)\n",