// https://syzkaller.appspot.com/bug?id=e9c83cc3d509b568109880daff42a63e945fbc5b
// autogenerated by syzkaller (http://github.com/google/syzkaller)
//
// Reproducer harness: forks 8 worker processes, each repeatedly forking a
// short-lived child that replays a fixed sequence of KVM syscalls (with a
// second "collide" pass that races pairs of calls on separate threads).
// NOTE: the original extraction had lost the header names on every #include;
// they are restored here from the symbols the code actually uses.

#define _GNU_SOURCE
#include <endian.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/futex.h>
#include <pthread.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

static void sleep_ms(uint64_t ms)
{
	usleep(ms * 1000);
}

// Monotonic wall-clock in milliseconds; aborts on clock_gettime failure.
uint64_t current_time_ms()
{
	struct timespec ts;
	if (clock_gettime(CLOCK_MONOTONIC, &ts))
		exit(1);
	return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
}

// Spawn a detached-by-neglect worker thread with a small (128KB) stack.
static void thread_start(void* (*fn)(void*), void* arg)
{
	pthread_t th;
	pthread_attr_t attr;
	pthread_attr_init(&attr);
	pthread_attr_setstacksize(&attr, 128 << 10);
	if (pthread_create(&th, &attr, fn, arg))
		exit(1);
	pthread_attr_destroy(&attr);
}

// Minimal one-shot event built on futex; state is 0 (unset) or 1 (set).
typedef struct {
	int state;
} event_t;

static void event_init(event_t* ev)
{
	ev->state = 0;
}

static void event_reset(event_t* ev)
{
	ev->state = 0;
}

static void event_set(event_t* ev)
{
	// Double-set indicates a harness logic error.
	if (ev->state)
		exit(1);
	__atomic_store_n(&ev->state, 1, __ATOMIC_RELEASE);
	syscall(SYS_futex, &ev->state, FUTEX_WAKE);
}

static void event_wait(event_t* ev)
{
	while (!__atomic_load_n(&ev->state, __ATOMIC_ACQUIRE))
		syscall(SYS_futex, &ev->state, FUTEX_WAIT, 0, 0);
}

static int event_isset(event_t* ev)
{
	return __atomic_load_n(&ev->state, __ATOMIC_ACQUIRE);
}

// Wait up to timeout_ms for ev to be set; returns 1 if set, 0 on timeout.
// Loops around FUTEX_WAIT because it can wake spuriously or on signals.
static int event_timedwait(event_t* ev, uint64_t timeout_ms)
{
	struct timespec ts;
	if (clock_gettime(CLOCK_MONOTONIC, &ts))
		exit(1);
	const uint64_t kNsPerSec = 1000 * 1000 * 1000;
	uint64_t start_ns = (uint64_t)ts.tv_sec * kNsPerSec + (uint64_t)ts.tv_nsec;
	uint64_t now_ns = start_ns;
	uint64_t timeout_ns = timeout_ms * 1000 * 1000;
	for (;;) {
		// FUTEX_WAIT takes a *relative* timeout; recompute the remainder
		// each iteration.
		uint64_t remain_ns = timeout_ns - (now_ns - start_ns);
		ts.tv_sec = remain_ns / kNsPerSec;
		ts.tv_nsec = remain_ns % kNsPerSec;
		syscall(SYS_futex, &ev->state, FUTEX_WAIT, 0, &ts);
		if (__atomic_load_n(&ev->state, __ATOMIC_RELAXED))
			return 1;
		if (clock_gettime(CLOCK_MONOTONIC, &ts))
			exit(1);
		now_ns = (uint64_t)ts.tv_sec * kNsPerSec + (uint64_t)ts.tv_nsec;
		if (now_ns - start_ns > timeout_ns)
			return 0;
	}
}

extern unsigned long long procid;

static void setup_loop()
{
}

static void reset_loop()
{
}

static void setup_test()
{
	// Die with the parent so killed harness processes don't leak children.
	prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
	setpgrp();
}

static void reset_test()
{
	// Close any fds the test opened (fds 0-2 are left alone).
	int fd;
	for (fd = 3; fd < 30; fd++)
		close(fd);
}

struct thread_t {
	int created, call;
	event_t ready, done; // ready: work posted; done: call finished
};

static struct thread_t threads[16];
static void execute_call(int call);
static int running;
static int collide;

// Worker thread body: waits for a call to be posted, executes it, signals done.
static void* thr(void* arg)
{
	struct thread_t* th = (struct thread_t*)arg;
	for (;;) {
		event_wait(&th->ready);
		event_reset(&th->ready);
		execute_call(th->call);
		__atomic_fetch_sub(&running, 1, __ATOMIC_RELAXED);
		event_set(&th->done);
	}
	return 0;
}

// Dispatch num_calls calls onto the thread pool. In collide mode, every
// even-numbered call is posted without waiting so it races the next one.
static void execute(int num_calls)
{
	int call, thread;
	running = 0;
	for (call = 0; call < num_calls; call++) {
		for (thread = 0; thread < sizeof(threads) / sizeof(threads[0]);
		     thread++) {
			struct thread_t* th = &threads[thread];
			if (!th->created) {
				th->created = 1;
				event_init(&th->ready);
				event_init(&th->done);
				event_set(&th->done);
				thread_start(thr, th);
			}
			if (!event_isset(&th->done))
				continue; // thread still busy; try the next one
			event_reset(&th->done);
			th->call = call;
			__atomic_fetch_add(&running, 1, __ATOMIC_RELAXED);
			event_set(&th->ready);
			if (collide && (call % 2) == 0)
				break; // don't wait: let it race the next call
			event_timedwait(&th->done, 25);
			// Give straggler calls a moment to finish (longer after
			// the last call of the program).
			if (__atomic_load_n(&running, __ATOMIC_RELAXED))
				sleep_ms((call == num_calls - 1) ? 10 : 2);
			break;
		}
	}
}

static void execute_one();
#define WAIT_FLAGS __WALL

// Fork a fresh child per iteration; kill its whole process group if it
// does not finish within 5 seconds.
static void loop()
{
	setup_loop();
	int iter;
	for (iter = 0;; iter++) {
		reset_loop();
		int pid = fork();
		if (pid < 0)
			exit(1);
		if (pid == 0) {
			setup_test();
			execute_one();
			reset_test();
			exit(0);
		}
		int status = 0;
		uint64_t start = current_time_ms();
		for (;;) {
			if (waitpid(-1, &status, WNOHANG | WAIT_FLAGS) == pid)
				break;
			sleep_ms(1);
			if (current_time_ms() - start < 5 * 1000)
				continue;
			kill(-pid, SIGKILL);
			kill(pid, SIGKILL);
			while (waitpid(-1, &status, WAIT_FLAGS) != pid) {
			}
			break;
		}
	}
}

// Resources produced by earlier calls: r[0]=/dev/kvm fd, r[1]=VM fd,
// r[2]=vCPU fd. -1 means "not yet created".
uint64_t r[3] = {0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff};
unsigned long long procid;

// The actual syscall program under test; `call` selects one step.
// Arguments point into the fixed mmap region set up in main().
static void execute_call(int call)
{
	long res;
	switch (call) {
	case 0:
		// openat(AT_FDCWD, "/dev/kvm", ...)
		memcpy((void*)0x20000180, "/dev/kvm", 9);
		res = syscall(__NR_openat, 0xffffffffffffff9c, 0x20000180,
			      0xc00200000000, 0);
		if (res != -1)
			r[0] = res;
		break;
	case 1:
		// ioctl(kvm, KVM_CREATE_VM)
		res = syscall(__NR_ioctl, r[0], 0xae01, 0);
		if (res != -1)
			r[1] = res;
		break;
	case 2:
		// ioctl(vm, KVM_CREATE_VCPU)
		res = syscall(__NR_ioctl, r[1], 0xae41, 0);
		if (res != -1)
			r[2] = res;
		break;
	case 3:
		// Build a 64-byte payload for vcpu ioctl 0x4040aea0
		// (presumably KVM_SET_VCPU_EVENTS — TODO confirm against
		// the target kernel's uapi headers).
		*(uint8_t*)0x200000c0 = 1;
		*(uint8_t*)0x200000c1 = 7;
		*(uint8_t*)0x200000c2 = 0x56;
		*(uint8_t*)0x200000c3 = 0;
		*(uint32_t*)0x200000c4 = 8;
		*(uint8_t*)0x200000c8 = 4;
		*(uint8_t*)0x200000c9 = 0;
		*(uint8_t*)0x200000ca = 9;
		*(uint8_t*)0x200000cb = 0;
		*(uint8_t*)0x200000cc = 7;
		*(uint8_t*)0x200000cd = 0;
		*(uint8_t*)0x200000ce = 1;
		*(uint8_t*)0x200000cf = 0;
		*(uint32_t*)0x200000d0 = 5;
		*(uint32_t*)0x200000d4 = 7;
		*(uint8_t*)0x200000d8 = 9;
		*(uint8_t*)0x200000d9 = 1;
		*(uint8_t*)0x200000da = -1;
		*(uint8_t*)0x200000db = 0x2b;
		syscall(__NR_ioctl, r[2], 0x4040aea0, 0x200000c0);
		break;
	case 4:
		// ioctl(vm, KVM_SET_USER_MEMORY_REGION): slot 0, flags 2,
		// guest_phys 0, size 0x1000, userspace_addr 0x20016000.
		*(uint32_t*)0x20000080 = 0;
		*(uint32_t*)0x20000084 = 2;
		*(uint64_t*)0x20000088 = 0;
		*(uint64_t*)0x20000090 = 0x1000;
		*(uint64_t*)0x20000098 = 0x20016000;
		syscall(__NR_ioctl, r[1], 0x4020ae46, 0x20000080);
		break;
	case 5:
		// ioctl(vcpu, KVM_RUN)
		syscall(__NR_ioctl, r[2], 0xae80, 0);
		break;
	case 6:
		syscall(__NR_ioctl, r[2], 0xae80, 0);
		break;
	}
}

// Run the 7-call program twice: once serialized, once in collide mode.
static void execute_one()
{
	execute(7);
	collide = 1;
	execute(7);
}

int main()
{
	// Fixed scratch region every hard-coded pointer above falls inside:
	// mmap(0x20000000, 16MB, PROT_READ|PROT_WRITE,
	//      MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0).
	syscall(__NR_mmap, 0x20000000, 0x1000000, 3, 0x32, -1, 0);
	for (procid = 0; procid < 8; procid++) {
		if (fork() == 0) {
			for (;;) {
				loop();
			}
		}
	}
	sleep(1000000);
	return 0;
}