// https://syzkaller.appspot.com/bug?id=2b6a5e7ed9c189aadc974fc5ff168b131c005947
// autogenerated by syzkaller (http://github.com/google/syzkaller)

#define _GNU_SOURCE
#include <linux/futex.h>
#include <pthread.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

__attribute__((noreturn)) static void doexit(int status)
{
  volatile unsigned i;
  syscall(__NR_exit_group, status);
  for (i = 0;; i++) {
  }
}

#include <setjmp.h>
#include <signal.h>
#include <string.h>
#include <sys/types.h>
#include <time.h>

static __thread int skip_segv;
static __thread jmp_buf segv_env;

static void segv_handler(int sig, siginfo_t* info, void* uctx)
{
  uintptr_t addr = (uintptr_t)info->si_addr;
  const uintptr_t prog_start = 1 << 20;
  const uintptr_t prog_end = 100 << 20;
  // While a NONFAILING section is active, recover via longjmp from faults
  // on addresses outside the program's data area; otherwise exit.
  if (__atomic_load_n(&skip_segv, __ATOMIC_RELAXED) &&
      (addr < prog_start || addr > prog_end)) {
    _longjmp(segv_env, 1);
  }
  doexit(sig);
}

static void install_segv_handler()
{
  struct sigaction sa;
  memset(&sa, 0, sizeof(sa));
  sa.sa_handler = SIG_IGN;
  syscall(SYS_rt_sigaction, 0x20, &sa, NULL, 8);
  syscall(SYS_rt_sigaction, 0x21, &sa, NULL, 8);
  memset(&sa, 0, sizeof(sa));
  sa.sa_sigaction = segv_handler;
  sa.sa_flags = SA_NODEFER | SA_SIGINFO;
  sigaction(SIGSEGV, &sa, NULL);
  sigaction(SIGBUS, &sa, NULL);
}

#define NONFAILING(...)                                  \
  {                                                      \
    __atomic_fetch_add(&skip_segv, 1, __ATOMIC_SEQ_CST); \
    if (_setjmp(segv_env) == 0) {                        \
      __VA_ARGS__;                                       \
    }                                                    \
    __atomic_fetch_sub(&skip_segv, 1, __ATOMIC_SEQ_CST); \
  }

static void test();

void loop()
{
  while (1) {
    test();
  }
}

struct thread_t {
  int created, running, call;
  pthread_t th;
};

static struct thread_t threads[16];
static void execute_call(int call);
static int running;
static int collide;

static void* thr(void* arg)
{
  struct thread_t* th = (struct thread_t*)arg;
  for (;;) {
    while (!__atomic_load_n(&th->running, __ATOMIC_ACQUIRE))
      syscall(SYS_futex, &th->running, FUTEX_WAIT, 0, 0);
    execute_call(th->call);
    __atomic_fetch_sub(&running, 1, __ATOMIC_RELAXED);
    __atomic_store_n(&th->running, 0, __ATOMIC_RELEASE);
    syscall(SYS_futex, &th->running, FUTEX_WAKE);
  }
  return 0;
}

static void execute(int num_calls)
{
  int call, thread;
  running = 0;
  for (call = 0; call < num_calls; call++) {
    for (thread = 0; thread < sizeof(threads) / sizeof(threads[0]);
         thread++) {
      struct thread_t* th = &threads[thread];
      if (!th->created) {
        th->created = 1;
        pthread_attr_t attr;
        pthread_attr_init(&attr);
        pthread_attr_setstacksize(&attr, 128 << 10);
        pthread_create(&th->th, &attr, thr, th);
      }
      if (!__atomic_load_n(&th->running, __ATOMIC_ACQUIRE)) {
        th->call = call;
        __atomic_fetch_add(&running, 1, __ATOMIC_RELAXED);
        __atomic_store_n(&th->running, 1, __ATOMIC_RELEASE);
        syscall(SYS_futex, &th->running, FUTEX_WAKE);
        // In collide mode, let every second call race with the previous
        // one instead of waiting for it to finish.
        if (collide && call % 2)
          break;
        struct timespec ts;
        ts.tv_sec = 0;
        ts.tv_nsec = 20 * 1000 * 1000;
        syscall(SYS_futex, &th->running, FUTEX_WAIT, 1, &ts);
        if (running)
          usleep((call == num_calls - 1) ? 10000 : 1000);
        break;
      }
    }
  }
}

long r[3];
uint64_t procid;

void execute_call(int call)
{
  switch (call) {
  case 0:
    // Map the fixed data area used by the reproducer (PROT_READ|PROT_WRITE,
    // MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS).
    syscall(__NR_mmap, 0x20000000, 0xfff000, 0x3, 0x32, 0xffffffff, 0x0);
    break;
  case 1:
    // openat(AT_FDCWD, "/dev/kvm", O_RDONLY)
    NONFAILING(memcpy((void*)0x20988000, "/dev/kvm", 9));
    r[0] = syscall(__NR_openat, 0xffffffffffffff9c, 0x20988000, 0x0, 0x0);
    break;
  case 2:
    // KVM_CREATE_VM
    r[1] = syscall(__NR_ioctl, r[0], 0xae01, 0x0);
    break;
  case 3:
    // KVM_CREATE_VCPU
    r[2] = syscall(__NR_ioctl, r[1], 0xae41, 0x0);
    break;
  case 4:
    syscall(__NR_clock_gettime, 0x0, 0x20000000);
    break;
  case 5:
    // KVM_SET_USER_MEMORY_REGION: struct kvm_userspace_memory_region
    // {slot, flags, guest_phys_addr, memory_size, userspace_addr}
    NONFAILING(*(uint32_t*)0x20001000 = 0x10005);
    NONFAILING(*(uint32_t*)0x20001004 = 0x0);
    NONFAILING(*(uint64_t*)0x20001008 = 0x0);
    NONFAILING(*(uint64_t*)0x20001010 = 0x2000);
    NONFAILING(*(uint64_t*)0x20001018 = 0x20000000);
    syscall(__NR_ioctl, r[1], 0x4020ae46, 0x20001000);
    break;
  case 6:
    // KVM_SMI
    syscall(__NR_ioctl, r[2], 0xaeb7);
    break;
  case 7:
    // KVM_RUN
    syscall(__NR_ioctl, r[2], 0xae80, 0x0);
    break;
  case 8:
    // KVM_RUN
    syscall(__NR_ioctl, r[2], 0xae80, 0x0);
    break;
  }
}

void test()
{
  memset(r, -1, sizeof(r));
  execute(9);
  collide = 1;
  execute(9);
}

int main()
{
  // Fork 8 worker processes, each running the test in an infinite loop.
  int i;
  for (i = 0; i < 8; i++) {
    if (fork() == 0) {
      procid = i;
      install_segv_handler();
      loop();
      return 0;
    }
  }
  sleep(1000000);
  return 0;
}