// https://syzkaller.appspot.com/bug?id=1eaa30de66a3dcf8f4ecffec1ecc90dba071b6f2
// autogenerated by syzkaller (https://github.com/google/syzkaller)

#define _GNU_SOURCE

#include <arpa/inet.h>
#include <endian.h>
#include <errno.h>
#include <fcntl.h>
#include <net/if.h>
#include <netinet/in.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

#include <linux/capability.h>
#include <linux/futex.h>
#include <linux/if_addr.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/neighbour.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static void sleep_ms(uint64_t ms)
{
  usleep(ms * 1000);
}

static uint64_t current_time_ms(void)
{
  struct timespec ts;
  if (clock_gettime(CLOCK_MONOTONIC, &ts))
    exit(1);
  return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
}

static void thread_start(void* (*fn)(void*), void* arg)
{
  pthread_t th;
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setstacksize(&attr, 128 << 10);
  int i;
  for (i = 0; i < 100; i++) {
    if (pthread_create(&th, &attr, fn, arg) == 0) {
      pthread_attr_destroy(&attr);
      return;
    }
    if (errno == EAGAIN) {
      usleep(50);
      continue;
    }
    break;
  }
  exit(1);
}

// Futex-based event used to synchronize the main loop with worker threads.
typedef struct {
  int state;
} event_t;

static void event_init(event_t* ev)
{
  ev->state = 0;
}

static void event_reset(event_t* ev)
{
  ev->state = 0;
}

static void event_set(event_t* ev)
{
  if (ev->state)
    exit(1);
  __atomic_store_n(&ev->state, 1, __ATOMIC_RELEASE);
  syscall(SYS_futex, &ev->state, FUTEX_WAKE | FUTEX_PRIVATE_FLAG);
}

static void event_wait(event_t* ev)
{
  while (!__atomic_load_n(&ev->state, __ATOMIC_ACQUIRE))
    syscall(SYS_futex, &ev->state, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0, 0);
}

static int event_isset(event_t* ev)
{
  return __atomic_load_n(&ev->state, __ATOMIC_ACQUIRE);
}

static int event_timedwait(event_t* ev, uint64_t timeout)
{
  uint64_t start = current_time_ms();
  uint64_t now = start;
  for (;;) {
    uint64_t remain = timeout - (now - start);
    struct timespec ts;
    ts.tv_sec = remain / 1000;
    ts.tv_nsec = (remain % 1000) * 1000 * 1000;
    syscall(SYS_futex, &ev->state, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0, &ts);
    if (__atomic_load_n(&ev->state, __ATOMIC_RELAXED))
      return 1;
    now = current_time_ms();
    if (now - start > timeout)
      return 0;
  }
}

// Formats 'what' and writes it to 'file'; used for sysctl knobs.
static bool write_file(const char* file, const char* what, ...)
{
  char buf[1024];
  va_list args;
  va_start(args, what);
  vsnprintf(buf, sizeof(buf), what, args);
  va_end(args);
  buf[sizeof(buf) - 1] = 0;
  int len = strlen(buf);
  int fd = open(file, O_WRONLY | O_CLOEXEC);
  if (fd == -1)
    return false;
  if (write(fd, buf, len) != len) {
    int err = errno;
    close(fd);
    errno = err;
    return false;
  }
  close(fd);
  return true;
}

static struct {
  char* pos;
  int nesting;
  struct nlattr* nested[8];
  char buf[1024];
} nlmsg;

static void netlink_init(int typ, int flags, const void* data, int size)
{
  memset(&nlmsg, 0, sizeof(nlmsg));
  struct nlmsghdr* hdr = (struct nlmsghdr*)nlmsg.buf;
  hdr->nlmsg_type = typ;
  hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | flags;
  memcpy(hdr + 1, data, size);
  nlmsg.pos = (char*)(hdr + 1) + NLMSG_ALIGN(size);
}

static void netlink_attr(int typ, const void* data, int size)
{
  struct nlattr* attr = (struct nlattr*)nlmsg.pos;
  attr->nla_len = sizeof(*attr) + size;
  attr->nla_type = typ;
  memcpy(attr + 1, data, size);
  nlmsg.pos += NLMSG_ALIGN(attr->nla_len);
}

static int netlink_send(int sock)
{
  if (nlmsg.pos > nlmsg.buf + sizeof(nlmsg.buf) || nlmsg.nesting)
    exit(1);
  struct nlmsghdr* hdr = (struct nlmsghdr*)nlmsg.buf;
  hdr->nlmsg_len = nlmsg.pos - nlmsg.buf;
  struct sockaddr_nl addr;
  memset(&addr, 0, sizeof(addr));
  addr.nl_family = AF_NETLINK;
  unsigned n = sendto(sock, nlmsg.buf, hdr->nlmsg_len, 0,
                      (struct sockaddr*)&addr, sizeof(addr));
  if (n != hdr->nlmsg_len)
    exit(1);
  n = recv(sock, nlmsg.buf, sizeof(nlmsg.buf), 0);
  if (n < sizeof(struct nlmsghdr) + sizeof(struct nlmsgerr))
    exit(1);
  if (hdr->nlmsg_type != NLMSG_ERROR)
    exit(1);
  return -((struct nlmsgerr*)(hdr + 1))->error;
}

static void netlink_device_change(int sock, const char* name, bool up,
                                  const char* master, const void* mac,
                                  int macsize)
{
  struct ifinfomsg hdr;
  memset(&hdr, 0, sizeof(hdr));
  if (up)
    hdr.ifi_flags = hdr.ifi_change = IFF_UP;
  netlink_init(RTM_NEWLINK, 0, &hdr, sizeof(hdr));
  netlink_attr(IFLA_IFNAME, name, strlen(name));
  if (master) {
    int ifindex = if_nametoindex(master);
    netlink_attr(IFLA_MASTER, &ifindex, sizeof(ifindex));
  }
  if (macsize)
    netlink_attr(IFLA_ADDRESS, mac, macsize);
  int err = netlink_send(sock);
  (void)err;
}

static int netlink_add_addr(int sock, const char* dev, const void* addr,
                            int addrsize)
{
  struct ifaddrmsg hdr;
  memset(&hdr, 0, sizeof(hdr));
  hdr.ifa_family = addrsize == 4 ? AF_INET : AF_INET6;
  hdr.ifa_prefixlen = addrsize == 4 ? 24 : 120;
  hdr.ifa_scope = RT_SCOPE_UNIVERSE;
  hdr.ifa_index = if_nametoindex(dev);
  netlink_init(RTM_NEWADDR, NLM_F_CREATE | NLM_F_REPLACE, &hdr, sizeof(hdr));
  netlink_attr(IFA_LOCAL, addr, addrsize);
  netlink_attr(IFA_ADDRESS, addr, addrsize);
  return netlink_send(sock);
}

static void netlink_add_addr4(int sock, const char* dev, const char* addr)
{
  struct in_addr in_addr;
  inet_pton(AF_INET, addr, &in_addr);
  int err = netlink_add_addr(sock, dev, &in_addr, sizeof(in_addr));
  (void)err;
}

static void netlink_add_addr6(int sock, const char* dev, const char* addr)
{
  struct in6_addr in6_addr;
  inet_pton(AF_INET6, addr, &in6_addr);
  int err = netlink_add_addr(sock, dev, &in6_addr, sizeof(in6_addr));
  (void)err;
}

static void netlink_add_neigh(int sock, const char* name, const void* addr,
                              int addrsize, const void* mac, int macsize)
{
  struct ndmsg hdr;
  memset(&hdr, 0, sizeof(hdr));
  hdr.ndm_family = addrsize == 4 ? AF_INET : AF_INET6;
  hdr.ndm_ifindex = if_nametoindex(name);
  hdr.ndm_state = NUD_PERMANENT;
  netlink_init(RTM_NEWNEIGH, NLM_F_EXCL | NLM_F_CREATE, &hdr, sizeof(hdr));
  netlink_attr(NDA_DST, addr, addrsize);
  netlink_attr(NDA_LLADDR, mac, macsize);
  int err = netlink_send(sock);
  (void)err;
}

static int tunfd = -1;
static int tun_frags_enabled;

#define SYZ_TUN_MAX_PACKET_SIZE 1000

#define TUN_IFACE "syz_tun"

#define LOCAL_MAC 0xaaaaaaaaaaaa
#define REMOTE_MAC 0xaaaaaaaaaabb

#define LOCAL_IPV4 "172.20.20.170"
#define REMOTE_IPV4 "172.20.20.187"

#define LOCAL_IPV6 "fe80::aa"
#define REMOTE_IPV6 "fe80::bb"

#define IFF_NAPI 0x0010
#define IFF_NAPI_FRAGS 0x0020

// Creates the syz_tun TAP device and configures addresses and neighbor
// entries on it so packets injected via syz_emit_ethernet() reach the stack.
static void initialize_tun(void)
{
  tunfd = open("/dev/net/tun", O_RDWR | O_NONBLOCK);
  if (tunfd == -1) {
    printf("tun: can't open /dev/net/tun: please enable CONFIG_TUN=y\n");
    printf("otherwise fuzzing or reproducing might not work as intended\n");
    return;
  }
  const int kTunFd = 240;
  if (dup2(tunfd, kTunFd) < 0)
    exit(1);
  close(tunfd);
  tunfd = kTunFd;
  struct ifreq ifr;
  memset(&ifr, 0, sizeof(ifr));
  strncpy(ifr.ifr_name, TUN_IFACE, IFNAMSIZ);
  ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS;
  if (ioctl(tunfd, TUNSETIFF, (void*)&ifr) < 0) {
    // Fall back to a plain TAP device if NAPI frags are not supported.
    ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
    if (ioctl(tunfd, TUNSETIFF, (void*)&ifr) < 0)
      exit(1);
  }
  if (ioctl(tunfd, TUNGETIFF, (void*)&ifr) < 0)
    exit(1);
  tun_frags_enabled = (ifr.ifr_flags & IFF_NAPI_FRAGS) != 0;
  char sysctl[64];
  sprintf(sysctl, "/proc/sys/net/ipv6/conf/%s/accept_dad", TUN_IFACE);
  write_file(sysctl, "0");
  sprintf(sysctl, "/proc/sys/net/ipv6/conf/%s/router_solicitations", TUN_IFACE);
  write_file(sysctl, "0");
  int sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
  if (sock == -1)
    exit(1);
  netlink_add_addr4(sock, TUN_IFACE, LOCAL_IPV4);
  netlink_add_addr6(sock, TUN_IFACE, LOCAL_IPV6);
  uint64_t macaddr = REMOTE_MAC;
  struct in_addr in_addr;
  inet_pton(AF_INET, REMOTE_IPV4, &in_addr);
  netlink_add_neigh(sock, TUN_IFACE, &in_addr, sizeof(in_addr), &macaddr,
                    ETH_ALEN);
  struct in6_addr in6_addr;
  inet_pton(AF_INET6, REMOTE_IPV6, &in6_addr);
  netlink_add_neigh(sock, TUN_IFACE, &in6_addr, sizeof(in6_addr), &macaddr,
                    ETH_ALEN);
  macaddr = LOCAL_MAC;
  netlink_device_change(sock, TUN_IFACE, true, 0, &macaddr, ETH_ALEN);
  close(sock);
}

#define MAX_FRAGS 4

struct vnet_fragmentation {
  uint32_t full;
  uint32_t count;
  uint32_t frags[MAX_FRAGS];
};

// Writes an ethernet frame (a1, length a0) into the tun device, optionally
// split into fragments as described by a2 (struct vnet_fragmentation).
static long syz_emit_ethernet(volatile long a0, volatile long a1,
                              volatile long a2)
{
  if (tunfd < 0)
    return (uintptr_t)-1;
  uint32_t length = a0;
  char* data = (char*)a1;
  struct vnet_fragmentation* frags = (struct vnet_fragmentation*)a2;
  struct iovec vecs[MAX_FRAGS + 1];
  uint32_t nfrags = 0;
  if (!tun_frags_enabled || frags == NULL) {
    vecs[nfrags].iov_base = data;
    vecs[nfrags].iov_len = length;
    nfrags++;
  } else {
    bool full = true;
    uint32_t i, count = 0;
    full = frags->full;
    count = frags->count;
    if (count > MAX_FRAGS)
      count = MAX_FRAGS;
    for (i = 0; i < count && length != 0; i++) {
      uint32_t size = 0;
      size = frags->frags[i];
      if (size > length)
        size = length;
      vecs[nfrags].iov_base = data;
      vecs[nfrags].iov_len = size;
      nfrags++;
      data += size;
      length -= size;
    }
    if (length != 0 && (full || nfrags == 0)) {
      vecs[nfrags].iov_base = data;
      vecs[nfrags].iov_len = length;
      nfrags++;
    }
  }
  return writev(tunfd, vecs, nfrags);
}

static void setup_common()
{
  if (mount(0, "/sys/fs/fuse/connections", "fusectl", 0, 0)) {
  }
}

static void loop();

// Detaches the process into its own session, applies resource limits, and
// unshares IPC/mount/UTS namespaces before running the test body.
static void sandbox_common()
{
  prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
  setpgrp();
  setsid();
  struct rlimit rlim;
  rlim.rlim_cur = rlim.rlim_max = (200 << 20);
  setrlimit(RLIMIT_AS, &rlim);
  rlim.rlim_cur = rlim.rlim_max = 32 << 20;
  setrlimit(RLIMIT_MEMLOCK, &rlim);
  rlim.rlim_cur = rlim.rlim_max = 136 << 20;
  setrlimit(RLIMIT_FSIZE, &rlim);
  rlim.rlim_cur = rlim.rlim_max = 1 << 20;
  setrlimit(RLIMIT_STACK, &rlim);
  rlim.rlim_cur = rlim.rlim_max = 0;
  setrlimit(RLIMIT_CORE, &rlim);
  rlim.rlim_cur = rlim.rlim_max = 256;
  setrlimit(RLIMIT_NOFILE, &rlim);
  if (unshare(CLONE_NEWNS)) {
  }
  if (unshare(CLONE_NEWIPC)) {
  }
  if (unshare(0x02000000 /* CLONE_NEWCGROUP */)) {
  }
  if (unshare(CLONE_NEWUTS)) {
  }
  if (unshare(CLONE_SYSVSEM)) {
  }
  typedef struct {
    const char* name;
    const char* value;
  } sysctl_t;
  static const sysctl_t sysctls[] = {
      {"/proc/sys/kernel/shmmax", "16777216"},
      {"/proc/sys/kernel/shmall", "536870912"},
      {"/proc/sys/kernel/shmmni", "1024"},
      {"/proc/sys/kernel/msgmax", "8192"},
      {"/proc/sys/kernel/msgmni", "1024"},
      {"/proc/sys/kernel/msgmnb", "1024"},
      {"/proc/sys/kernel/sem", "1024 1048576 500 1024"},
  };
  unsigned i;
  for (i = 0; i < sizeof(sysctls) / sizeof(sysctls[0]); i++)
    write_file(sysctls[i].name, sysctls[i].value);
}

int wait_for_loop(int pid)
{
  if (pid < 0)
    exit(1);
  int status = 0;
  while (waitpid(-1, &status, __WALL) != pid) {
  }
  return WEXITSTATUS(status);
}

static void drop_caps(void)
{
  struct __user_cap_header_struct cap_hdr = {};
  struct __user_cap_data_struct cap_data[2] = {};
  cap_hdr.version = _LINUX_CAPABILITY_VERSION_3;
  cap_hdr.pid = getpid();
  if (syscall(SYS_capget, &cap_hdr, &cap_data))
    exit(1);
  const int drop = (1 << CAP_SYS_PTRACE) | (1 << CAP_SYS_NICE);
  cap_data[0].effective &= ~drop;
  cap_data[0].permitted &= ~drop;
  cap_data[0].inheritable &= ~drop;
  if (syscall(SYS_capset, &cap_hdr, &cap_data))
    exit(1);
}

static int do_sandbox_none(void)
{
  if (unshare(CLONE_NEWPID)) {
  }
  int pid = fork();
  if (pid != 0)
    return wait_for_loop(pid);
  setup_common();
  sandbox_common();
  drop_caps();
  if (unshare(CLONE_NEWNET)) {
  }
  initialize_tun();
  loop();
  exit(1);
}

struct thread_t {
  int created, call;
  event_t ready, done;
};

static struct thread_t threads[16];
static void execute_call(int call);
static int running;

// Worker thread: waits for 'ready', executes the assigned call, signals 'done'.
static void* thr(void* arg)
{
  struct thread_t* th = (struct thread_t*)arg;
  for (;;) {
    event_wait(&th->ready);
    event_reset(&th->ready);
    execute_call(th->call);
    __atomic_fetch_sub(&running, 1, __ATOMIC_RELAXED);
    event_set(&th->done);
  }
  return 0;
}

// Dispatches the 10 calls of the reproducer to worker threads, giving each
// call up to 45ms to complete so that a hung call does not block the rest.
static void loop(void)
{
  int i, call, thread;
  for (call = 0; call < 10; call++) {
    for (thread = 0; thread < (int)(sizeof(threads) / sizeof(threads[0]));
         thread++) {
      struct thread_t* th = &threads[thread];
      if (!th->created) {
        th->created = 1;
        event_init(&th->ready);
        event_init(&th->done);
        event_set(&th->done);
        thread_start(thr, th);
      }
      if (!event_isset(&th->done))
        continue;
      event_reset(&th->done);
      th->call = call;
      __atomic_fetch_add(&running, 1, __ATOMIC_RELAXED);
      event_set(&th->ready);
      event_timedwait(&th->done, 45);
      break;
    }
  }
  for (i = 0; i < 100 && __atomic_load_n(&running, __ATOMIC_RELAXED); i++)
    sleep_ms(1);
}

// Results of resource-creating calls: r[0]/r[1] pipe fds, r[2]/r[3] socket fds.
uint64_t r[4] = {0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff,
                 0xffffffffffffffff};

void execute_call(int call)
{
  intptr_t res;
  switch (call) {
  case 0:
    // pipe(&fds) at 0x20000300
    res = syscall(__NR_pipe, 0x20000300);
    if (res != -1) {
      r[0] = *(uint32_t*)0x20000300;
      r[1] = *(uint32_t*)0x20000304;
    }
    break;
  case 1:
    // socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL))
    res = syscall(__NR_socket, 0x11, 3, 0x300);
    if (res != -1)
      r[2] = res;
    break;
  case 2:
    // setsockopt(r[2], SOL_PACKET, PACKET_FANOUT, ...)
    *(uint16_t*)0x200000c0 = 0;
    *(uint8_t*)0x200000c2 = 0;
    *(uint8_t*)0x200000c3 = 0xfc;
    syscall(__NR_setsockopt, r[2], 0x107, 0x12, 0x200000c0, 4);
    break;
  case 3:
    // socket(AF_INET, SOCK_DGRAM, 0)
    res = syscall(__NR_socket, 2, 2, 0);
    if (res != -1)
      r[3] = res;
    break;
  case 4:
    // fcntl(r[1], F_SETPIPE_SZ, 0)
    syscall(__NR_fcntl, r[1], 0x407, 0);
    break;
  case 5:
    syscall(__NR_write, r[1], 0x20000140, 0x4240a2a0);
    break;
  case 6:
    // bind(r[3], {AF_INET, port 0, 172.20.20.170}, 16)
    *(uint16_t*)0x200002c0 = 2;
    *(uint16_t*)0x200002c2 = htobe16(0);
    *(uint8_t*)0x200002c4 = 0xac;
    *(uint8_t*)0x200002c5 = 0x14;
    *(uint8_t*)0x200002c6 = 0x14;
    *(uint8_t*)0x200002c7 = 0xaa;
    syscall(__NR_bind, r[3], 0x200002c0, 0x10);
    break;
  case 7:
    // connect(r[3], {AF_INET, port 0, 224.0.0.1}, 16)
    *(uint16_t*)0x20000040 = 2;
    *(uint16_t*)0x20000042 = htobe16(0);
    *(uint32_t*)0x20000044 = htobe32(0xe0000001);
    syscall(__NR_connect, r[3], 0x20000040, 0x10);
    break;
  case 8:
    // splice from the pipe's read end into the UDP socket
    syscall(__NR_splice, r[0], 0, r[3], 0, 0x30005, 0);
    break;
  case 9:
    // Inject an ethernet frame carrying an IPv6 packet with a fragment
    // extension header through the tun device.
    memcpy((void*)0x20000100,
           "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
           "\x86\xdd\x60\xdc\x65\x2b\x00\x14\x2c\x00\xfe\x80"
           "\x00\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00"
           "\x00\xaa\xfe\x80\x00\x00\x00\x00\x00\x00\x00\x00"
           "\x00\x00\x00\x00\x00\xaa\x00\x00\xff\xfe",
           58);
    *(uint32_t*)0x2000013a = 0x41424344;
    *(uint32_t*)0x2000013e = 0x41424344;
    memcpy((void*)0x20000142, "\x40\x00\x00\x00\x90\x78\x00\x00", 8);
    syz_emit_ethernet(0x4a, 0x20000100, 0);
    break;
  }
}

int main(void)
{
  // Map the fixed data region used by the calls above.
  syscall(__NR_mmap, 0x20000000, 0x1000000, 3, 0x32, -1, 0);
  do_sandbox_none();
  return 0;
}