diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0817d88383d5..0e005b1a60e3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5288,10 +5288,6 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 		goto failed;
 
 	/* Attempt the batch allocation */
-	local_lock_irqsave(&pagesets.lock, flags);
-	pcp = this_cpu_ptr(zone->per_cpu_pageset);
-	pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
-
 	while (nr_populated < nr_pages) {
 
 		/* Skip existing pages */
@@ -5300,12 +5296,16 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 			continue;
 		}
 
+		local_lock_irqsave(&pagesets.lock, flags);
+		pcp = this_cpu_ptr(zone->per_cpu_pageset);
+		pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
 		page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
 								pcp, pcp_list);
+		local_unlock_irqrestore(&pagesets.lock, flags);
 		if (unlikely(!page)) {
 			/* Try and get at least one page */
 			if (!nr_populated)
-				goto failed_irq;
+				goto failed;
 			break;
 		}
 		nr_account++;
@@ -5318,16 +5318,11 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 		nr_populated++;
 	}
 
-	local_unlock_irqrestore(&pagesets.lock, flags);
-
 	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
 	zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
 
 	return nr_populated;
 
-failed_irq:
-	local_unlock_irqrestore(&pagesets.lock, flags);
-
 failed:
 	page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
 	if (page) {
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index fd69a03d6137a..ad78bf631900b 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -156,12 +156,11 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
 	WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) >
 		VIRTIO_SCSI_SENSE_SIZE);
 
-	if (sc->sense_buffer) {
+	if (resp->sense_len) {
 		memcpy(sc->sense_buffer, resp->sense,
 		       min_t(u32,
 			     virtio32_to_cpu(vscsi->vdev, resp->sense_len),
 			     VIRTIO_SCSI_SENSE_SIZE));
-		set_status_byte(sc, SAM_STAT_CHECK_CONDITION);
 	}
 
 	sc->scsi_done(sc);
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index b04a5a02ecf3..d8e1ac1ae10d 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -758,20 +758,46 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
 
 	if (event == HCI_DEV_UNREG) {
 		struct sock *sk;
+		bool put_dev;
 
+restart:
+		put_dev = false;
 		/* Detach sockets from device */
 		read_lock(&hci_sk_list.lock);
 		sk_for_each(sk, &hci_sk_list.head) {
+			/* hci_sk_list.lock is preventing hci_sock_release()
+			 * from calling bt_sock_unlink().
+			 */
+			if (hci_pi(sk)->hdev != hdev || sk_unhashed(sk))
+				continue;
+			/* Take a ref because we can't call lock_sock() with
+			 * hci_sk_list.lock held.
+			 */
+			sock_hold(sk);
+			read_unlock(&hci_sk_list.lock);
 			lock_sock(sk);
-			if (hci_pi(sk)->hdev == hdev) {
+			/* Since hci_sock_release() might have already called
+			 * bt_sock_unlink() while waiting for lock_sock(),
+			 * use sk_hashed(sk) for checking that bt_sock_unlink()
+			 * is not yet called.
+			 */
+			write_lock(&hci_sk_list.lock);
+			if (sk_hashed(sk) && hci_pi(sk)->hdev == hdev) {
 				hci_pi(sk)->hdev = NULL;
 				sk->sk_err = EPIPE;
 				sk->sk_state = BT_OPEN;
 				sk->sk_state_change(sk);
-
-				hci_dev_put(hdev);
+				put_dev = true;
 			}
+			write_unlock(&hci_sk_list.lock);
 			release_sock(sk);
+			sock_put(sk);
+			if (put_dev)
+				hci_dev_put(hdev);
+			/* Restarting is safe, for hci_pi(sk)->hdev != hdev if
+			 * condition met and sk_unhashed(sk) == true otherwise.
+			 */
+			goto restart;
 		}
		read_unlock(&hci_sk_list.lock);
 	}
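For readers following the hci_sock change above, the sketch below restates the same lock-ordering idea in plain userspace C with pthreads: pin the list entry with a reference, drop the list lock before sleeping on the per-entry lock, revalidate under the list write lock, and restart the scan because the iterator became stale once the lock was dropped. Everything here (struct item, detach_owner(), the owner/hashed fields) is hypothetical illustration by the editor, not kernel API; it is a minimal sketch of the pattern, assuming a reader-writer list lock and a per-entry mutex standing in for hci_sk_list.lock and lock_sock().

/* Illustrative userspace sketch only; all names are hypothetical. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct item {
	struct item *next;
	int owner;               /* plays the role of hci_pi(sk)->hdev   */
	bool hashed;             /* plays the role of sk_hashed(sk)      */
	_Atomic int refcnt;      /* plays the role of the sock refcount  */
	pthread_mutex_t lock;    /* plays the role of lock_sock()        */
};

static struct item *head;
static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;

static void get_item(struct item *it)
{
	atomic_fetch_add(&it->refcnt, 1);
}

static void put_item(struct item *it)
{
	if (atomic_fetch_sub(&it->refcnt, 1) == 1)
		free(it);
}

/* Detach every item that still belongs to @owner. */
static void detach_owner(int owner)
{
	struct item *it;

restart:
	pthread_rwlock_rdlock(&list_lock);
	for (it = head; it; it = it->next) {
		/* Skip foreign items and items already unlinked. */
		if (it->owner != owner || !it->hashed)
			continue;

		/* Pin the item: we must not block on it->lock while
		 * holding the list lock.
		 */
		get_item(it);
		pthread_rwlock_unlock(&list_lock);

		pthread_mutex_lock(&it->lock);
		/* Revalidate under the write lock: the item may have been
		 * unlinked while we waited for it->lock.
		 */
		pthread_rwlock_wrlock(&list_lock);
		if (it->hashed && it->owner == owner)
			it->owner = -1;		/* "detach", like hdev = NULL */
		pthread_rwlock_unlock(&list_lock);
		pthread_mutex_unlock(&it->lock);
		put_item(it);

		/* The iterator is stale once the list lock was dropped, so
		 * start over; already-detached or unlinked items are skipped
		 * on the next pass, so the restart terminates.
		 */
		goto restart;
	}
	pthread_rwlock_unlock(&list_lock);
}

As in the patch, the restart is what keeps the traversal correct after the list lock has been released mid-walk; each pass detaches at most one entry and the skip check guarantees forward progress.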