author     Mike Pagano <mpagano@gentoo.org>    2017-05-08 06:43:26 -0400
committer  Mike Pagano <mpagano@gentoo.org>    2017-05-08 06:43:26 -0400
commit     980f3d981da6d8a142740ba34507ce505a337d56 (patch)
tree       ae8b59d1b6757be764082a3867aeace0db3dacd5
parent     Linux patch 4.9.26 (diff)
download   linux-patches-980f3d981da6d8a142740ba34507ce505a337d56.tar.gz
           linux-patches-980f3d981da6d8a142740ba34507ce505a337d56.tar.bz2
           linux-patches-980f3d981da6d8a142740ba34507ce505a337d56.zip
Linux patch 4.9.27 (4.9-28)
-rw-r--r--   0000_README               |   4
-rw-r--r--   1026_linux-4.9.27.patch   | 904
2 files changed, 908 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 64923fda..cc0a9c72 100644
--- a/0000_README
+++ b/0000_README
@@ -147,6 +147,10 @@ Patch: 1025_linux-4.9.26.patch
From: http://www.kernel.org
Desc: Linux 4.9.26
+Patch: 1026_linux-4.9.27.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.27
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1026_linux-4.9.27.patch b/1026_linux-4.9.27.patch
new file mode 100644
index 00000000..8127b523
--- /dev/null
+++ b/1026_linux-4.9.27.patch
@@ -0,0 +1,904 @@
+diff --git a/Makefile b/Makefile
+index c09679c1a70d..35d6b4e76264 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 26
++SUBLEVEL = 27
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
+index 3a9149cf0110..d0ac2d56520f 100644
+--- a/drivers/char/tpm/tpm-interface.c
++++ b/drivers/char/tpm/tpm-interface.c
+@@ -489,8 +489,7 @@ static int tpm_startup(struct tpm_chip *chip, __be16 startup_type)
+ int tpm_get_timeouts(struct tpm_chip *chip)
+ {
+ struct tpm_cmd_t tpm_cmd;
+- unsigned long new_timeout[4];
+- unsigned long old_timeout[4];
++ unsigned long timeout_old[4], timeout_chip[4], timeout_eff[4];
+ struct duration_t *duration_cap;
+ ssize_t rc;
+
+@@ -542,11 +541,15 @@ int tpm_get_timeouts(struct tpm_chip *chip)
+ != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
+ return -EINVAL;
+
+- old_timeout[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
+- old_timeout[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
+- old_timeout[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
+- old_timeout[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
+- memcpy(new_timeout, old_timeout, sizeof(new_timeout));
++ timeout_old[0] = jiffies_to_usecs(chip->timeout_a);
++ timeout_old[1] = jiffies_to_usecs(chip->timeout_b);
++ timeout_old[2] = jiffies_to_usecs(chip->timeout_c);
++ timeout_old[3] = jiffies_to_usecs(chip->timeout_d);
++ timeout_chip[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
++ timeout_chip[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
++ timeout_chip[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
++ timeout_chip[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
++ memcpy(timeout_eff, timeout_chip, sizeof(timeout_eff));
+
+ /*
+ * Provide ability for vendor overrides of timeout values in case
+@@ -554,16 +557,24 @@ int tpm_get_timeouts(struct tpm_chip *chip)
+ */
+ if (chip->ops->update_timeouts != NULL)
+ chip->timeout_adjusted =
+- chip->ops->update_timeouts(chip, new_timeout);
++ chip->ops->update_timeouts(chip, timeout_eff);
+
+ if (!chip->timeout_adjusted) {
+- /* Don't overwrite default if value is 0 */
+- if (new_timeout[0] != 0 && new_timeout[0] < 1000) {
+- int i;
++ /* Restore default if chip reported 0 */
++ int i;
+
++ for (i = 0; i < ARRAY_SIZE(timeout_eff); i++) {
++ if (timeout_eff[i])
++ continue;
++
++ timeout_eff[i] = timeout_old[i];
++ chip->timeout_adjusted = true;
++ }
++
++ if (timeout_eff[0] != 0 && timeout_eff[0] < 1000) {
+ /* timeouts in msec rather usec */
+- for (i = 0; i != ARRAY_SIZE(new_timeout); i++)
+- new_timeout[i] *= 1000;
++ for (i = 0; i != ARRAY_SIZE(timeout_eff); i++)
++ timeout_eff[i] *= 1000;
+ chip->timeout_adjusted = true;
+ }
+ }
+@@ -572,16 +583,16 @@ int tpm_get_timeouts(struct tpm_chip *chip)
+ if (chip->timeout_adjusted) {
+ dev_info(&chip->dev,
+ HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
+- old_timeout[0], new_timeout[0],
+- old_timeout[1], new_timeout[1],
+- old_timeout[2], new_timeout[2],
+- old_timeout[3], new_timeout[3]);
++ timeout_chip[0], timeout_eff[0],
++ timeout_chip[1], timeout_eff[1],
++ timeout_chip[2], timeout_eff[2],
++ timeout_chip[3], timeout_eff[3]);
+ }
+
+- chip->timeout_a = usecs_to_jiffies(new_timeout[0]);
+- chip->timeout_b = usecs_to_jiffies(new_timeout[1]);
+- chip->timeout_c = usecs_to_jiffies(new_timeout[2]);
+- chip->timeout_d = usecs_to_jiffies(new_timeout[3]);
++ chip->timeout_a = usecs_to_jiffies(timeout_eff[0]);
++ chip->timeout_b = usecs_to_jiffies(timeout_eff[1]);
++ chip->timeout_c = usecs_to_jiffies(timeout_eff[2]);
++ chip->timeout_d = usecs_to_jiffies(timeout_eff[3]);
+
+ duration:
+ tpm_cmd.header.in = tpm_getcap_header;
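
The tpm_get_timeouts() rework above keeps three arrays: the driver's previous defaults (timeout_old), the values the chip reports (timeout_chip), and the values actually used (timeout_eff). When no vendor override applies, a zero reported timeout falls back to the previous default, and a suspiciously small first value is treated as milliseconds and scaled to microseconds. Below is a minimal standalone sketch of just that selection logic, with illustrative names; it is not the kernel code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define N_TIMEOUTS 4

static bool fix_up_timeouts(unsigned long eff[],
                            const unsigned long chip[],
                            const unsigned long old[])
{
        bool adjusted = false;
        size_t i;

        for (i = 0; i < N_TIMEOUTS; i++) {
                eff[i] = chip[i];
                if (!eff[i]) {          /* chip reported 0: keep the old default */
                        eff[i] = old[i];
                        adjusted = true;
                }
        }

        if (eff[0] != 0 && eff[0] < 1000) {     /* values look like msec, not usec */
                for (i = 0; i < N_TIMEOUTS; i++)
                        eff[i] *= 1000;
                adjusted = true;
        }
        return adjusted;
}

int main(void)
{
        unsigned long old[N_TIMEOUTS]  = { 750000, 2000000, 750000, 750000 };
        unsigned long chip[N_TIMEOUTS] = { 0, 2000000, 750000, 750000 };
        unsigned long eff[N_TIMEOUTS];

        if (fix_up_timeouts(eff, chip, old))
                printf("A=%luus B=%luus C=%luus D=%luus\n",
                       eff[0], eff[1], eff[2], eff[3]);
        return 0;
}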
+diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
+index 43146162c122..b99c1df48156 100644
+--- a/drivers/hwmon/it87.c
++++ b/drivers/hwmon/it87.c
+@@ -3115,7 +3115,7 @@ static int __init sm_it87_init(void)
+ {
+ int sioaddr[2] = { REG_2E, REG_4E };
+ struct it87_sio_data sio_data;
+- unsigned short isa_address;
++ unsigned short isa_address[2];
+ bool found = false;
+ int i, err;
+
+@@ -3125,15 +3125,29 @@ static int __init sm_it87_init(void)
+
+ for (i = 0; i < ARRAY_SIZE(sioaddr); i++) {
+ memset(&sio_data, 0, sizeof(struct it87_sio_data));
+- isa_address = 0;
+- err = it87_find(sioaddr[i], &isa_address, &sio_data);
+- if (err || isa_address == 0)
++ isa_address[i] = 0;
++ err = it87_find(sioaddr[i], &isa_address[i], &sio_data);
++ if (err || isa_address[i] == 0)
+ continue;
++ /*
++ * Don't register second chip if its ISA address matches
++ * the first chip's ISA address.
++ */
++ if (i && isa_address[i] == isa_address[0])
++ break;
+
+- err = it87_device_add(i, isa_address, &sio_data);
++ err = it87_device_add(i, isa_address[i], &sio_data);
+ if (err)
+ goto exit_dev_unregister;
++
+ found = true;
++
++ /*
++ * IT8705F may respond on both SIO addresses.
++ * Stop probing after finding one.
++ */
++ if (sio_data.type == it87)
++ break;
+ }
+
+ if (!found) {
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index 966eb4b61aed..a68c650aad11 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1847,7 +1847,7 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
+ if (r)
+ goto out;
+
+- param->data_size = sizeof(*param);
++ param->data_size = offsetof(struct dm_ioctl, data);
+ r = fn(param, input_param_size);
+
+ if (unlikely(param->flags & DM_BUFFER_FULL_FLAG) &&
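
The dm-ioctl hunk replaces sizeof(*param) with offsetof(struct dm_ioctl, data) when seeding data_size, so the reported size covers only the fixed header rather than the whole padded structure. The sketch below shows the distinction it relies on; the struct and names are illustrative, not the real dm_ioctl layout.

#include <stddef.h>
#include <stdio.h>

struct ioctl_hdr {
        unsigned int version;
        unsigned int data_size;   /* bytes of valid output, counted from offset 0 */
        unsigned int data_start;  /* offset of the variable-length payload */
        char data[7];             /* payload area plus trailing padding */
};

int main(void)
{
        /* sizeof() includes the payload area and any padding;
         * offsetof() is exactly the size of the fixed header. */
        printf("sizeof   = %zu\n", sizeof(struct ioctl_hdr));
        printf("offsetof = %zu\n", offsetof(struct ioctl_hdr, data));
        return 0;
}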
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 3f218f5cf29b..c5ab1b0037fc 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -400,8 +400,6 @@ MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels")
+ */
+ static int storvsc_timeout = 180;
+
+-static int msft_blist_flags = BLIST_TRY_VPD_PAGES;
+-
+ #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
+ static struct scsi_transport_template *fc_transport_template;
+ #endif
+@@ -1283,6 +1281,22 @@ static int storvsc_do_io(struct hv_device *device,
+ return ret;
+ }
+
++static int storvsc_device_alloc(struct scsi_device *sdevice)
++{
++ /*
++ * Set blist flag to permit the reading of the VPD pages even when
++ * the target may claim SPC-2 compliance. MSFT targets currently
++ * claim SPC-2 compliance while they implement post SPC-2 features.
++ * With this flag we can correctly handle WRITE_SAME_16 issues.
++ *
++ * Hypervisor reports SCSI_UNKNOWN type for DVD ROM device but
++ * still supports REPORT LUN.
++ */
++ sdevice->sdev_bflags = BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES;
++
++ return 0;
++}
++
+ static int storvsc_device_configure(struct scsi_device *sdevice)
+ {
+
+@@ -1298,14 +1312,6 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
+ sdevice->no_write_same = 1;
+
+ /*
+- * Add blist flags to permit the reading of the VPD pages even when
+- * the target may claim SPC-2 compliance. MSFT targets currently
+- * claim SPC-2 compliance while they implement post SPC-2 features.
+- * With this patch we can correctly handle WRITE_SAME_16 issues.
+- */
+- sdevice->sdev_bflags |= msft_blist_flags;
+-
+- /*
+ * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3
+ * if the device is a MSFT virtual device. If the host is
+ * WIN10 or newer, allow write_same.
+@@ -1569,6 +1575,7 @@ static struct scsi_host_template scsi_driver = {
+ .eh_host_reset_handler = storvsc_host_reset_handler,
+ .proc_name = "storvsc_host",
+ .eh_timed_out = storvsc_eh_timed_out,
++ .slave_alloc = storvsc_device_alloc,
+ .slave_configure = storvsc_device_configure,
+ .cmd_per_lun = 255,
+ .this_id = -1,
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 6e3e63675e56..22d32d295c5b 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -5621,17 +5621,15 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev)
+ static void serial8250_io_resume(struct pci_dev *dev)
+ {
+ struct serial_private *priv = pci_get_drvdata(dev);
+- const struct pciserial_board *board;
++ struct serial_private *new;
+
+ if (!priv)
+ return;
+
+- board = priv->board;
+- kfree(priv);
+- priv = pciserial_init_ports(dev, board);
+-
+- if (!IS_ERR(priv)) {
+- pci_set_drvdata(dev, priv);
++ new = pciserial_init_ports(dev, priv->board);
++ if (!IS_ERR(new)) {
++ pci_set_drvdata(dev, new);
++ kfree(priv);
+ }
+ }
+
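
The serial8250_io_resume() fix above follows the "build the replacement before freeing the original" pattern: the old serial_private stays installed and valid until pciserial_init_ports() succeeds, so a failed re-init no longer leaves a freed pointer in drvdata. A small userspace sketch of the same pattern, with illustrative names:

#include <stdlib.h>

struct state { int config; };

static struct state *current_state;     /* stands in for the pci drvdata pointer */

static struct state *reinit(const struct state *old)
{
        struct state *n = malloc(sizeof(*n));

        if (!n)
                return NULL;
        n->config = old->config;
        return n;
}

static void resume(void)
{
        struct state *old = current_state;
        struct state *n;

        if (!old)
                return;

        n = reinit(old);        /* old stays valid until this succeeds */
        if (n) {
                current_state = n;
                free(old);      /* only dropped after the swap */
        }
}

int main(void)
{
        struct state init = { 1 };

        current_state = reinit(&init);
        resume();
        free(current_state);
        return 0;
}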
+diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
+index 1e643c718917..18dc18f8af2c 100644
+--- a/fs/ceph/addr.c
++++ b/fs/ceph/addr.c
+@@ -315,7 +315,32 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
+ struct page **pages;
+ pgoff_t next_index;
+ int nr_pages = 0;
+- int ret;
++ int got = 0;
++ int ret = 0;
++
++ if (!current->journal_info) {
++ /* caller of readpages does not hold buffer and read caps
++ * (fadvise, madvise and readahead cases) */
++ int want = CEPH_CAP_FILE_CACHE;
++ ret = ceph_try_get_caps(ci, CEPH_CAP_FILE_RD, want, &got);
++ if (ret < 0) {
++ dout("start_read %p, error getting cap\n", inode);
++ } else if (!(got & want)) {
++ dout("start_read %p, no cache cap\n", inode);
++ ret = 0;
++ }
++ if (ret <= 0) {
++ if (got)
++ ceph_put_cap_refs(ci, got);
++ while (!list_empty(page_list)) {
++ page = list_entry(page_list->prev,
++ struct page, lru);
++ list_del(&page->lru);
++ put_page(page);
++ }
++ return ret;
++ }
++ }
+
+ off = (u64) page_offset(page);
+
+@@ -338,15 +363,18 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
+ CEPH_OSD_FLAG_READ, NULL,
+ ci->i_truncate_seq, ci->i_truncate_size,
+ false);
+- if (IS_ERR(req))
+- return PTR_ERR(req);
++ if (IS_ERR(req)) {
++ ret = PTR_ERR(req);
++ goto out;
++ }
+
+ /* build page vector */
+ nr_pages = calc_pages_for(0, len);
+ pages = kmalloc(sizeof(*pages) * nr_pages, GFP_KERNEL);
+- ret = -ENOMEM;
+- if (!pages)
+- goto out;
++ if (!pages) {
++ ret = -ENOMEM;
++ goto out_put;
++ }
+ for (i = 0; i < nr_pages; ++i) {
+ page = list_entry(page_list->prev, struct page, lru);
+ BUG_ON(PageLocked(page));
+@@ -379,6 +407,12 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
+ if (ret < 0)
+ goto out_pages;
+ ceph_osdc_put_request(req);
++
++ /* After adding locked pages to page cache, the inode holds cache cap.
++ * So we can drop our cap refs. */
++ if (got)
++ ceph_put_cap_refs(ci, got);
++
+ return nr_pages;
+
+ out_pages:
+@@ -387,8 +421,11 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
+ unlock_page(pages[i]);
+ }
+ ceph_put_page_vector(pages, nr_pages, false);
+-out:
++out_put:
+ ceph_osdc_put_request(req);
++out:
++ if (got)
++ ceph_put_cap_refs(ci, got);
+ return ret;
+ }
+
+@@ -425,7 +462,6 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
+ rc = start_read(inode, page_list, max);
+ if (rc < 0)
+ goto out;
+- BUG_ON(rc == 0);
+ }
+ out:
+ ceph_fscache_readpages_cancel(inode, page_list);
+@@ -1372,9 +1408,11 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got));
+
+ if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
+- ci->i_inline_version == CEPH_INLINE_NONE)
++ ci->i_inline_version == CEPH_INLINE_NONE) {
++ current->journal_info = vma->vm_file;
+ ret = filemap_fault(vma, vmf);
+- else
++ current->journal_info = NULL;
++ } else
+ ret = -EAGAIN;
+
+ dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n",
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index f3f21105b860..03951f90ecf7 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -2479,6 +2479,27 @@ static void check_max_size(struct inode *inode, loff_t endoff)
+ ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
+ }
+
++int ceph_try_get_caps(struct ceph_inode_info *ci, int need, int want, int *got)
++{
++ int ret, err = 0;
++
++ BUG_ON(need & ~CEPH_CAP_FILE_RD);
++ BUG_ON(want & ~(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));
++ ret = ceph_pool_perm_check(ci, need);
++ if (ret < 0)
++ return ret;
++
++ ret = try_get_cap_refs(ci, need, want, 0, true, got, &err);
++ if (ret) {
++ if (err == -EAGAIN) {
++ ret = 0;
++ } else if (err < 0) {
++ ret = err;
++ }
++ }
++ return ret;
++}
++
+ /*
+ * Wait for caps, and take cap references. If we can't get a WR cap
+ * due to a small max_size, make sure we check_max_size (and possibly
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index f995e3528a33..ca3f630db90f 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -1249,8 +1249,9 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
+ dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
+ inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
+ ceph_cap_string(got));
+-
++ current->journal_info = filp;
+ ret = generic_file_read_iter(iocb, to);
++ current->journal_info = NULL;
+ }
+ dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
+ inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index 3e3fa9163059..622d5dd9f616 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -905,6 +905,8 @@ extern int ceph_encode_dentry_release(void **p, struct dentry *dn,
+
+ extern int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
+ loff_t endoff, int *got, struct page **pinned_page);
++extern int ceph_try_get_caps(struct ceph_inode_info *ci,
++ int need, int want, int *got);
+
+ /* for counting open files by mode */
+ extern void __ceph_get_fmode(struct ceph_inode_info *ci, int mode);
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 94661cf77ae8..b3830f7ab260 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -241,6 +241,7 @@ struct smb_version_operations {
+ /* verify the message */
+ int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
+ bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
++ int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *);
+ void (*downgrade_oplock)(struct TCP_Server_Info *,
+ struct cifsInodeInfo *, bool);
+ /* process transaction2 response */
+@@ -1314,12 +1315,19 @@ struct mid_q_entry {
+ void *callback_data; /* general purpose pointer for callback */
+ void *resp_buf; /* pointer to received SMB header */
+ int mid_state; /* wish this were enum but can not pass to wait_event */
++ unsigned int mid_flags;
+ __le16 command; /* smb command code */
+ bool large_buf:1; /* if valid response, is pointer to large buf */
+ bool multiRsp:1; /* multiple trans2 responses for one request */
+ bool multiEnd:1; /* both received */
+ };
+
++struct close_cancelled_open {
++ struct cifs_fid fid;
++ struct cifs_tcon *tcon;
++ struct work_struct work;
++};
++
+ /* Make code in transport.c a little cleaner by moving
+ update of optional stats into function below */
+ #ifdef CONFIG_CIFS_STATS2
+@@ -1451,6 +1459,9 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
+ #define MID_RESPONSE_MALFORMED 0x10
+ #define MID_SHUTDOWN 0x20
+
++/* Flags */
++#define MID_WAIT_CANCELLED 1 /* Cancelled while waiting for response */
++
+ /* Types of response buffer returned from SendReceive2 */
+ #define CIFS_NO_BUFFER 0 /* Response buffer not returned */
+ #define CIFS_SMALL_BUFFER 1
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index e3fed9249a04..586fdac05ec2 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -1423,6 +1423,8 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+
+ length = discard_remaining_data(server);
+ dequeue_mid(mid, rdata->result);
++ mid->resp_buf = server->smallbuf;
++ server->smallbuf = NULL;
+ return length;
+ }
+
+@@ -1534,6 +1536,8 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+ return cifs_readv_discard(server, mid);
+
+ dequeue_mid(mid, false);
++ mid->resp_buf = server->smallbuf;
++ server->smallbuf = NULL;
+ return length;
+ }
+
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 893be0722643..b8015de88e8c 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -882,10 +882,19 @@ cifs_demultiplex_thread(void *p)
+
+ server->lstrp = jiffies;
+ if (mid_entry != NULL) {
++ if ((mid_entry->mid_flags & MID_WAIT_CANCELLED) &&
++ mid_entry->mid_state == MID_RESPONSE_RECEIVED &&
++ server->ops->handle_cancelled_mid)
++ server->ops->handle_cancelled_mid(
++ mid_entry->resp_buf,
++ server);
++
+ if (!mid_entry->multiRsp || mid_entry->multiEnd)
+ mid_entry->callback(mid_entry);
+- } else if (!server->ops->is_oplock_break ||
+- !server->ops->is_oplock_break(buf, server)) {
++ } else if (server->ops->is_oplock_break &&
++ server->ops->is_oplock_break(buf, server)) {
++ cifs_dbg(FYI, "Received oplock break\n");
++ } else {
+ cifs_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
+ atomic_read(&midCount));
+ cifs_dump_mem("Received Data is: ", buf,
+diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
+index 3d383489b9cf..97307808ae42 100644
+--- a/fs/cifs/smb2misc.c
++++ b/fs/cifs/smb2misc.c
+@@ -654,3 +654,47 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
+ cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
+ return false;
+ }
++
++void
++smb2_cancelled_close_fid(struct work_struct *work)
++{
++ struct close_cancelled_open *cancelled = container_of(work,
++ struct close_cancelled_open, work);
++
++ cifs_dbg(VFS, "Close unmatched open\n");
++
++ SMB2_close(0, cancelled->tcon, cancelled->fid.persistent_fid,
++ cancelled->fid.volatile_fid);
++ cifs_put_tcon(cancelled->tcon);
++ kfree(cancelled);
++}
++
++int
++smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
++{
++ struct smb2_hdr *hdr = (struct smb2_hdr *)buffer;
++ struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer;
++ struct cifs_tcon *tcon;
++ struct close_cancelled_open *cancelled;
++
++ if (hdr->Command != SMB2_CREATE || hdr->Status != STATUS_SUCCESS)
++ return 0;
++
++ cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
++ if (!cancelled)
++ return -ENOMEM;
++
++ tcon = smb2_find_smb_tcon(server, hdr->SessionId, hdr->TreeId);
++ if (!tcon) {
++ kfree(cancelled);
++ return -ENOENT;
++ }
++
++ cancelled->fid.persistent_fid = rsp->PersistentFileId;
++ cancelled->fid.volatile_fid = rsp->VolatileFileId;
++ cancelled->tcon = tcon;
++ INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
++ queue_work(cifsiod_wq, &cancelled->work);
++
++ return 0;
++}
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 5d456ebb3813..007abf7195af 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -1565,6 +1565,7 @@ struct smb_version_operations smb20_operations = {
+ .clear_stats = smb2_clear_stats,
+ .print_stats = smb2_print_stats,
+ .is_oplock_break = smb2_is_valid_oplock_break,
++ .handle_cancelled_mid = smb2_handle_cancelled_mid,
+ .downgrade_oplock = smb2_downgrade_oplock,
+ .need_neg = smb2_need_neg,
+ .negotiate = smb2_negotiate,
+@@ -1645,6 +1646,7 @@ struct smb_version_operations smb21_operations = {
+ .clear_stats = smb2_clear_stats,
+ .print_stats = smb2_print_stats,
+ .is_oplock_break = smb2_is_valid_oplock_break,
++ .handle_cancelled_mid = smb2_handle_cancelled_mid,
+ .downgrade_oplock = smb2_downgrade_oplock,
+ .need_neg = smb2_need_neg,
+ .negotiate = smb2_negotiate,
+@@ -1727,6 +1729,7 @@ struct smb_version_operations smb30_operations = {
+ .print_stats = smb2_print_stats,
+ .dump_share_caps = smb2_dump_share_caps,
+ .is_oplock_break = smb2_is_valid_oplock_break,
++ .handle_cancelled_mid = smb2_handle_cancelled_mid,
+ .downgrade_oplock = smb2_downgrade_oplock,
+ .need_neg = smb2_need_neg,
+ .negotiate = smb2_negotiate,
+@@ -1815,6 +1818,7 @@ struct smb_version_operations smb311_operations = {
+ .print_stats = smb2_print_stats,
+ .dump_share_caps = smb2_dump_share_caps,
+ .is_oplock_break = smb2_is_valid_oplock_break,
++ .handle_cancelled_mid = smb2_handle_cancelled_mid,
+ .downgrade_oplock = smb2_downgrade_oplock,
+ .need_neg = smb2_need_neg,
+ .negotiate = smb2_negotiate,
+diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
+index f2d511a6971b..04ef6e914597 100644
+--- a/fs/cifs/smb2proto.h
++++ b/fs/cifs/smb2proto.h
+@@ -48,6 +48,10 @@ extern struct mid_q_entry *smb2_setup_request(struct cifs_ses *ses,
+ struct smb_rqst *rqst);
+ extern struct mid_q_entry *smb2_setup_async_request(
+ struct TCP_Server_Info *server, struct smb_rqst *rqst);
++extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server,
++ __u64 ses_id);
++extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server,
++ __u64 ses_id, __u32 tid);
+ extern int smb2_calc_signature(struct smb_rqst *rqst,
+ struct TCP_Server_Info *server);
+ extern int smb3_calc_signature(struct smb_rqst *rqst,
+@@ -158,6 +162,9 @@ extern int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
+ extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
+ const u64 persistent_fid, const u64 volatile_fid,
+ const __u8 oplock_level);
++extern int smb2_handle_cancelled_mid(char *buffer,
++ struct TCP_Server_Info *server);
++void smb2_cancelled_close_fid(struct work_struct *work);
+ extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_file_id, u64 volatile_file_id,
+ struct kstatfs *FSData);
+diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
+index bc9a7b634643..390b0d0198f8 100644
+--- a/fs/cifs/smb2transport.c
++++ b/fs/cifs/smb2transport.c
+@@ -115,22 +115,68 @@ smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
+ }
+
+ static struct cifs_ses *
+-smb2_find_smb_ses(struct smb2_hdr *smb2hdr, struct TCP_Server_Info *server)
++smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
+ {
+ struct cifs_ses *ses;
+
+- spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+- if (ses->Suid != smb2hdr->SessionId)
++ if (ses->Suid != ses_id)
+ continue;
+- spin_unlock(&cifs_tcp_ses_lock);
+ return ses;
+ }
++
++ return NULL;
++}
++
++struct cifs_ses *
++smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
++{
++ struct cifs_ses *ses;
++
++ spin_lock(&cifs_tcp_ses_lock);
++ ses = smb2_find_smb_ses_unlocked(server, ses_id);
+ spin_unlock(&cifs_tcp_ses_lock);
+
++ return ses;
++}
++
++static struct cifs_tcon *
++smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32 tid)
++{
++ struct cifs_tcon *tcon;
++
++ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
++ if (tcon->tid != tid)
++ continue;
++ ++tcon->tc_count;
++ return tcon;
++ }
++
+ return NULL;
+ }
+
++/*
++ * Obtain tcon corresponding to the tid in the given
++ * cifs_ses
++ */
++
++struct cifs_tcon *
++smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid)
++{
++ struct cifs_ses *ses;
++ struct cifs_tcon *tcon;
++
++ spin_lock(&cifs_tcp_ses_lock);
++ ses = smb2_find_smb_ses_unlocked(server, ses_id);
++ if (!ses) {
++ spin_unlock(&cifs_tcp_ses_lock);
++ return NULL;
++ }
++ tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid);
++ spin_unlock(&cifs_tcp_ses_lock);
++
++ return tcon;
++}
+
+ int
+ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
+@@ -142,7 +188,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
+ struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base;
+ struct cifs_ses *ses;
+
+- ses = smb2_find_smb_ses(smb2_pdu, server);
++ ses = smb2_find_smb_ses(server, smb2_pdu->SessionId);
+ if (!ses) {
+ cifs_dbg(VFS, "%s: Could not find session\n", __func__);
+ return 0;
+@@ -359,7 +405,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
+ struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base;
+ struct cifs_ses *ses;
+
+- ses = smb2_find_smb_ses(smb2_pdu, server);
++ ses = smb2_find_smb_ses(server, smb2_pdu->SessionId);
+ if (!ses) {
+ cifs_dbg(VFS, "%s: Could not find session\n", __func__);
+ return 0;
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+index 206a597b2293..cc26d4138d70 100644
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -727,9 +727,11 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
+
+ rc = wait_for_response(ses->server, midQ);
+ if (rc != 0) {
++ cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid);
+ send_cancel(ses->server, buf, midQ);
+ spin_lock(&GlobalMid_Lock);
+ if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
++ midQ->mid_flags |= MID_WAIT_CANCELLED;
+ midQ->callback = DeleteMidQEntry;
+ spin_unlock(&GlobalMid_Lock);
+ cifs_small_buf_release(buf);
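
Taken together, the cifs hunks above mark a mid as MID_WAIT_CANCELLED when SendReceive2() stops waiting, and add a handle_cancelled_mid hook so that a successful SMB2 CREATE reply arriving after the cancellation still gets its file handle closed via queued work instead of leaking on the server. The following is a minimal sketch of the dispatcher side of that idea in plain C, with illustrative names and the work queue reduced to a direct call; it is not the cifs code.

#include <stdbool.h>
#include <stdio.h>

struct request {
        bool wait_cancelled;    /* set by the waiter when it gives up */
};

static void close_handle(int handle)
{
        printf("deferred close of handle %d\n", handle);
}

static void dispatch_reply(struct request *req, int handle_from_reply)
{
        if (req->wait_cancelled) {
                /* nobody is waiting for this reply: clean up the
                 * server-side state the reply describes ourselves */
                close_handle(handle_from_reply);
                return;
        }
        /* normal path: hand the reply to the waiter (omitted) */
}

int main(void)
{
        struct request req = { .wait_cancelled = true };

        dispatch_reply(&req, 42);
        return 0;
}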
+diff --git a/fs/timerfd.c b/fs/timerfd.c
+index 9ae4abb4110b..ab8dd1538381 100644
+--- a/fs/timerfd.c
++++ b/fs/timerfd.c
+@@ -40,6 +40,7 @@ struct timerfd_ctx {
+ short unsigned settime_flags; /* to show in fdinfo */
+ struct rcu_head rcu;
+ struct list_head clist;
++ spinlock_t cancel_lock;
+ bool might_cancel;
+ };
+
+@@ -112,7 +113,7 @@ void timerfd_clock_was_set(void)
+ rcu_read_unlock();
+ }
+
+-static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
++static void __timerfd_remove_cancel(struct timerfd_ctx *ctx)
+ {
+ if (ctx->might_cancel) {
+ ctx->might_cancel = false;
+@@ -122,6 +123,13 @@ static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
+ }
+ }
+
++static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
++{
++ spin_lock(&ctx->cancel_lock);
++ __timerfd_remove_cancel(ctx);
++ spin_unlock(&ctx->cancel_lock);
++}
++
+ static bool timerfd_canceled(struct timerfd_ctx *ctx)
+ {
+ if (!ctx->might_cancel || ctx->moffs.tv64 != KTIME_MAX)
+@@ -132,6 +140,7 @@ static bool timerfd_canceled(struct timerfd_ctx *ctx)
+
+ static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)
+ {
++ spin_lock(&ctx->cancel_lock);
+ if ((ctx->clockid == CLOCK_REALTIME ||
+ ctx->clockid == CLOCK_REALTIME_ALARM) &&
+ (flags & TFD_TIMER_ABSTIME) && (flags & TFD_TIMER_CANCEL_ON_SET)) {
+@@ -141,9 +150,10 @@ static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)
+ list_add_rcu(&ctx->clist, &cancel_list);
+ spin_unlock(&cancel_lock);
+ }
+- } else if (ctx->might_cancel) {
+- timerfd_remove_cancel(ctx);
++ } else {
++ __timerfd_remove_cancel(ctx);
+ }
++ spin_unlock(&ctx->cancel_lock);
+ }
+
+ static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
+@@ -400,6 +410,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
+ return -ENOMEM;
+
+ init_waitqueue_head(&ctx->wqh);
++ spin_lock_init(&ctx->cancel_lock);
+ ctx->clockid = clockid;
+
+ if (isalarm(ctx))
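
The timerfd hunk adds a per-context cancel_lock and splits timerfd_remove_cancel() into a locked wrapper plus an unlocked __timerfd_remove_cancel() helper, so timerfd_setup_cancel(), which now takes the lock for the whole update of might_cancel, can reuse the helper without deadlocking. A pthread sketch of that wrapper/helper split, with illustrative names:

#include <pthread.h>
#include <stdbool.h>

struct ctx {
        pthread_mutex_t cancel_lock;
        bool might_cancel;
};

/* caller must hold cancel_lock */
static void remove_cancel_nolock(struct ctx *c)
{
        if (c->might_cancel)
                c->might_cancel = false;   /* the kernel also unlinks from cancel_list */
}

static void remove_cancel(struct ctx *c)   /* external entry point */
{
        pthread_mutex_lock(&c->cancel_lock);
        remove_cancel_nolock(c);
        pthread_mutex_unlock(&c->cancel_lock);
}

static void setup_cancel(struct ctx *c, bool want_cancel)
{
        pthread_mutex_lock(&c->cancel_lock);
        if (want_cancel)
                c->might_cancel = true;
        else
                remove_cancel_nolock(c);   /* lock already held, so use the helper */
        pthread_mutex_unlock(&c->cancel_lock);
}

int main(void)
{
        struct ctx c = { .might_cancel = false };

        pthread_mutex_init(&c.cancel_lock, NULL);
        setup_cancel(&c, true);
        remove_cancel(&c);
        pthread_mutex_destroy(&c.cancel_lock);
        return 0;
}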
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 217fd2e7f435..99c6c568bc55 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -1441,14 +1441,12 @@ static void cpuhp_store_callbacks(enum cpuhp_state state,
+ /* (Un)Install the callbacks for further cpu hotplug operations */
+ struct cpuhp_step *sp;
+
+- mutex_lock(&cpuhp_state_mutex);
+ sp = cpuhp_get_step(state);
+ sp->startup.single = startup;
+ sp->teardown.single = teardown;
+ sp->name = name;
+ sp->multi_instance = multi_instance;
+ INIT_HLIST_HEAD(&sp->list);
+- mutex_unlock(&cpuhp_state_mutex);
+ }
+
+ static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
+@@ -1518,16 +1516,13 @@ static int cpuhp_reserve_state(enum cpuhp_state state)
+ {
+ enum cpuhp_state i;
+
+- mutex_lock(&cpuhp_state_mutex);
+ for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
+ if (cpuhp_ap_states[i].name)
+ continue;
+
+ cpuhp_ap_states[i].name = "Reserved";
+- mutex_unlock(&cpuhp_state_mutex);
+ return i;
+ }
+- mutex_unlock(&cpuhp_state_mutex);
+ WARN(1, "No more dynamic states available for CPU hotplug\n");
+ return -ENOSPC;
+ }
+@@ -1544,6 +1539,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
+ return -EINVAL;
+
+ get_online_cpus();
++ mutex_lock(&cpuhp_state_mutex);
+
+ if (!invoke || !sp->startup.multi)
+ goto add_node;
+@@ -1568,11 +1564,10 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
+ }
+ add_node:
+ ret = 0;
+- mutex_lock(&cpuhp_state_mutex);
+ hlist_add_head(node, &sp->list);
+- mutex_unlock(&cpuhp_state_mutex);
+
+ err:
++ mutex_unlock(&cpuhp_state_mutex);
+ put_online_cpus();
+ return ret;
+ }
+@@ -1601,6 +1596,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
+ return -EINVAL;
+
+ get_online_cpus();
++ mutex_lock(&cpuhp_state_mutex);
+
+ /* currently assignments for the ONLINE state are possible */
+ if (state == CPUHP_AP_ONLINE_DYN) {
+@@ -1636,6 +1632,8 @@ int __cpuhp_setup_state(enum cpuhp_state state,
+ }
+ }
+ out:
++ mutex_unlock(&cpuhp_state_mutex);
++
+ put_online_cpus();
+ if (!ret && dyn_state)
+ return state;
+@@ -1655,6 +1653,8 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
+ return -EINVAL;
+
+ get_online_cpus();
++ mutex_lock(&cpuhp_state_mutex);
++
+ if (!invoke || !cpuhp_get_teardown_cb(state))
+ goto remove;
+ /*
+@@ -1671,7 +1671,6 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
+ }
+
+ remove:
+- mutex_lock(&cpuhp_state_mutex);
+ hlist_del(node);
+ mutex_unlock(&cpuhp_state_mutex);
+ put_online_cpus();
+@@ -1696,6 +1695,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
+ BUG_ON(cpuhp_cb_check(state));
+
+ get_online_cpus();
++ mutex_lock(&cpuhp_state_mutex);
+
+ if (sp->multi_instance) {
+ WARN(!hlist_empty(&sp->list),
+@@ -1721,6 +1721,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
+ }
+ remove:
+ cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
++ mutex_unlock(&cpuhp_state_mutex);
+ put_online_cpus();
+ }
+ EXPORT_SYMBOL(__cpuhp_remove_state);