Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 2 Oct 2016 17:36:41 +0000 (10:36 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 2 Oct 2016 17:36:41 +0000 (10:36 -0700)
Pull networking fixes from David Miller:

 1) Fix wrong TCP checksums on MTU probing when checksum offloading is
    disabled, from Douglas Caetano dos Santos (an illustrative
    checksum-folding sketch follows this list).

 2) Fix qdisc backlog updates in the qfq and sfb schedulers, from Cong
    Wang (see the toy queue sketch after the shortlog below).

 3) Fix wrong route lookup flow key protocol value in
    ip6gre_xmit_other(), from Lance Richardson.

 4) Fix scheduling while atomic in the ipv4 and ipv6 multicast routing
    code, from Nikolay Aleksandrov.

 5) Fix packet alignment in the fec driver, from Eric Nelson.

 6) Fix a performance regression in sctp caused by struct layout and
    cache misses, from Xin Long (see the struct-packing sketch below).

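Fix (1) rests on a basic property of the Internet checksum: the sum
over a concatenation equals the sum accumulated fragment by fragment,
so a probe packet assembled from several skbs must keep folding each
fragment's data into the running sum instead of reusing the first
fragment's checksum. A minimal userspace sketch of that property (an
ad-hoc reimplementation, not the kernel's csum_partial()/csum_fold()):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* accumulate 16-bit big-endian words into a 32-bit running sum */
    static uint32_t csum_accumulate(const uint8_t *buf, size_t len,
                                    uint32_t sum)
    {
            while (len > 1) {
                    sum += (uint32_t)(buf[0] << 8 | buf[1]);
                    buf += 2;
                    len -= 2;
            }
            if (len)                        /* trailing odd byte */
                    sum += (uint32_t)buf[0] << 8;
            return sum;
    }

    /* fold the carries and complement, IP-checksum style */
    static uint16_t csum_fold(uint32_t sum)
    {
            while (sum >> 16)
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;
    }

    int main(void)
    {
            uint8_t a[] = "hello ", b[] = "world"; /* two "fragments" */
            uint8_t all[16];

            memcpy(all, a, 6);
            memcpy(all + 6, b, 5);

            /* whole buffer vs. fragment-by-fragment accumulation:
             * identical, since the first fragment has even length */
            uint16_t whole = csum_fold(csum_accumulate(all, 11, 0));
            uint16_t parts = csum_fold(csum_accumulate(b, 5,
                                       csum_accumulate(a, 6, 0)));

            printf("whole=%#x parts=%#x\n", whole, parts);
            return 0;
    }
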
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  sctp: fix the issue sctp_diag uses lock_sock in rcu_read_lock
  sctp: change to check peer prsctp_capable when using prsctp polices
  sctp: remove prsctp_param from sctp_chunk
  sctp: move sent_count to the memory hole in sctp_chunk
  tg3: Avoid NULL pointer dereference in tg3_io_error_detected()
  act_ife: Fix false encoding
  act_ife: Fix external mac header on encode
  VSOCK: Don't dec ack backlog twice for rejected connections
  Revert "net: ethernet: bcmgenet: use phydev from struct net_device"
  net: fec: align IP header in hardware
  net: fec: remove QUIRK_HAS_RACC from i.mx27
  net: fec: remove QUIRK_HAS_RACC from i.mx25
  ipmr, ip6mr: fix scheduling while atomic and a deadlock with ipmr_get_route
  ip6_gre: fix flowi6_proto value in ip6gre_xmit_other()
  tcp: fix a compile error in DBGUNDO()
  tcp: fix wrong checksum calculation on MTU probing
  sch_sfb: keep backlog updated with qlen
  sch_qfq: keep backlog updated with qlen
  can: dev: fix deadlock reported after bus-off
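
The two "keep backlog updated with qlen" commits above restore a simple
invariant: a qdisc's byte backlog must change in lockstep with its
packet count, otherwise the statistics seen by parent qdiscs drift. A
toy queue making that invariant explicit (hypothetical types, not the
kernel's struct Qdisc or its qdisc_qstats_backlog_inc()/_dec() paths):

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    struct pkt { size_t len; struct pkt *next; };

    struct toy_qdisc {
            struct pkt *head, *tail;
            unsigned int qlen;      /* packets queued */
            size_t backlog;         /* bytes queued; must track qlen */
    };

    static void enqueue(struct toy_qdisc *q, struct pkt *p)
    {
            p->next = NULL;
            if (q->tail)
                    q->tail->next = p;
            else
                    q->head = p;
            q->tail = p;
            q->qlen++;
            q->backlog += p->len;   /* the update the buggy paths missed */
    }

    static struct pkt *dequeue(struct toy_qdisc *q)
    {
            struct pkt *p = q->head;

            if (!p)
                    return NULL;
            q->head = p->next;
            if (!q->head)
                    q->tail = NULL;
            q->qlen--;
            q->backlog -= p->len;   /* keep the byte count in lockstep */
            return p;
    }

    int main(void)
    {
            struct toy_qdisc q = { 0 };
            struct pkt p1 = { .len = 100 }, p2 = { .len = 60 };

            enqueue(&q, &p1);
            enqueue(&q, &p2);
            dequeue(&q);
            dequeue(&q);
            assert(q.qlen == 0 && q.backlog == 0); /* empty => no bytes */
            printf("qlen=%u backlog=%zu\n", q.qlen, q.backlog);
            return 0;
    }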

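Fix (6) shows up in the sctp entries above ("remove prsctp_param",
"move sent_count to the memory hole in sctp_chunk"): on a 64-bit
build, moving a 4-byte member into an existing alignment hole shrinks
the struct and keeps hot fields on fewer cache lines, the kind of
waste tools like pahole report. A sketch with a made-up layout, not
the real struct sctp_chunk:

    #include <stdio.h>

    struct before {                 /* LP64 layout */
            void *a;                /* 8 bytes                    */
            int   b;                /* 4 bytes + 4-byte hole      */
            void *c;                /* 8 bytes                    */
            int   sent_count;       /* 4 bytes + 4-byte padding   */
    };                              /* typically 32 bytes         */

    struct after {
            void *a;
            int   b;
            int   sent_count;       /* fills the hole next to 'b' */
            void *c;
    };                              /* typically 24 bytes         */

    int main(void)
    {
            printf("before=%zu after=%zu\n",
                   sizeof(struct before), sizeof(struct after));
            return 0;
    }
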
93 files changed:
.mailmap
Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
MAINTAINERS
Makefile
arch/arm64/include/asm/debug-monitors.h
arch/arm64/kernel/kgdb.c
arch/arm64/kernel/smp.c
arch/mips/Kconfig
arch/mips/Kconfig.debug
arch/mips/Makefile
arch/mips/ath79/clock.c
arch/mips/cavium-octeon/octeon-platform.c
arch/mips/include/asm/asmmacro.h
arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
arch/mips/kernel/mips-r2-to-r6-emul.c
arch/mips/kernel/process.c
arch/mips/kernel/setup.c
arch/mips/kernel/smp.c
arch/mips/kernel/uprobes.c
arch/mips/kernel/vdso.c
arch/mips/math-emu/dsemul.c
arch/mips/mm/c-r4k.c
arch/mips/mm/init.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/sh/include/asm/atomic-llsc.h
arch/x86/events/intel/bts.c
arch/x86/mm/pageattr.c
arch/x86/platform/efi/efi_64.c
block/blk-mq.c
block/blk-throttle.c
crypto/rsa-pkcs1pad.c
drivers/acpi/nfit/core.c
drivers/base/regmap/regmap.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/nouveau/include/nvkm/core/device.h
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/udl/udl_fb.c
drivers/i2c/busses/i2c-eg20t.c
drivers/i2c/busses/i2c-qup.c
drivers/i2c/muxes/i2c-mux-pca954x.c
drivers/input/joydev.c
drivers/input/touchscreen/silead.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-mips-gic.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/dw_mmc.h
drivers/mtd/nand/davinci_nand.c
drivers/mtd/nand/mtk_ecc.c
drivers/mtd/nand/mtk_nand.c
drivers/mtd/nand/mxc_nand.c
drivers/mtd/nand/omap2.c
drivers/nvdimm/core.c
drivers/nvdimm/nd.h
drivers/nvdimm/region_devs.c
drivers/nvme/host/rdma.c
drivers/scsi/hosts.c
drivers/scsi/scsi.c
drivers/scsi/scsi_priv.h
fs/btrfs/extent-tree.c
fs/btrfs/ioctl.c
fs/configfs/file.c
fs/ocfs2/aops.c
include/linux/dma-mapping.h
include/linux/pagemap.h
include/linux/property.h
include/linux/swap.h
include/scsi/scsi_host.h
kernel/cgroup.c
kernel/cpuset.c
kernel/events/core.c
kernel/irq/chip.c
kernel/trace/trace.c
lib/Kconfig.debug
lib/radix-tree.c
mm/filemap.c
mm/huge_memory.c
mm/ksm.c
mm/memory.c
mm/memory_hotplug.c
mm/shmem.c
mm/vmscan.c
mm/workingset.c
scripts/recordmcount.c
scripts/recordmcount.pl
security/keys/encrypted-keys/encrypted.c
tools/testing/nvdimm/test/nfit.c
tools/testing/radix-tree/Makefile
tools/testing/radix-tree/multiorder.c

index de22daefd9daecbf956d9e874aa5664355416597..1dab0a156489eb896ff37fef56cf54eddc29140f 100644
--- a/.mailmap
+++ b/.mailmap
@@ -69,6 +69,7 @@ James Bottomley <jejb@mulgrave.(none)>
 James Bottomley <jejb@titanic.il.steeleye.com>
 James E Wilson <wilson@specifix.com>
 James Ketrenos <jketreno@io.(none)>
+Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
 <javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
 Jean Tourrilhes <jt@hpl.hp.com>
 Jeff Garzik <jgarzik@pretzel.yyz.us>
index 1112e0d794e172c43bb09ff866742d0a7fa314b5..820fee4b77b601e7f4c5521e854a06d2dd78408b 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
@@ -13,6 +13,7 @@ Required properties:
 - touchscreen-size-y     : See touchscreen.txt
 
 Optional properties:
+- firmware-name                  : File basename (string) for board specific firmware
 - touchscreen-inverted-x  : See touchscreen.txt
 - touchscreen-inverted-y  : See touchscreen.txt
 - touchscreen-swapped-x-y : See touchscreen.txt
index 01bff8ea28d88bf8a667100fa37dd376e049634e..f593300e310b80f4ca41c37dd9fc038003f42a08 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8745,7 +8745,7 @@ F:        drivers/oprofile/
 F:     include/linux/oprofile.h
 
 ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
-M:     Mark Fasheh <mfasheh@suse.com>
+M:     Mark Fasheh <mfasheh@versity.com>
 M:     Joel Becker <jlbec@evilplan.org>
 L:     ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
 W:     http://ocfs2.wiki.kernel.org
@@ -11626,7 +11626,7 @@ F:      Documentation/devicetree/bindings/thermal/
 THERMAL/CPU_COOLING
 M:     Amit Daniel Kachhap <amit.kachhap@gmail.com>
 M:     Viresh Kumar <viresh.kumar@linaro.org>
-M:     Javi Merino <javi.merino@arm.com>
+M:     Javi Merino <javi.merino@kernel.org>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 F:     Documentation/thermal/cpu-cooling-api.txt
index 74e22c2f408bc9c208aaaaabe370e2725f615535..ce2ddb3fec9845ed84d61213ea396390905e8a34 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
index 4b6b3f72a2158a06e07f36529912c81c0b5fb43b..b71420a12f2613456c4233fb96596f4f0fe72e44 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -61,8 +61,6 @@
 
 #define AARCH64_BREAK_KGDB_DYN_DBG     \
        (AARCH64_BREAK_MON | (KGDB_DYN_DBG_BRK_IMM << 5))
-#define KGDB_DYN_BRK_INS_BYTE(x)       \
-       ((AARCH64_BREAK_KGDB_DYN_DBG >> (8 * (x))) & 0xff)
 
 #define CACHE_FLUSH_IS_SAFE            1
 
index 8c57f6496e56dc65f624569f9496f8907991516b..e017a9493b92e907235cc19e7df7e53397c3f56e 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/bug.h>
 #include <linux/irq.h>
 #include <linux/kdebug.h>
 #include <linux/kgdb.h>
 #include <linux/kprobes.h>
+#include <asm/debug-monitors.h>
+#include <asm/insn.h>
 #include <asm/traps.h>
 
 struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
@@ -338,15 +341,24 @@ void kgdb_arch_exit(void)
        unregister_die_notifier(&kgdb_notifier);
 }
 
-/*
- * ARM instructions are always in LE.
- * Break instruction is encoded in LE format
- */
-struct kgdb_arch arch_kgdb_ops = {
-       .gdb_bpt_instr = {
-               KGDB_DYN_BRK_INS_BYTE(0),
-               KGDB_DYN_BRK_INS_BYTE(1),
-               KGDB_DYN_BRK_INS_BYTE(2),
-               KGDB_DYN_BRK_INS_BYTE(3),
-       }
-};
+struct kgdb_arch arch_kgdb_ops;
+
+int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+{
+       int err;
+
+       BUILD_BUG_ON(AARCH64_INSN_SIZE != BREAK_INSTR_SIZE);
+
+       err = aarch64_insn_read((void *)bpt->bpt_addr, (u32 *)bpt->saved_instr);
+       if (err)
+               return err;
+
+       return aarch64_insn_write((void *)bpt->bpt_addr,
+                       (u32)AARCH64_BREAK_KGDB_DYN_DBG);
+}
+
+int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+{
+       return aarch64_insn_write((void *)bpt->bpt_addr,
+                       *(u32 *)bpt->saved_instr);
+}
index d93d433525047e41c20760d0ec24e10524333804..3ff173e9258230921fc26f895efbdaee38728e1e 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -201,12 +201,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
        return ret;
 }
 
-static void smp_store_cpu_info(unsigned int cpuid)
-{
-       store_cpu_topology(cpuid);
-       numa_store_cpu_info(cpuid);
-}
-
 /*
  * This is the secondary CPU boot entry.  We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
@@ -254,7 +248,7 @@ asmlinkage void secondary_start_kernel(void)
         */
        notify_cpu_starting(cpu);
 
-       smp_store_cpu_info(cpu);
+       store_cpu_topology(cpu);
 
        /*
         * OK, now it's safe to let the boot CPU continue.  Wait for
@@ -689,10 +683,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 {
        int err;
        unsigned int cpu;
+       unsigned int this_cpu;
 
        init_cpu_topology();
 
-       smp_store_cpu_info(smp_processor_id());
+       this_cpu = smp_processor_id();
+       store_cpu_topology(this_cpu);
+       numa_store_cpu_info(this_cpu);
 
        /*
         * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
@@ -719,6 +716,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                        continue;
 
                set_cpu_present(cpu, true);
+               numa_store_cpu_info(cpu);
        }
 }
 
index 26388562e300ee1ad01acfe856117b369af3ca73..212ff92920d23e4a66d3b258815450e2934b594c 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -65,6 +65,7 @@ config MIPS
        select ARCH_CLOCKSOURCE_DATA
        select HANDLE_DOMAIN_IRQ
        select HAVE_EXIT_THREAD
+       select HAVE_REGS_AND_STACK_ACCESS_API
 
 menu "Machine selection"
 
index f0e314ceb8baa84d36ea6664772f71df46ad003f..7f975b20b20c713e6225d14e3984013f57fe5026 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -113,42 +113,6 @@ config SPINLOCK_TEST
        help
          Add several files to the debugfs to test spinlock speed.
 
-if CPU_MIPSR6
-
-choice
-       prompt "Compact branch policy"
-       default MIPS_COMPACT_BRANCHES_OPTIMAL
-
-config MIPS_COMPACT_BRANCHES_NEVER
-       bool "Never (force delay slot branches)"
-       help
-         Pass the -mcompact-branches=never flag to the compiler in order to
-         force it to always emit branches with delay slots, and make no use
-         of the compact branch instructions introduced by MIPSr6. This is
-         useful if you suspect there may be an issue with compact branches in
-         either the compiler or the CPU.
-
-config MIPS_COMPACT_BRANCHES_OPTIMAL
-       bool "Optimal (use where beneficial)"
-       help
-         Pass the -mcompact-branches=optimal flag to the compiler in order for
-         it to make use of compact branch instructions where it deems them
-         beneficial, and use branches with delay slots elsewhere. This is the
-         default compiler behaviour, and should be used unless you have a
-         reason to choose otherwise.
-
-config MIPS_COMPACT_BRANCHES_ALWAYS
-       bool "Always (force compact branches)"
-       help
-         Pass the -mcompact-branches=always flag to the compiler in order to
-         force it to always emit compact branches, making no use of branch
-         instructions with delay slots. This can result in more compact code
-         which may be beneficial in some scenarios.
-
-endchoice
-
-endif # CPU_MIPSR6
-
 config SCACHE_DEBUGFS
        bool "L2 cache debugfs entries"
        depends on DEBUG_FS
index efd7a9dc93c4161a381081c569a5b4e91b182f5e..598ab2930fce67bb373827d7bbb09be35880a75e 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -203,10 +203,6 @@ endif
 toolchain-virt                         := $(call cc-option-yn,$(mips-cflags) -mvirt)
 cflags-$(toolchain-virt)               += -DTOOLCHAIN_SUPPORTS_VIRT
 
-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_NEVER)   += -mcompact-branches=never
-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_OPTIMAL) += -mcompact-branches=optimal
-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_ALWAYS)  += -mcompact-branches=always
-
 #
 # Firmware support
 #
index 2e7378467c5cb113e77068625fcca20d0a9b89b9..cc3a1e33a600ec7a8dd6f73ca12354955065d44d 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -96,7 +96,7 @@ static struct clk * __init ath79_reg_ffclk(const char *name,
        struct clk *clk;
 
        clk = clk_register_fixed_factor(NULL, name, parent_name, 0, mult, div);
-       if (!clk)
+       if (IS_ERR(clk))
                panic("failed to allocate %s clock structure", name);
 
        return clk;
index b31fbc9d6eae23ea37e07591f5c664e3f9346903..37a932d9148c214e2cb3fe9668af7330f3f8bf52 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -1059,7 +1059,7 @@ static int __init octeon_publish_devices(void)
 {
        return of_platform_bus_probe(NULL, octeon_ids, NULL);
 }
-device_initcall(octeon_publish_devices);
+arch_initcall(octeon_publish_devices);
 
 MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
 MODULE_LICENSE("GPL");
index 56584a659183db05b7dc5a8697ab8497bd4e45cd..83054f79f72aa62595923ca017936f6cd60075fd 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
        ldc1    $f28, THREAD_FPR28(\thread)
        ldc1    $f30, THREAD_FPR30(\thread)
        ctc1    \tmp, fcr31
+       .set    pop
        .endm
 
        .macro  fpu_restore_16odd thread
index 0cf5ac1f72452dc9c4f6980e500e5fde43e08b4c..8ff2cbdf2c3ee8f74a4b3e9df6c7369c1f39dee8 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
@@ -15,8 +15,8 @@
 static inline bool __should_swizzle_bits(volatile void *a)
 {
        extern const bool octeon_should_swizzle_table[];
+       u64 did = ((u64)(uintptr_t)a >> 40) & 0xff;
 
-       unsigned long did = ((unsigned long)a >> 40) & 0xff;
        return octeon_should_swizzle_table[did];
 }
 
@@ -29,7 +29,7 @@ static inline bool __should_swizzle_bits(volatile void *a)
 
 #define __should_swizzle_bits(a)       false
 
-static inline bool __should_swizzle_addr(unsigned long p)
+static inline bool __should_swizzle_addr(u64 p)
 {
        /* boot bus? */
        return ((p >> 40) & 0xff) == 0;
index 2f82bfa3a77347155a1526dde873387d01dcf138..c9f5769dfc8fca9d10c1ce4fe12ff62d3c4c8c66 100644
--- a/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
 #define CP0_EBASE $15, 1
 
        .macro  kernel_entry_setup
+#ifdef CONFIG_SMP
        mfc0    t0, CP0_EBASE
        andi    t0, t0, 0x3ff           # CPUNum
        beqz    t0, 1f
        # CPUs other than zero goto smp_bootstrap
        j       smp_bootstrap
+#endif /* CONFIG_SMP */
 
 1:
        .endm
index c3372cac6db220eebe2ff6eba44a1a71c01275be..0a7e10b5f9e39eb312e0e48b2eab1966df6854f3 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -1164,7 +1164,9 @@ fpu_emul:
                regs->regs[31] = r31;
                regs->cp0_epc = epc;
                if (!used_math()) {     /* First time FPU user.  */
+                       preempt_disable();
                        err = init_fpu();
+                       preempt_enable();
                        set_used_math();
                }
                lose_fpu(1);    /* Save FPU state for the emulator. */
index 7429ad09fbe3e1178ad37f0e34ed2f1651ac70ac..d2d061520a23000116cc9c6d9f0a7f4a077aa372 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -605,14 +605,14 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
                return -EOPNOTSUPP;
 
        /* Avoid inadvertently triggering emulation */
-       if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
-           !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
+       if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
+           !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
                return -EOPNOTSUPP;
-       if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
+       if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
                return -EOPNOTSUPP;
 
        /* FR = 0 not supported in MIPS R6 */
-       if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
+       if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
                return -EOPNOTSUPP;
 
        /* Proceed with the mode switch */
index 36cf8d65c47dbef02c408fd9b30428a8a4828998..3be0e6ba2797c4c5055982d908c9a277c975651d 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -87,6 +87,13 @@ void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
        int x = boot_mem_map.nr_map;
        int i;
 
+       /*
+        * If the region reaches the top of the physical address space, adjust
+        * the size slightly so that (start + size) doesn't overflow
+        */
+       if (start + size - 1 == (phys_addr_t)ULLONG_MAX)
+               --size;
+
        /* Sanity check */
        if (start + size < start) {
                pr_warn("Trying to add an invalid memory region, skipped\n");
index f95f094f36e4eb035d3edc919f6573c6ebc90f65..b0baf48951faabffac7a53fde37dc79748dea0ac 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -322,6 +322,9 @@ asmlinkage void start_secondary(void)
        cpumask_set_cpu(cpu, &cpu_coherent_mask);
        notify_cpu_starting(cpu);
 
+       cpumask_set_cpu(cpu, &cpu_callin_map);
+       synchronise_count_slave(cpu);
+
        set_cpu_online(cpu, true);
 
        set_cpu_sibling_map(cpu);
@@ -329,10 +332,6 @@ asmlinkage void start_secondary(void)
 
        calculate_cpu_foreign_map();
 
-       cpumask_set_cpu(cpu, &cpu_callin_map);
-
-       synchronise_count_slave(cpu);
-
        /*
         * irq will be enabled in ->smp_finish(), enabling it too early
         * is dangerous.
index 8452d933a6453c9a1f8655d56a3e0812605a6a12..1149b30c9aebf7a00a0723f46a823bbe8c602425 100644
--- a/arch/mips/kernel/uprobes.c
+++ b/arch/mips/kernel/uprobes.c
@@ -222,7 +222,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self,
                return NOTIFY_DONE;
 
        switch (val) {
-       case DIE_BREAK:
+       case DIE_UPROBE:
                if (uprobe_pre_sstep_notifier(regs))
                        return NOTIFY_STOP;
                break;
index 9abe447a4b480a8254633fd1afa9f3f7dbac319e..f9dbfb14af3358e67b3ef3e922709ab38ba0bdd8 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -39,16 +39,16 @@ static struct vm_special_mapping vdso_vvar_mapping = {
 static void __init init_vdso_image(struct mips_vdso_image *image)
 {
        unsigned long num_pages, i;
+       unsigned long data_pfn;
 
        BUG_ON(!PAGE_ALIGNED(image->data));
        BUG_ON(!PAGE_ALIGNED(image->size));
 
        num_pages = image->size / PAGE_SIZE;
 
-       for (i = 0; i < num_pages; i++) {
-               image->mapping.pages[i] =
-                       virt_to_page(image->data + (i * PAGE_SIZE));
-       }
+       data_pfn = __phys_to_pfn(__pa_symbol(image->data));
+       for (i = 0; i < num_pages; i++)
+               image->mapping.pages[i] = pfn_to_page(data_pfn + i);
 }
 
 static int __init init_vdso(void)
index 72a4642eee2c509ec9576b0bb2a7110cfe8b3584..4a094f7acb3dfe1d9b72f51bb28eb0c31afefd65 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -298,5 +298,6 @@ bool do_dsemulret(struct pt_regs *xcp)
        /* Set EPC to return to post-branch instruction */
        xcp->cp0_epc = current->thread.bd_emu_cont_pc;
        pr_debug("dsemulret to 0x%08lx\n", xcp->cp0_epc);
+       MIPS_FPU_EMU_INC_STATS(ds_emul);
        return true;
 }
index cd72805b64a764c0a33f7d7ffb8c10d8119ac509..fa7d8d3790bfc960bc7d4b358e9fb1a5120c04e2 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -800,7 +800,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
                 * If address-based cache ops don't require an SMP call, then
                 * use them exclusively for small flushes.
                 */
-               size = start - end;
+               size = end - start;
                cache_size = icache_size;
                if (!cpu_has_ic_fills_f_dc) {
                        size *= 2;
index a5509e7dcad2ce0fe8504b67ce325303e51894f8..2c3749d98f04b2f78ee6e70035a6e175d50a819e 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -261,7 +261,6 @@ unsigned __weak platform_maar_init(unsigned num_pairs)
 {
        struct maar_config cfg[BOOT_MEM_MAP_MAX];
        unsigned i, num_configured, num_cfg = 0;
-       phys_addr_t skip;
 
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                switch (boot_mem_map.map[i].type) {
@@ -272,14 +271,14 @@ unsigned __weak platform_maar_init(unsigned num_pairs)
                        continue;
                }
 
-               skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff);
-
+               /* Round lower up */
                cfg[num_cfg].lower = boot_mem_map.map[i].addr;
-               cfg[num_cfg].lower += skip;
+               cfg[num_cfg].lower = (cfg[num_cfg].lower + 0xffff) & ~0xffff;
 
-               cfg[num_cfg].upper = cfg[num_cfg].lower;
-               cfg[num_cfg].upper += boot_mem_map.map[i].size - 1;
-               cfg[num_cfg].upper -= skip;
+               /* Round upper down */
+               cfg[num_cfg].upper = boot_mem_map.map[i].addr +
+                                       boot_mem_map.map[i].size;
+               cfg[num_cfg].upper = (cfg[num_cfg].upper & ~0xffff) - 1;
 
                cfg[num_cfg].attrs = MIPS_MAAR_S;
                num_cfg++;
index bc0c91e84ca0dfa9dd50a11c1fe610fb19c120b5..38a5c657ffd399fd31c76509360fe2d79dac2f9c 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -124,6 +124,13 @@ static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)
                r->start < (phb->ioda.m64_base + phb->ioda.m64_size));
 }
 
+static inline bool pnv_pci_is_m64_flags(unsigned long resource_flags)
+{
+       unsigned long flags = (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
+
+       return (resource_flags & flags) == flags;
+}
+
 static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
 {
        phb->ioda.pe_array[pe_no].phb = phb;
@@ -2871,7 +2878,7 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
                res = &pdev->resource[i + PCI_IOV_RESOURCES];
                if (!res->flags || res->parent)
                        continue;
-               if (!pnv_pci_is_m64(phb, res)) {
+               if (!pnv_pci_is_m64_flags(res->flags)) {
                        dev_warn(&pdev->dev, "Don't support SR-IOV with"
                                        " non M64 VF BAR%d: %pR. \n",
                                 i, res);
@@ -3096,7 +3103,7 @@ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
         * alignment for any 64-bit resource, PCIe doesn't care and
         * bridges only do 64-bit prefetchable anyway.
         */
-       if (phb->ioda.m64_segsize && (type & IORESOURCE_MEM_64))
+       if (phb->ioda.m64_segsize && pnv_pci_is_m64_flags(type))
                return phb->ioda.m64_segsize;
        if (type & IORESOURCE_MEM)
                return phb->ioda.m32_segsize;
index caea2c45f6c2ac43ee6c134c1b4c9a7ebc8295e3..1d159ce50f5ad37a5ae5e7bdd5ca2116dfd26158 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -60,7 +60,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)                       \
 "      movco.l %0, @%3                                 \n"             \
 "      bf      1b                                      \n"             \
 "      synco                                           \n"             \
-       : "=&z" (temp), "=&z" (res)                                     \
+       : "=&z" (temp), "=&r" (res)                                     \
        : "r" (i), "r" (&v->counter)                                    \
        : "t");                                                         \
                                                                        \
index bdcd6510992c3338c5a3efba35ef3ef7c606bc6a..982c9e31daca441cc66b0b7fc652f3b4251be7b0 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -455,7 +455,7 @@ int intel_bts_interrupt(void)
         * The only surefire way of knowing if this NMI is ours is by checking
         * the write ptr against the PMI threshold.
         */
-       if (ds->bts_index >= ds->bts_interrupt_threshold)
+       if (ds && (ds->bts_index >= ds->bts_interrupt_threshold))
                handled = 1;
 
        /*
@@ -584,7 +584,8 @@ static __init int bts_init(void)
        if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
                return -ENODEV;
 
-       bts_pmu.capabilities    = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE;
+       bts_pmu.capabilities    = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE |
+                                 PERF_PMU_CAP_EXCLUSIVE;
        bts_pmu.task_ctx_nr     = perf_sw_context;
        bts_pmu.event_init      = bts_event_init;
        bts_pmu.add             = bts_event_add;
index 849dc09fa4f0b803b7139015de6b98f7778165bd..e3353c97d0862d2a20bdd060fc229af2de8324bb 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -917,11 +917,11 @@ static void populate_pte(struct cpa_data *cpa,
        }
 }
 
-static int populate_pmd(struct cpa_data *cpa,
-                       unsigned long start, unsigned long end,
-                       unsigned num_pages, pud_t *pud, pgprot_t pgprot)
+static long populate_pmd(struct cpa_data *cpa,
+                        unsigned long start, unsigned long end,
+                        unsigned num_pages, pud_t *pud, pgprot_t pgprot)
 {
-       unsigned int cur_pages = 0;
+       long cur_pages = 0;
        pmd_t *pmd;
        pgprot_t pmd_pgprot;
 
@@ -991,12 +991,12 @@ static int populate_pmd(struct cpa_data *cpa,
        return num_pages;
 }
 
-static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
-                       pgprot_t pgprot)
+static long populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
+                        pgprot_t pgprot)
 {
        pud_t *pud;
        unsigned long end;
-       int cur_pages = 0;
+       long cur_pages = 0;
        pgprot_t pud_pgprot;
 
        end = start + (cpa->numpages << PAGE_SHIFT);
@@ -1052,7 +1052,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
 
        /* Map trailing leftover */
        if (start < end) {
-               int tmp;
+               long tmp;
 
                pud = pud_offset(pgd, start);
                if (pud_none(*pud))
@@ -1078,7 +1078,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
        pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
        pud_t *pud = NULL;      /* shut up gcc */
        pgd_t *pgd_entry;
-       int ret;
+       long ret;
 
        pgd_entry = cpa->pgd + pgd_index(addr);
 
@@ -1327,7 +1327,8 @@ static int cpa_process_alias(struct cpa_data *cpa)
 
 static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
 {
-       int ret, numpages = cpa->numpages;
+       unsigned long numpages = cpa->numpages;
+       int ret;
 
        while (numpages) {
                /*
index 677e29e294732560e2a1e66edbaa57a94b5d5cd9..8dd3784eb0752ea6ff4cc0d21f249ca084a323f6 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -245,7 +245,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
         * text and allocate a new stack because we can't rely on the
         * stack pointer being < 4GB.
         */
-       if (!IS_ENABLED(CONFIG_EFI_MIXED))
+       if (!IS_ENABLED(CONFIG_EFI_MIXED) || efi_is_native())
                return 0;
 
        /*
index 13f5a6c1de76827c3aa2eaab28061581971e0c5c..c207fa9870ebcaaed6bfb02efa70b5f7333954f8 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -296,17 +296,29 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
        if (ret)
                return ERR_PTR(ret);
 
+       /*
+        * Check if the hardware context is actually mapped to anything.
+        * If not tell the caller that it should skip this queue.
+        */
        hctx = q->queue_hw_ctx[hctx_idx];
+       if (!blk_mq_hw_queue_mapped(hctx)) {
+               ret = -EXDEV;
+               goto out_queue_exit;
+       }
        ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
 
        blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
        rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
        if (!rq) {
-               blk_queue_exit(q);
-               return ERR_PTR(-EWOULDBLOCK);
+               ret = -EWOULDBLOCK;
+               goto out_queue_exit;
        }
 
        return rq;
+
+out_queue_exit:
+       blk_queue_exit(q);
+       return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
 
index f1aba26f47194d2d107717ac769c4e7e7b42bc72..a3ea8260c94c89236f938fb255158be8cb880a73 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -780,9 +780,11 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
        /*
         * If previous slice expired, start a new one otherwise renew/extend
         * existing slice to make sure it is at least throtl_slice interval
-        * long since now.
+        * long since now. New slice is started only for empty throttle group.
+        * If there is queued bio, that means there should be an active
+        * slice and it should be extended instead.
         */
-       if (throtl_slice_used(tg, rw))
+       if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
                throtl_start_new_slice(tg, rw);
        else {
                if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
index 877019a6d3ea81c3e130deb882e5ceccb1fe62ae..8baab4307f7b9a9064fa1234825fca29a34380c6 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -298,41 +298,48 @@ static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+       unsigned int dst_len;
        unsigned int pos;
-
-       if (err == -EOVERFLOW)
-               /* Decrypted value had no leading 0 byte */
-               err = -EINVAL;
+       u8 *out_buf;
 
        if (err)
                goto done;
 
-       if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
-               err = -EINVAL;
+       err = -EINVAL;
+       dst_len = req_ctx->child_req.dst_len;
+       if (dst_len < ctx->key_size - 1)
                goto done;
+
+       out_buf = req_ctx->out_buf;
+       if (dst_len == ctx->key_size) {
+               if (out_buf[0] != 0x00)
+                       /* Decrypted value had no leading 0 byte */
+                       goto done;
+
+               dst_len--;
+               out_buf++;
        }
 
-       if (req_ctx->out_buf[0] != 0x02) {
-               err = -EINVAL;
+       if (out_buf[0] != 0x02)
                goto done;
-       }
-       for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
-               if (req_ctx->out_buf[pos] == 0x00)
+
+       for (pos = 1; pos < dst_len; pos++)
+               if (out_buf[pos] == 0x00)
                        break;
-       if (pos < 9 || pos == req_ctx->child_req.dst_len) {
-               err = -EINVAL;
+       if (pos < 9 || pos == dst_len)
                goto done;
-       }
        pos++;
 
-       if (req->dst_len < req_ctx->child_req.dst_len - pos)
+       err = 0;
+
+       if (req->dst_len < dst_len - pos)
                err = -EOVERFLOW;
-       req->dst_len = req_ctx->child_req.dst_len - pos;
+       req->dst_len = dst_len - pos;
 
        if (!err)
                sg_copy_from_buffer(req->dst,
                                sg_nents_for_len(req->dst, req->dst_len),
-                               req_ctx->out_buf + pos, req->dst_len);
+                               out_buf + pos, req->dst_len);
 
 done:
        kzfree(req_ctx->out_buf);
index 80cc7c089a15e908ae070d95ad4879e5b6309555..e1d5ea6d5e404a151b3f362926402e1168f8cacd 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -94,54 +94,50 @@ static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
        return to_acpi_device(acpi_desc->dev);
 }
 
-static int xlat_status(void *buf, unsigned int cmd)
+static int xlat_status(void *buf, unsigned int cmd, u32 status)
 {
        struct nd_cmd_clear_error *clear_err;
        struct nd_cmd_ars_status *ars_status;
-       struct nd_cmd_ars_start *ars_start;
-       struct nd_cmd_ars_cap *ars_cap;
        u16 flags;
 
        switch (cmd) {
        case ND_CMD_ARS_CAP:
-               ars_cap = buf;
-               if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE)
+               if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
                        return -ENOTTY;
 
                /* Command failed */
-               if (ars_cap->status & 0xffff)
+               if (status & 0xffff)
                        return -EIO;
 
                /* No supported scan types for this range */
                flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
-               if ((ars_cap->status >> 16 & flags) == 0)
+               if ((status >> 16 & flags) == 0)
                        return -ENOTTY;
                break;
        case ND_CMD_ARS_START:
-               ars_start = buf;
                /* ARS is in progress */
-               if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY)
+               if ((status & 0xffff) == NFIT_ARS_START_BUSY)
                        return -EBUSY;
 
                /* Command failed */
-               if (ars_start->status & 0xffff)
+               if (status & 0xffff)
                        return -EIO;
                break;
        case ND_CMD_ARS_STATUS:
                ars_status = buf;
                /* Command failed */
-               if (ars_status->status & 0xffff)
+               if (status & 0xffff)
                        return -EIO;
                /* Check extended status (Upper two bytes) */
-               if (ars_status->status == NFIT_ARS_STATUS_DONE)
+               if (status == NFIT_ARS_STATUS_DONE)
                        return 0;
 
                /* ARS is in progress */
-               if (ars_status->status == NFIT_ARS_STATUS_BUSY)
+               if (status == NFIT_ARS_STATUS_BUSY)
                        return -EBUSY;
 
                /* No ARS performed for the current boot */
-               if (ars_status->status == NFIT_ARS_STATUS_NONE)
+               if (status == NFIT_ARS_STATUS_NONE)
                        return -EAGAIN;
 
                /*
@@ -149,19 +145,19 @@ static int xlat_status(void *buf, unsigned int cmd)
                 * agent wants the scan to stop.  If we didn't overflow
                 * then just continue with the returned results.
                 */
-               if (ars_status->status == NFIT_ARS_STATUS_INTR) {
+               if (status == NFIT_ARS_STATUS_INTR) {
                        if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
                                return -ENOSPC;
                        return 0;
                }
 
                /* Unknown status */
-               if (ars_status->status >> 16)
+               if (status >> 16)
                        return -EIO;
                break;
        case ND_CMD_CLEAR_ERROR:
                clear_err = buf;
-               if (clear_err->status & 0xffff)
+               if (status & 0xffff)
                        return -EIO;
                if (!clear_err->cleared)
                        return -EIO;
@@ -172,6 +168,9 @@ static int xlat_status(void *buf, unsigned int cmd)
                break;
        }
 
+       /* all other non-zero status results in an error */
+       if (status)
+               return -EIO;
        return 0;
 }
 
@@ -186,10 +185,10 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
        struct nd_cmd_pkg *call_pkg = NULL;
        const char *cmd_name, *dimm_name;
        unsigned long cmd_mask, dsm_mask;
+       u32 offset, fw_status = 0;
        acpi_handle handle;
        unsigned int func;
        const u8 *uuid;
-       u32 offset;
        int rc, i;
 
        func = cmd;
@@ -317,6 +316,15 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
                                out_obj->buffer.pointer + offset, out_size);
                offset += out_size;
        }
+
+       /*
+        * Set fw_status for all the commands with a known format to be
+        * later interpreted by xlat_status().
+        */
+       if (i >= 1 && ((cmd >= ND_CMD_ARS_CAP && cmd <= ND_CMD_CLEAR_ERROR)
+                       || (cmd >= ND_CMD_SMART && cmd <= ND_CMD_VENDOR)))
+               fw_status = *(u32 *) out_obj->buffer.pointer;
+
        if (offset + in_buf.buffer.length < buf_len) {
                if (i >= 1) {
                        /*
@@ -325,7 +333,7 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
                         */
                        rc = buf_len - offset - in_buf.buffer.length;
                        if (cmd_rc)
-                               *cmd_rc = xlat_status(buf, cmd);
+                               *cmd_rc = xlat_status(buf, cmd, fw_status);
                } else {
                        dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
                                        __func__, dimm_name, cmd_name, buf_len,
@@ -335,7 +343,7 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
        } else {
                rc = 0;
                if (cmd_rc)
-                       *cmd_rc = xlat_status(buf, cmd);
+                       *cmd_rc = xlat_status(buf, cmd, fw_status);
        }
 
  out:
index 25d26bb18970694d808a3d679e613a5487fa8236..e964d068874dea77f0282d58e0dd97501f7481a6 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1475,7 +1475,11 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 
                kfree(buf);
        } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
-               regcache_drop_region(map, reg, reg + 1);
+               /* regcache_drop_region() takes lock that we already have,
+                * thus call map->cache_ops->drop() directly
+                */
+               if (map->cache_ops && map->cache_ops->drop)
+                       map->cache_ops->drop(map, reg, reg + 1);
        }
 
        trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
index df7ab2458e5016d0da9da40b9658deb80f72d4dd..39c01b942ee483ef11e350aa43a44f1d7b5786d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1708,11 +1708,11 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 
        DRM_INFO("amdgpu: finishing device.\n");
        adev->shutdown = true;
+       drm_crtc_force_disable_all(adev->ddev);
        /* evict vram memory */
        amdgpu_bo_evict_vram(adev);
        amdgpu_ib_pool_fini(adev);
        amdgpu_fence_driver_fini(adev);
-       drm_crtc_force_disable_all(adev->ddev);
        amdgpu_fbdev_fini(adev);
        r = amdgpu_fini(adev);
        kfree(adev->ip_block_status);
index 7ea8aa7ca40859b0bcdf4cca65f57e4ba1caacd9..6bc712f32c8b4a0850e7445c7bb2a096464cf90d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -175,6 +175,7 @@ struct nvkm_device_func {
        void (*fini)(struct nvkm_device *, bool suspend);
        resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
        resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
+       bool cpu_coherent;
 };
 
 struct nvkm_device_quirk {
index 6190035edfeaa2af9ae30063bad3f73aca822f7d..864323b19cf7f993a8fc643abaa7acad76bd459a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -209,7 +209,8 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
        nvbo->tile_flags = tile_flags;
        nvbo->bo.bdev = &drm->ttm.bdev;
 
-       nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
+       if (!nvxx_device(&drm->device)->func->cpu_coherent)
+               nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
 
        nvbo->page_shift = 12;
        if (drm->client.vm) {
index b1b693219db3f70edbf29ae5f508141937e84e42..62ad0300cfa5ab0af8610773ae7d5c4b49e310bb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1614,6 +1614,7 @@ nvkm_device_pci_func = {
        .fini = nvkm_device_pci_fini,
        .resource_addr = nvkm_device_pci_resource_addr,
        .resource_size = nvkm_device_pci_resource_size,
+       .cpu_coherent = !IS_ENABLED(CONFIG_ARM),
 };
 
 int
index 939682f18788feda2d5b30b045140bc696e476db..9b638bd905ffa471e7cb0cfb04bf0d3274c211ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -245,6 +245,7 @@ nvkm_device_tegra_func = {
        .fini = nvkm_device_tegra_fini,
        .resource_addr = nvkm_device_tegra_resource_addr,
        .resource_size = nvkm_device_tegra_resource_size,
+       .cpu_coherent = false,
 };
 
 int
index edec30fd3ecd682174acab05f15f0cd18db68331..0a7b6ed5ed284fc56fa518dceb9ae050643cce60 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
@@ -37,7 +37,10 @@ nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie)
 {
        struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
        struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
+
+       mutex_lock(&chan->fifo->base.engine.subdev.mutex);
        nvkm_ramht_remove(imem->ramht, cookie);
+       mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
 }
 
 static int
index e6abc09b67e3e63e82fea3bbda2b1c91234c525c..1f78ec2548ec69403e12b0d136c3a5564ee224aa 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3015,6 +3015,12 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
        if (rdev->pdev->device == 0x6811 &&
            rdev->pdev->revision == 0x81)
                max_mclk = 120000;
+       /* limit sclk/mclk on Jet parts for stability */
+       if (rdev->pdev->device == 0x6665 &&
+           rdev->pdev->revision == 0xc3) {
+               max_sclk = 75000;
+               max_mclk = 80000;
+       }
 
        if (rps->vce_active) {
                rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
index 9688bfa92ccd33545982dbeedbdc8154c3ca38f1..611b6b9bb3cb02adaa16767e47965b92ae5d5a93 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -122,7 +122,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
                return 0;
        cmd = urb->transfer_buffer;
 
-       for (i = y; i < height ; i++) {
+       for (i = y; i < y + height ; i++) {
                const int line_offset = fb->base.pitches[0] * i;
                const int byte_offset = line_offset + (x * bpp);
                const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
index 137125b5eae77ca07fae38d52475341b01df5c76..5ce71ce7b6c43b7ce8fd7e7586127650c5e2e508 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -773,13 +773,6 @@ static int pch_i2c_probe(struct pci_dev *pdev,
        /* Set the number of I2C channel instance */
        adap_info->ch_num = id->driver_data;
 
-       ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
-                 KBUILD_MODNAME, adap_info);
-       if (ret) {
-               pch_pci_err(pdev, "request_irq FAILED\n");
-               goto err_request_irq;
-       }
-
        for (i = 0; i < adap_info->ch_num; i++) {
                pch_adap = &adap_info->pch_data[i].pch_adapter;
                adap_info->pch_i2c_suspended = false;
@@ -797,6 +790,17 @@ static int pch_i2c_probe(struct pci_dev *pdev,
 
                pch_adap->dev.of_node = pdev->dev.of_node;
                pch_adap->dev.parent = &pdev->dev;
+       }
+
+       ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+                 KBUILD_MODNAME, adap_info);
+       if (ret) {
+               pch_pci_err(pdev, "request_irq FAILED\n");
+               goto err_request_irq;
+       }
+
+       for (i = 0; i < adap_info->ch_num; i++) {
+               pch_adap = &adap_info->pch_data[i].pch_adapter;
 
                pch_i2c_init(&adap_info->pch_data[i]);
 
index 501bd15cb78e0eddab711efcb3ea6cd86515f5bb..a8497cfdae6f40a4de6bd4c57cac4d8f3789967f 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -1599,7 +1599,8 @@ static int qup_i2c_pm_resume_runtime(struct device *device)
 #ifdef CONFIG_PM_SLEEP
 static int qup_i2c_suspend(struct device *device)
 {
-       qup_i2c_pm_suspend_runtime(device);
+       if (!pm_runtime_suspended(device))
+               return qup_i2c_pm_suspend_runtime(device);
        return 0;
 }
 
index 528e755c468f36a8cf7cf1f693878e4482b17a93..3278ebf1cc5ccda15321f46bd658a03f51273557 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -164,7 +164,7 @@ static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
        /* Only select the channel if its different from the last channel */
        if (data->last_chan != regval) {
                ret = pca954x_reg_write(muxc->parent, client, regval);
-               data->last_chan = regval;
+               data->last_chan = ret ? 0 : regval;
        }
 
        return ret;
index 5d11fea3c8eccd10dbd892d4898d4b0ecca3aacd..f3135ae22df429c743c771c40c29d049869091f5 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -946,6 +946,12 @@ static const struct input_device_id joydev_ids[] = {
                .evbit = { BIT_MASK(EV_ABS) },
                .absbit = { BIT_MASK(ABS_X) },
        },
+       {
+               .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+                               INPUT_DEVICE_ID_MATCH_ABSBIT,
+               .evbit = { BIT_MASK(EV_ABS) },
+               .absbit = { BIT_MASK(ABS_Z) },
+       },
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
                                INPUT_DEVICE_ID_MATCH_ABSBIT,
index b2744a64e9331e2bd938a9ea60f40074118e62aa..f502c8488be86361592187acdffdecb164c4dfbd 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -390,9 +390,10 @@ static void silead_ts_read_props(struct i2c_client *client)
                data->max_fingers = 5; /* Most devices handle up-to 5 fingers */
        }
 
-       error = device_property_read_string(dev, "touchscreen-fw-name", &str);
+       error = device_property_read_string(dev, "firmware-name", &str);
        if (!error)
-               snprintf(data->fw_name, sizeof(data->fw_name), "%s", str);
+               snprintf(data->fw_name, sizeof(data->fw_name),
+                        "silead/%s", str);
        else
                dev_dbg(dev, "Firmware file name read error. Using default.");
 }
@@ -410,14 +411,14 @@ static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
                if (!acpi_id)
                        return -ENODEV;
 
-               snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw",
-                       acpi_id->id);
+               snprintf(data->fw_name, sizeof(data->fw_name),
+                        "silead/%s.fw", acpi_id->id);
 
                for (i = 0; i < strlen(data->fw_name); i++)
                        data->fw_name[i] = tolower(data->fw_name[i]);
        } else {
-               snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw",
-                       id->name);
+               snprintf(data->fw_name, sizeof(data->fw_name),
+                        "silead/%s.fw", id->name);
        }
 
        return 0;
@@ -426,7 +427,8 @@ static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
 static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
                                         const struct i2c_device_id *id)
 {
-       snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw", id->name);
+       snprintf(data->fw_name, sizeof(data->fw_name),
+                "silead/%s.fw", id->name);
        return 0;
 }
 #endif
index ede5672ab34d45b544ef9d35c53e0ca1e990835e..da6c0ba61d4f4c0bf73f323e735ab1386ed7f90e 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -548,7 +548,7 @@ static int gic_starting_cpu(unsigned int cpu)
 static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
                                   unsigned long cluster_id)
 {
-       int cpu = *base_cpu;
+       int next_cpu, cpu = *base_cpu;
        unsigned long mpidr = cpu_logical_map(cpu);
        u16 tlist = 0;
 
@@ -562,9 +562,10 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
 
                tlist |= 1 << (mpidr & 0xf);
 
-               cpu = cpumask_next(cpu, mask);
-               if (cpu >= nr_cpu_ids)
+               next_cpu = cpumask_next(cpu, mask);
+               if (next_cpu >= nr_cpu_ids)
                        goto out;
+               cpu = next_cpu;
 
                mpidr = cpu_logical_map(cpu);
 
index 83f498393a7f0b5d82264cfd3cd340f0cf6c3c1f..6185696405d5b64181a28d671b6797743768b9f1 100644 (file)
@@ -638,27 +638,6 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
        if (!gic_local_irq_is_routable(intr))
                return -EPERM;
 
-       /*
-        * HACK: These are all really percpu interrupts, but the rest
-        * of the MIPS kernel code does not use the percpu IRQ API for
-        * the CP0 timer and performance counter interrupts.
-        */
-       switch (intr) {
-       case GIC_LOCAL_INT_TIMER:
-       case GIC_LOCAL_INT_PERFCTR:
-       case GIC_LOCAL_INT_FDC:
-               irq_set_chip_and_handler(virq,
-                                        &gic_all_vpes_local_irq_controller,
-                                        handle_percpu_irq);
-               break;
-       default:
-               irq_set_chip_and_handler(virq,
-                                        &gic_local_irq_controller,
-                                        handle_percpu_devid_irq);
-               irq_set_percpu_devid(virq);
-               break;
-       }
-
        spin_lock_irqsave(&gic_lock, flags);
        for (i = 0; i < gic_vpes; i++) {
                u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;
@@ -724,16 +703,42 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
        return 0;
 }
 
-static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
-                             irq_hw_number_t hw)
+static int gic_setup_dev_chip(struct irq_domain *d, unsigned int virq,
+                             unsigned int hwirq)
 {
-       if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
-               return gic_local_irq_domain_map(d, virq, hw);
+       struct irq_chip *chip;
+       int err;
+
+       if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
+               err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+                                                   &gic_level_irq_controller,
+                                                   NULL);
+       } else {
+               switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
+               case GIC_LOCAL_INT_TIMER:
+               case GIC_LOCAL_INT_PERFCTR:
+               case GIC_LOCAL_INT_FDC:
+                       /*
+                        * HACK: These are all really percpu interrupts, but
+                        * the rest of the MIPS kernel code does not use the
+                        * percpu IRQ API for them.
+                        */
+                       chip = &gic_all_vpes_local_irq_controller;
+                       irq_set_handler(virq, handle_percpu_irq);
+                       break;
+
+               default:
+                       chip = &gic_local_irq_controller;
+                       irq_set_handler(virq, handle_percpu_devid_irq);
+                       irq_set_percpu_devid(virq);
+                       break;
+               }
 
-       irq_set_chip_and_handler(virq, &gic_level_irq_controller,
-                                handle_level_irq);
+               err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+                                                   chip, NULL);
+       }
 
-       return gic_shared_irq_domain_map(d, virq, hw, 0);
+       return err;
 }
 
 static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
@@ -744,15 +749,12 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
        int cpu, ret, i;
 
        if (spec->type == GIC_DEVICE) {
-               /* verify that it doesn't conflict with an IPI irq */
-               if (test_bit(spec->hwirq, ipi_resrv))
+               /* verify that shared irqs don't conflict with an IPI irq */
+               if ((spec->hwirq >= GIC_SHARED_HWIRQ_BASE) &&
+                   test_bit(GIC_HWIRQ_TO_SHARED(spec->hwirq), ipi_resrv))
                        return -EBUSY;
 
-               hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq);
-
-               return irq_domain_set_hwirq_and_chip(d, virq, hwirq,
-                                                    &gic_level_irq_controller,
-                                                    NULL);
+               return gic_setup_dev_chip(d, virq, spec->hwirq);
        } else {
                base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
                if (base_hwirq == gic_shared_intrs) {
@@ -821,7 +823,6 @@ int gic_irq_domain_match(struct irq_domain *d, struct device_node *node,
 }
 
 static const struct irq_domain_ops gic_irq_domain_ops = {
-       .map = gic_irq_domain_map,
        .alloc = gic_irq_domain_alloc,
        .free = gic_irq_domain_free,
        .match = gic_irq_domain_match,
@@ -852,29 +853,20 @@ static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
        struct irq_fwspec *fwspec = arg;
        struct gic_irq_spec spec = {
                .type = GIC_DEVICE,
-               .hwirq = fwspec->param[1],
        };
        int i, ret;
-       bool is_shared = fwspec->param[0] == GIC_SHARED;
 
-       if (is_shared) {
-               ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
-               if (ret)
-                       return ret;
-       }
-
-       for (i = 0; i < nr_irqs; i++) {
-               irq_hw_number_t hwirq;
+       if (fwspec->param[0] == GIC_SHARED)
+               spec.hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
+       else
+               spec.hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);
 
-               if (is_shared)
-                       hwirq = GIC_SHARED_TO_HWIRQ(spec.hwirq + i);
-               else
-                       hwirq = GIC_LOCAL_TO_HWIRQ(spec.hwirq + i);
+       ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
+       if (ret)
+               return ret;
 
-               ret = irq_domain_set_hwirq_and_chip(d, virq + i,
-                                                   hwirq,
-                                                   &gic_level_irq_controller,
-                                                   NULL);
+       for (i = 0; i < nr_irqs; i++) {
+               ret = gic_setup_dev_chip(d, virq + i, spec.hwirq + i);
                if (ret)
                        goto error;
        }
@@ -896,7 +888,10 @@ void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
 static void gic_dev_domain_activate(struct irq_domain *domain,
                                    struct irq_data *d)
 {
-       gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);
+       if (GIC_HWIRQ_TO_LOCAL(d->hwirq) < GIC_NUM_LOCAL_INTRS)
+               gic_local_irq_domain_map(domain, d->irq, d->hwirq);
+       else
+               gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);
 }
 
 static struct irq_domain_ops gic_dev_domain_ops = {
index 32380d5d4f6b15440497b952b035cfe3b76b74ea..767af2026f8b4660511d57d3cfce608c3f9d9bac 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1112,11 +1112,12 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
 
                div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
 
-               dev_info(&slot->mmc->class_dev,
-                        "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
-                        slot->id, host->bus_hz, clock,
-                        div ? ((host->bus_hz / div) >> 1) :
-                        host->bus_hz, div);
+               if (clock != slot->__clk_old || force_clkinit)
+                       dev_info(&slot->mmc->class_dev,
+                                "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
+                                slot->id, host->bus_hz, clock,
+                                div ? ((host->bus_hz / div) >> 1) :
+                                host->bus_hz, div);
 
                /* disable clock */
                mci_writel(host, CLKENA, 0);
@@ -1139,6 +1140,9 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
 
                /* inform CIU */
                mci_send_cmd(slot, sdmmc_cmd_bits, 0);
+
+               /* keep the last clock value that was requested from core */
+               slot->__clk_old = clock;
        }
 
        host->current_speed = clock;
index 9e740bc232a8bed09ef7ee668a8a3abdef6e93d1..e8cd2dec3263d308ed0ebc9f52271c4b72bc5f05 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -249,6 +249,8 @@ extern int dw_mci_resume(struct dw_mci *host);
  * @queue_node: List node for placing this node in the @queue list of
  *     &struct dw_mci.
  * @clock: Clock rate configured by set_ios(). Protected by host->lock.
+ * @__clk_old: The last clock value that was requested from core.
+ *     Keeping track of this helps us to avoid spamming the console.
  * @flags: Random state bits associated with the slot.
  * @id: Number of this slot.
  * @sdio_id: Number of this slot in the SDIO interrupt registers.
@@ -263,6 +265,7 @@ struct dw_mci_slot {
        struct list_head        queue_node;
 
        unsigned int            clock;
+       unsigned int            __clk_old;
 
        unsigned long           flags;
 #define DW_MMC_CARD_PRESENT    0
index cc07ba0f044deeb167772c529a635236377d983b..27fa8b87cd5fc6a779e0509ce78c010527747268 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -240,6 +240,9 @@ static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
        unsigned long flags;
        u32 val;
 
+       /* Reset ECC hardware */
+       davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
+
        spin_lock_irqsave(&davinci_nand_lock, flags);
 
        /* Start 4-bit ECC calculation for read/write */
index 25a4fbd4d24ae5c0e6773b9aebd763ec4efde552..d54f666417e183c4f148826db042931b5d81f47c 100644 (file)
@@ -366,7 +366,8 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
                   u8 *data, u32 bytes)
 {
        dma_addr_t addr;
-       u32 *p, len, i;
+       u8 *p;
+       u32 len, i, val;
        int ret = 0;
 
        addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
@@ -392,11 +393,14 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
 
        /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
        len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
-       p = (u32 *)(data + bytes);
+       p = data + bytes;
 
        /* write the parity bytes generated by the ECC back to the OOB region */
-       for (i = 0; i < len; i++)
-               p[i] = readl(ecc->regs + ECC_ENCPAR(i));
+       for (i = 0; i < len; i++) {
+               if ((i % 4) == 0)
+                       val = readl(ecc->regs + ECC_ENCPAR(i / 4));
+               p[i] = (val >> ((i % 4) * 8)) & 0xff;
+       }
 timeout:
 
        dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
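
The bug in mtk_ecc_encode() was casting the OOB buffer to u32 * and doing word-sized stores: when the parity length is not a multiple of four, the final readl() writes past the end of the buffer. The fix reads one 32-bit parity register per four output bytes and stores bytes individually. A standalone sketch of the unpacking, with the readl(ECC_ENCPAR(i / 4)) calls replaced by an array of hypothetical register values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* stand-in for readl(ecc->regs + ECC_ENCPAR(i / 4)) */
        const uint32_t encpar[2] = { 0x44332211, 0x00006655 };
        uint8_t p[6];   /* len = 6: deliberately not a multiple of 4 */
        uint32_t val = 0;

        for (unsigned i = 0; i < sizeof(p); i++) {
                if ((i % 4) == 0)
                        val = encpar[i / 4];      /* one register read per 4 bytes */
                p[i] = (val >> ((i % 4) * 8)) & 0xff;
        }
        for (unsigned i = 0; i < sizeof(p); i++)
                printf("%02x ", p[i]);            /* 11 22 33 44 55 66 */
        printf("\n");
        return 0;
}
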
index ddaa2acb9dd7f716a2f5da80857af78f4f3a4cfa..5223a2182ee44dfbfbda6f578266390f52d4f8b6 100644 (file)
@@ -93,6 +93,9 @@
 #define                NFI_FSM_MASK            (0xf << 16)
 #define NFI_ADDRCNTR           (0x70)
 #define                CNTR_MASK               GENMASK(16, 12)
+#define                ADDRCNTR_SEC_SHIFT      (12)
+#define                ADDRCNTR_SEC(val) \
+               (((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
 #define NFI_STRADDR            (0x80)
 #define NFI_BYTELEN            (0x84)
 #define NFI_CSEL               (0x90)
@@ -699,7 +702,7 @@ static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
        }
 
        ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
-                                       (reg & CNTR_MASK) >= chip->ecc.steps,
+                                       ADDRCNTR_SEC(reg) >= chip->ecc.steps,
                                        10, MTK_TIMEOUT);
        if (ret)
                dev_err(dev, "hwecc write timeout\n");
@@ -902,7 +905,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
                dev_warn(nfc->dev, "read ahb/dma done timeout\n");
 
        rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
-                                      (reg & CNTR_MASK) >= sectors, 10,
+                                      ADDRCNTR_SEC(reg) >= sectors, 10,
                                       MTK_TIMEOUT);
        if (rc < 0) {
                dev_err(nfc->dev, "subpage done timeout\n");
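
CNTR_MASK covers bits 16..12, so the raw register value has to be shifted down after masking before it can be compared with a sector count; the old `(reg & CNTR_MASK) >= sectors` compared the count multiplied by 4096 and so completed far too early. A self-contained sketch of the field extraction (GENMASK is redefined here for userspace):

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)      (((~0u) << (l)) & (~0u >> (31 - (h))))
#define CNTR_MASK          GENMASK(16, 12)
#define ADDRCNTR_SEC_SHIFT 12
#define ADDRCNTR_SEC(val)  (((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)

int main(void)
{
        uint32_t reg = (3u << 12) | 0xabc;  /* 3 sectors done, low bits are a byte counter */

        printf("masked only: %u\n", reg & CNTR_MASK);   /* 12288: wrong for a sector compare */
        printf("extracted:   %u\n", ADDRCNTR_SEC(reg)); /* 3 */
        return 0;
}
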
index 5173fadc9a4e637f01817ed040a42b72af1d7a68..57cbe2b83849940aa825128c97b091bc92b3e99c 100644 (file)
@@ -943,7 +943,7 @@ static int mxc_v2_ooblayout_free(struct mtd_info *mtd, int section,
        struct nand_chip *nand_chip = mtd_to_nand(mtd);
        int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26;
 
-       if (section > nand_chip->ecc.steps)
+       if (section >= nand_chip->ecc.steps)
                return -ERANGE;
 
        if (!section) {
index a59361c36f404ff08fa7bc314777203b7dc22a93..5513bfd9cdc90ed8e2628e311b7a92e02bfe0cd7 100644 (file)
@@ -2169,7 +2169,7 @@ scan_tail:
        return 0;
 
 return_error:
-       if (info->dma)
+       if (!IS_ERR_OR_NULL(info->dma))
                dma_release_channel(info->dma);
        if (nand_chip->ecc.priv) {
                nand_bch_free(nand_chip->ecc.priv);
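
Here info->dma can hold an ERR_PTR-encoded error rather than NULL, so a plain `if (info->dma)` would pass an error cookie to dma_release_channel(); IS_ERR_OR_NULL() rejects both cases. A userspace sketch of the kernel's ERR_PTR convention, which reserves the top 4095 addresses for errno values:

#include <stdio.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err)       ((void *)(long)(err))
#define IS_ERR(p)          ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define IS_ERR_OR_NULL(p)  ((p) == NULL || IS_ERR(p))

int main(void)
{
        int x = 0;
        void *ok = &x, *err = ERR_PTR(-12 /* ENOMEM */), *nul = NULL;

        printf("%d %d %d\n", IS_ERR_OR_NULL(ok),
               IS_ERR_OR_NULL(err), IS_ERR_OR_NULL(nul)); /* 0 1 1 */
        return 0;
}
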
index 715583f69d28ae2f413768c81c141e1dd3019d75..4d7bbd2df5c01229fb4592c9bab7e8b3b1fd0147 100644 (file)
@@ -99,8 +99,11 @@ static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
        nvdimm_map->size = size;
        kref_init(&nvdimm_map->kref);
 
-       if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev)))
+       if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
+               dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
+                               &offset, size, dev_name(dev));
                goto err_request_region;
+       }
 
        if (flags)
                nvdimm_map->mem = memremap(offset, size, flags);
@@ -171,6 +174,9 @@ void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
                kref_get(&nvdimm_map->kref);
        nvdimm_bus_unlock(dev);
 
+       if (!nvdimm_map)
+               return NULL;
+
        if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
                return NULL;
 
index 8024a0ef86d3af9f0ba5ef169260e2e342023d8e..0b78a8211f4a8c5479125b5b58e823f4c26a1321 100644 (file)
@@ -52,10 +52,28 @@ struct nvdimm_drvdata {
 struct nd_region_data {
        int ns_count;
        int ns_active;
-       unsigned int flush_mask;
-       void __iomem *flush_wpq[0][0];
+       unsigned int hints_shift;
+       void __iomem *flush_wpq[0];
 };
 
+static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,
+               int dimm, int hint)
+{
+       unsigned int num = 1 << ndrd->hints_shift;
+       unsigned int mask = num - 1;
+
+       return ndrd->flush_wpq[dimm * num + (hint & mask)];
+}
+
+static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm,
+               int hint, void __iomem *flush)
+{
+       unsigned int num = 1 << ndrd->hints_shift;
+       unsigned int mask = num - 1;
+
+       ndrd->flush_wpq[dimm * num + (hint & mask)] = flush;
+}
+
 static inline struct nd_namespace_index *to_namespace_index(
                struct nvdimm_drvdata *ndd, int i)
 {
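
The two-dimensional zero-length array flush_wpq[0][0] had no way to carry its inner dimension, so the structure is flattened into a single array indexed as dimm * num + hint, with num a power of two derived from hints_shift. A standalone sketch of the same index math, with a fixed-size array standing in for the flexible array member:

#include <stdio.h>

struct region {
        unsigned int hints_shift;
        void *flush_wpq[8];     /* flattened [dimm][hint] storage */
};

static void *get_wpq(struct region *r, int dimm, int hint)
{
        unsigned int num = 1u << r->hints_shift;
        unsigned int mask = num - 1;

        return r->flush_wpq[dimm * num + (hint & mask)];
}

int main(void)
{
        struct region r = { .hints_shift = 1 };         /* 2 hints per dimm */
        int a, b;
        r.flush_wpq[0] = &a;                            /* dimm 0, hint 0 */
        r.flush_wpq[3] = &b;                            /* dimm 1, hint 1 */
        printf("%d %d\n", get_wpq(&r, 0, 0) == &a,
                          get_wpq(&r, 1, 1) == &b);     /* 1 1 */
        return 0;
}
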
index e8d5ba7b29af98f647b119640e79e581996cfdc0..4c0ac4abb62926e5ae131b78545a63d4088f5c99 100644 (file)
@@ -38,7 +38,7 @@ static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
 
        dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
                        nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
-       for (i = 0; i < nvdimm->num_flush; i++) {
+       for (i = 0; i < (1 << ndrd->hints_shift); i++) {
                struct resource *res = &nvdimm->flush_wpq[i];
                unsigned long pfn = PHYS_PFN(res->start);
                void __iomem *flush_page;
@@ -54,14 +54,15 @@ static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
 
                if (j < i)
                        flush_page = (void __iomem *) ((unsigned long)
-                                       ndrd->flush_wpq[dimm][j] & PAGE_MASK);
+                                       ndrd_get_flush_wpq(ndrd, dimm, j)
+                                       & PAGE_MASK);
                else
                        flush_page = devm_nvdimm_ioremap(dev,
-                                       PHYS_PFN(pfn), PAGE_SIZE);
+                                       PFN_PHYS(pfn), PAGE_SIZE);
                if (!flush_page)
                        return -ENXIO;
-               ndrd->flush_wpq[dimm][i] = flush_page
-                       + (res->start & ~PAGE_MASK);
+               ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
+                               + (res->start & ~PAGE_MASK));
        }
 
        return 0;
@@ -93,7 +94,10 @@ int nd_region_activate(struct nd_region *nd_region)
                return -ENOMEM;
        dev_set_drvdata(dev, ndrd);
 
-       ndrd->flush_mask = (1 << ilog2(num_flush)) - 1;
+       if (!num_flush)
+               return 0;
+
+       ndrd->hints_shift = ilog2(num_flush);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;
@@ -900,8 +904,8 @@ void nvdimm_flush(struct nd_region *nd_region)
         */
        wmb();
        for (i = 0; i < nd_region->ndr_mappings; i++)
-               if (ndrd->flush_wpq[i][0])
-                       writeq(1, ndrd->flush_wpq[i][idx & ndrd->flush_mask]);
+               if (ndrd_get_flush_wpq(ndrd, i, 0))
+                       writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
        wmb();
 }
 EXPORT_SYMBOL_GPL(nvdimm_flush);
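
Besides switching to the accessors, the hunk above fixes a subtle macro swap: the ioremap wants a physical address back, but the old code applied PHYS_PFN() to a value that was already a page frame number, shifting it down twice; PFN_PHYS() converts the frame number back up. A small sketch of the pair, assuming the usual 4K pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PHYS_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT)  /* address -> frame number */
#define PFN_PHYS(x) ((unsigned long)(x) << PAGE_SHIFT)  /* frame number -> address */

int main(void)
{
        unsigned long start = 0x12345000;
        unsigned long pfn = PHYS_PFN(start);

        printf("wrong: %#lx\n", PHYS_PFN(pfn));  /* shifted down a second time */
        printf("right: %#lx\n", PFN_PHYS(pfn));  /* back to 0x12345000 */
        return 0;
}
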
@@ -925,7 +929,7 @@ int nvdimm_has_flush(struct nd_region *nd_region)
 
        for (i = 0; i < nd_region->ndr_mappings; i++)
                /* flush hints present, flushing required */
-               if (ndrd->flush_wpq[i][0])
+               if (ndrd_get_flush_wpq(ndrd, i, 0))
                        return 1;
 
        /*
index c2c2c28e6eb59fbd45dfc278354c65626d9b0f95..fbdb2267e4603499021b39382d932c571874c505 100644 (file)
@@ -561,7 +561,6 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
 
        queue = &ctrl->queues[idx];
        queue->ctrl = ctrl;
-       queue->flags = 0;
        init_completion(&queue->cm_done);
 
        if (idx > 0)
@@ -595,6 +594,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
                goto out_destroy_cm_id;
        }
 
+       clear_bit(NVME_RDMA_Q_DELETING, &queue->flags);
        set_bit(NVME_RDMA_Q_CONNECTED, &queue->flags);
 
        return 0;
index ba9af4a2bd2ab3159ed415b5acc73328925aa3c3..ec6381e57eb737cebbae0689595979ac6ee9628b 100644 (file)
@@ -486,6 +486,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
        else
                shost->dma_boundary = 0xffffffff;
 
+       shost->use_blk_mq = scsi_use_blk_mq;
+
        device_initialize(&shost->shost_gendev);
        dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
        shost->shost_gendev.bus = &scsi_bus_type;
index 1f36aca44394eaf49a8edc0ce78d6db371288cc8..1deb6adc411f795833a444a84b1e80e9f9629c4d 100644 (file)
@@ -1160,7 +1160,6 @@ bool scsi_use_blk_mq = true;
 bool scsi_use_blk_mq = false;
 #endif
 module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
-EXPORT_SYMBOL_GPL(scsi_use_blk_mq);
 
 static int __init init_scsi(void)
 {
index 57a4b9973320f159d13730214f3e1f7c6d7b24eb..85c8a51bc5637701cc2b2d79f1b678064cb628ca 100644 (file)
@@ -29,6 +29,7 @@ extern int scsi_init_hosts(void);
 extern void scsi_exit_hosts(void);
 
 /* scsi.c */
+extern bool scsi_use_blk_mq;
 extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
 extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
 #ifdef CONFIG_SCSI_LOGGING
index 38c2df84cabd0cf2116239d1cf08d1d18709890f..665da8f66ff18ae5260f892b60a4bafb4fe323fa 100644 (file)
@@ -4271,13 +4271,10 @@ int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
        if (ret < 0)
                return ret;
 
-       /*
-        * Use new btrfs_qgroup_reserve_data to reserve precious data space
-        *
-        * TODO: Find a good method to avoid reserve data space for NOCOW
-        * range, but don't impact performance on quota disable case.
-        */
+       /* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
        ret = btrfs_qgroup_reserve_data(inode, start, len);
+       if (ret)
+               btrfs_free_reserved_data_space_noquota(inode, start, len);
        return ret;
 }
 
index b2a2da5893af5273313b3fc45765801831ce5d91..7fd939bfbd99359b3ffab8b9689d8fdd2b313aeb 100644 (file)
@@ -1634,6 +1634,9 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
        int namelen;
        int ret = 0;
 
+       if (!S_ISDIR(file_inode(file)->i_mode))
+               return -ENOTDIR;
+
        ret = mnt_want_write_file(file);
        if (ret)
                goto out;
@@ -1691,6 +1694,9 @@ static noinline int btrfs_ioctl_snap_create(struct file *file,
        struct btrfs_ioctl_vol_args *vol_args;
        int ret;
 
+       if (!S_ISDIR(file_inode(file)->i_mode))
+               return -ENOTDIR;
+
        vol_args = memdup_user(arg, sizeof(*vol_args));
        if (IS_ERR(vol_args))
                return PTR_ERR(vol_args);
@@ -1714,6 +1720,9 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
        bool readonly = false;
        struct btrfs_qgroup_inherit *inherit = NULL;
 
+       if (!S_ISDIR(file_inode(file)->i_mode))
+               return -ENOTDIR;
+
        vol_args = memdup_user(arg, sizeof(*vol_args));
        if (IS_ERR(vol_args))
                return PTR_ERR(vol_args);
@@ -2357,6 +2366,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
        int ret;
        int err = 0;
 
+       if (!S_ISDIR(dir->i_mode))
+               return -ENOTDIR;
+
        vol_args = memdup_user(arg, sizeof(*vol_args));
        if (IS_ERR(vol_args))
                return PTR_ERR(vol_args);
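
These ioctls only make sense on directories, but nothing stopped userspace from invoking them on a file descriptor for a regular file; the new S_ISDIR checks reject that up front with -ENOTDIR before any state is touched. The same guard in a standalone program, using the userspace stat API rather than the kernel's inode:

#include <stdio.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

static int dir_only_op(int fd)
{
        struct stat st;

        if (fstat(fd, &st) < 0)
                return -errno;
        if (!S_ISDIR(st.st_mode))
                return -ENOTDIR;        /* reject before doing any work */
        /* ... the directory-only operation would go here ... */
        return 0;
}

int main(void)
{
        int fd = open(".", O_RDONLY);
        printf("%d\n", dir_only_op(fd));  /* 0 */
        close(fd);
        return 0;
}
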
index c30cf49b69d2f3de59e61ef273478d797cccc1b7..2c6312db85168050f6ee35d0e82465d994e7a1ea 100644 (file)
@@ -333,6 +333,7 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
                if (bin_attr->cb_max_size &&
                        *ppos + count > bin_attr->cb_max_size) {
                        len = -EFBIG;
+                       goto out;
                }
 
                tbuf = vmalloc(*ppos + count);
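
Without the added goto, the handler set len = -EFBIG and then fell straight through to the vmalloc() and copy below, so an oversized write was detected but processed anyway. The shape of the bug and the fix, in a toy form with a hard-coded -EFBIG value:

#include <stdio.h>

static long checked_write(long pos, long count, long max)
{
        long len;

        if (max && pos + count > max) {
                len = -27;      /* -EFBIG */
                goto out;       /* the missing jump: without it we kept going */
        }
        len = count;            /* ... allocation and copy happen here ... */
out:
        return len;
}

int main(void)
{
        printf("%ld\n", checked_write(0, 10, 100));   /* 10 */
        printf("%ld\n", checked_write(95, 10, 100));  /* -27 */
        return 0;
}
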
index 98d36548153dc3696619b2923e2eecce35bd6bbc..bbb4b3e5b4ffc5bbf52fc5d4206f52995a988ea1 100644 (file)
@@ -1842,6 +1842,16 @@ out_commit:
        ocfs2_commit_trans(osb, handle);
 
 out:
+       /*
+        * The mmapped page won't be unlocked in ocfs2_free_write_ctxt(),
+        * even in case of error here like ENOSPC and ENOMEM. So, we need
+        * to unlock the target page manually to prevent deadlocks when
+        * retrying again on ENOSPC, or when returning non-VM_FAULT_LOCKED
+        * to VM code.
+        */
+       if (wc->w_target_locked)
+               unlock_page(mmap_page);
+
        ocfs2_free_write_ctxt(inode, wc);
 
        if (data_ac) {
index 66533e18276cf00e86de1fa8f9251c30f921fcf7..dc69df04abc1d449fdc0948a00dd320ebe882327 100644 (file)
@@ -718,7 +718,7 @@ static inline int dma_mmap_wc(struct device *dev,
 #define dma_mmap_writecombine dma_mmap_wc
 #endif
 
-#ifdef CONFIG_NEED_DMA_MAP_STATE
+#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
 #define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
index 7e3d53753612205dc4ef61cb857124f2499f5894..01e84436cddfec1da8b871b6343d72a2420ba070 100644 (file)
@@ -620,6 +620,7 @@ static inline int fault_in_multipages_readable(const char __user *uaddr,
                return __get_user(c, end);
        }
 
+       (void)c;
        return 0;
 }
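
The variable `c` exists only so __get_user() has somewhere to fault the byte in; its value is never read, which some compilers flag as set-but-unused. The `(void)c;` cast consumes the value without generating code. The idiom in isolation:

#include <stdio.h>

static int fault_in_readable(const char *addr)
{
        char c;

        c = *addr;      /* touch the byte; the value itself is unneeded */
        (void)c;        /* tell the compiler the dead store is intentional */
        return 0;
}

int main(void)
{
        char buf[1] = { 'x' };
        printf("%d\n", fault_in_readable(buf));  /* 0 */
        return 0;
}
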
 
index 3a2f9ae25c86c43827cdcd0c4602cd5e281a4cf9..856e50b2140ce76971b481a9d61777e10c5ea4d0 100644 (file)
@@ -190,7 +190,7 @@ struct property_entry {
        .length = ARRAY_SIZE(_val_) * sizeof(_type_),           \
        .is_array = true,                                       \
        .is_string = false,                                     \
-       { .pointer = { _type_##_data = _val_ } },               \
+       { .pointer = { ._type_##_data = _val_ } },              \
 }
 
 #define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_)                 \
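
The macro pastes the type name into a union member name, but without the leading dot the expansion `u8_data = _val_` is not a designated initializer and does not compile. A compilable sketch of pasting into a designated initializer, with a cut-down stand-in for the property structure:

#include <stdio.h>

struct pval { union { const unsigned char *u8_data; } pointer; };

/* token pasting builds the member name; note the leading '.' */
#define PV(_type_, _val_) \
        ((struct pval){ .pointer = { ._type_##_data = (_val_) } })

int main(void)
{
        static const unsigned char bytes[] = { 1, 2, 3 };
        struct pval v = PV(u8, bytes);
        printf("%u\n", v.pointer.u8_data[1]);   /* 2 */
        return 0;
}
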
index b17cc4830fa670512abdc3987f58fc55e583f0bc..4a529c984a3fa5e67e3e1f5010c8b17666dab480 100644 (file)
@@ -257,6 +257,7 @@ static inline void workingset_node_pages_inc(struct radix_tree_node *node)
 
 static inline void workingset_node_pages_dec(struct radix_tree_node *node)
 {
+       VM_BUG_ON(!workingset_node_pages(node));
        node->count--;
 }
 
@@ -272,6 +273,7 @@ static inline void workingset_node_shadows_inc(struct radix_tree_node *node)
 
 static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
 {
+       VM_BUG_ON(!workingset_node_shadows(node));
        node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
 }
 
index 0dee7afa93d6621881b2f6a4afb6828a4b5f8d63..7e4cd53139ed518a89795ea360bf30cce0ce1377 100644 (file)
@@ -771,12 +771,9 @@ static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
                shost->tmf_in_progress;
 }
 
-extern bool scsi_use_blk_mq;
-
 static inline bool shost_use_blk_mq(struct Scsi_Host *shost)
 {
-       return scsi_use_blk_mq;
-
+       return shost->use_blk_mq;
 }
 
 extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
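
Together with the hosts.c/scsi.c/scsi_priv.h hunks earlier, this stops exporting the writable module parameter and instead snapshots it into each Scsi_Host at allocation, so flipping use_blk_mq at runtime can no longer change the mode of a host that already exists. A sketch of capturing a global knob per object at creation time:

#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>

static bool use_blk_mq = true;          /* stand-in for the module parameter */

struct host { bool use_blk_mq; };

static struct host *host_alloc(void)
{
        struct host *h = malloc(sizeof(*h));
        if (h)
                h->use_blk_mq = use_blk_mq;     /* decided once, at creation */
        return h;
}

int main(void)
{
        struct host *h = host_alloc();
        use_blk_mq = false;             /* later runtime change... */
        printf("%d\n", h->use_blk_mq);  /* 1: the host keeps its snapshot */
        free(h);
        return 0;
}
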
index 5e8dab5bf9adb93205781083a6cf5312da140fdc..d6b729beba4930bbb1a1ddc4a6a6b8533bbf94e0 100644 (file)
@@ -3446,9 +3446,28 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
         * Except for the root, subtree_control must be zero for a cgroup
         * with tasks so that child cgroups don't compete against tasks.
         */
-       if (enable && cgroup_parent(cgrp) && !list_empty(&cgrp->cset_links)) {
-               ret = -EBUSY;
-               goto out_unlock;
+       if (enable && cgroup_parent(cgrp)) {
+               struct cgrp_cset_link *link;
+
+               /*
+                * Because namespaces pin csets too, @cgrp->cset_links
+                * might not be empty even when @cgrp is empty.  Walk and
+                * verify each cset.
+                */
+               spin_lock_irq(&css_set_lock);
+
+               ret = 0;
+               list_for_each_entry(link, &cgrp->cset_links, cset_link) {
+                       if (css_set_populated(link->cset)) {
+                               ret = -EBUSY;
+                               break;
+                       }
+               }
+
+               spin_unlock_irq(&css_set_lock);
+
+               if (ret)
+                       goto out_unlock;
        }
 
        /* save and update control masks and prepare csses */
@@ -3899,7 +3918,9 @@ void cgroup_file_notify(struct cgroup_file *cfile)
  * cgroup_task_count - count the number of tasks in a cgroup.
  * @cgrp: the cgroup in question
  *
- * Return the number of tasks in the cgroup.
+ * Return the number of tasks in the cgroup.  The returned number can be
+ * higher than the actual number of tasks due to css_set references from
+ * namespace roots and temporary usages.
  */
 static int cgroup_task_count(const struct cgroup *cgrp)
 {
index c27e53326befe9f33ffc605a75a48cf8e9ae6624..2b4c20ab5bbe170b8762b783e61df37a38368477 100644 (file)
@@ -325,8 +325,7 @@ static struct file_system_type cpuset_fs_type = {
 /*
  * Return in pmask the portion of a cpusets's cpus_allowed that
  * are online.  If none are online, walk up the cpuset hierarchy
- * until we find one that does have some online cpus.  The top
- * cpuset always has some cpus online.
+ * until we find one that does have some online cpus.
  *
  * One way or another, we guarantee to return some non-empty subset
  * of cpu_online_mask.
@@ -335,8 +334,20 @@ static struct file_system_type cpuset_fs_type = {
  */
 static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
 {
-       while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask))
+       while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
                cs = parent_cs(cs);
+               if (unlikely(!cs)) {
+                       /*
+                        * The top cpuset doesn't have any online cpu as a
+                        * consequence of a race between cpuset_hotplug_work
+                        * and cpu hotplug notifier.  But we know the top
+                        * cpuset's effective_cpus is on its way to be
+                        * identical to cpu_online_mask.
+                        */
+                       cpumask_copy(pmask, cpu_online_mask);
+                       return;
+               }
+       }
        cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
 }
 
@@ -2074,7 +2085,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
  * which could have been changed by cpuset just after it inherits the
  * state from the parent and before it sits on the cgroup's task list.
  */
-void cpuset_fork(struct task_struct *task)
+static void cpuset_fork(struct task_struct *task)
 {
        if (task_css_is_root(task, cpuset_cgrp_id))
                return;
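
The walk up the cpuset hierarchy used to rely on the invariant that the top cpuset always has online CPUs; a race with CPU hotplug can break that, so the loop now bails out with a copy of cpu_online_mask when it runs off the top. A generic walk-up-with-guard sketch of the same control flow:

#include <stdio.h>

struct node { struct node *parent; int has_online; };

static int effective_online(struct node *n, int fallback)
{
        while (!n->has_online) {
                n = n->parent;
                if (!n)                 /* walked past the root: use the fallback */
                        return fallback;
        }
        return 1;
}

int main(void)
{
        struct node root = { NULL, 0 }, child = { &root, 0 };
        printf("%d\n", effective_online(&child, 99));   /* 99 */
        return 0;
}
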
index a54f2c2cdb20735f67e48738c02ef2d85afa5019..fc9bb22252913acd34cf35be26fe5777a43c4506 100644 (file)
@@ -3929,7 +3929,7 @@ static void exclusive_event_destroy(struct perf_event *event)
 
 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
 {
-       if ((e1->pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) &&
+       if ((e1->pmu == e2->pmu) &&
            (e1->cpu == e2->cpu ||
             e1->cpu == -1 ||
             e2->cpu == -1))
index 637389088b3f9cf88db58ce920350a3517a20ed2..26ba5654d9d5bea8c741d3132f184fe9f0874f0f 100644 (file)
@@ -820,6 +820,8 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
        desc->name = name;
 
        if (handle != handle_bad_irq && is_chained) {
+               unsigned int type = irqd_get_trigger_type(&desc->irq_data);
+
                /*
                 * We're about to start this interrupt immediately,
                 * hence the need to set the trigger configuration.
@@ -828,8 +830,10 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
                 * chained interrupt. Reset it immediately because we
                 * do know better.
                 */
-               __irq_set_trigger(desc, irqd_get_trigger_type(&desc->irq_data));
-               desc->handle_irq = handle;
+               if (type != IRQ_TYPE_NONE) {
+                       __irq_set_trigger(desc, type);
+                       desc->handle_irq = handle;
+               }
 
                irq_settings_set_noprobe(desc);
                irq_settings_set_norequest(desc);
index dade4c9559cc036c1b6aa8567abf0b0887847923..7bc56762ca352fd47897690ef875bb4c05b7eed2 100644 (file)
@@ -5124,19 +5124,20 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
        struct trace_iterator *iter = filp->private_data;
        ssize_t sret;
 
-       /* return any leftover data */
-       sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
-       if (sret != -EBUSY)
-               return sret;
-
-       trace_seq_init(&iter->seq);
-
        /*
         * Avoid more than one consumer on a single file descriptor
         * This is just a matter of traces coherency, the ring buffer itself
         * is protected.
         */
        mutex_lock(&iter->mutex);
+
+       /* return any leftover data */
+       sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
+       if (sret != -EBUSY)
+               goto out;
+
+       trace_seq_init(&iter->seq);
+
        if (iter->trace->read) {
                sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
                if (sret)
@@ -6163,9 +6164,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                return -EBUSY;
 #endif
 
-       if (splice_grow_spd(pipe, &spd))
-               return -ENOMEM;
-
        if (*ppos & (PAGE_SIZE - 1))
                return -EINVAL;
 
@@ -6175,6 +6173,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                len &= PAGE_MASK;
        }
 
+       if (splice_grow_spd(pipe, &spd))
+               return -ENOMEM;
+
  again:
        trace_access_lock(iter->cpu_file);
        entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
@@ -6232,19 +6233,21 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        /* did we read anything? */
        if (!spd.nr_pages) {
                if (ret)
-                       return ret;
+                       goto out;
 
+               ret = -EAGAIN;
                if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
-                       return -EAGAIN;
+                       goto out;
 
                ret = wait_on_pipe(iter, true);
                if (ret)
-                       return ret;
+                       goto out;
 
                goto again;
        }
 
        ret = splice_to_pipe(pipe, &spd);
+out:
        splice_shrink_spd(&spd);
 
        return ret;
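
Two ordering problems are fixed in trace.c at once: the leftover-data fast path in tracing_read_pipe() ran before iter->mutex was taken, and tracing_buffers_splice_read() grew the pipe buffers before the *ppos and len sanity checks, leaking them on the early -EINVAL returns. The general rule, validate cheaply before acquiring or allocating and funnel failures through one cleanup label, in sketch form with made-up errno values:

#include <stdio.h>
#include <stdlib.h>

static int do_read(long pos, long len)
{
        void *res;
        int ret;

        if (pos & 4095)             /* validate first: nothing to undo yet */
                return -22;         /* -EINVAL */

        res = malloc(64);           /* only now allocate */
        if (!res)
                return -12;         /* -ENOMEM */

        ret = (int)len;             /* ... work that may fail jumps to out ... */
        goto out;
out:
        free(res);                  /* single cleanup path */
        return ret;
}

int main(void)
{
        printf("%d\n", do_read(1, 8));     /* -22, and no allocation leaked */
        printf("%d\n", do_read(4096, 8));  /* 8 */
        return 0;
}
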
index 2e2cca5092318faf62d10ed96a3b7d0a8b9cc129..cab7405f48d24f7048b1494f53caa157998b4cea 100644 (file)
@@ -821,7 +821,7 @@ config DETECT_HUNG_TASK
        help
          Say Y here to enable the kernel to detect "hung tasks",
          which are bugs that cause the task to be stuck in
-         uninterruptible "D" state indefinitiley.
+         uninterruptible "D" state indefinitely.
 
          When a hung task is detected, the kernel will print the
          current stack trace (which you should report), but the
index 1b7bf73141418f5f1427b14e806c9a9d4c6e7582..91f0727e3cada11bd721afdeeb112dd196546637 100644 (file)
@@ -105,10 +105,10 @@ static unsigned int radix_tree_descend(struct radix_tree_node *parent,
 
 #ifdef CONFIG_RADIX_TREE_MULTIORDER
        if (radix_tree_is_internal_node(entry)) {
-               unsigned long siboff = get_slot_offset(parent, entry);
-               if (siboff < RADIX_TREE_MAP_SIZE) {
-                       offset = siboff;
-                       entry = rcu_dereference_raw(parent->slots[offset]);
+               if (is_sibling_entry(parent, entry)) {
+                       void **sibentry = (void **) entry_to_node(entry);
+                       offset = get_slot_offset(parent, sibentry);
+                       entry = rcu_dereference_raw(*sibentry);
                }
        }
 #endif
index 8a287dfc53722c724fa3ec21992609fa2a8c3c58..2d0986a64f1f729548937e239860ac7620a16393 100644 (file)
  *   ->tasklist_lock            (memory_failure, collect_procs_ao)
  */
 
+static int page_cache_tree_insert(struct address_space *mapping,
+                                 struct page *page, void **shadowp)
+{
+       struct radix_tree_node *node;
+       void **slot;
+       int error;
+
+       error = __radix_tree_create(&mapping->page_tree, page->index, 0,
+                                   &node, &slot);
+       if (error)
+               return error;
+       if (*slot) {
+               void *p;
+
+               p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
+               if (!radix_tree_exceptional_entry(p))
+                       return -EEXIST;
+
+               mapping->nrexceptional--;
+               if (!dax_mapping(mapping)) {
+                       if (shadowp)
+                               *shadowp = p;
+                       if (node)
+                               workingset_node_shadows_dec(node);
+               } else {
+                       /* DAX can replace empty locked entry with a hole */
+                       WARN_ON_ONCE(p !=
+                               (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
+                                        RADIX_DAX_ENTRY_LOCK));
+                       /* DAX accounts exceptional entries as normal pages */
+                       if (node)
+                               workingset_node_pages_dec(node);
+                       /* Wakeup waiters for exceptional entry lock */
+                       dax_wake_mapping_entry_waiter(mapping, page->index,
+                                                     false);
+               }
+       }
+       radix_tree_replace_slot(slot, page);
+       mapping->nrpages++;
+       if (node) {
+               workingset_node_pages_inc(node);
+               /*
+                * Don't track node that contains actual pages.
+                *
+                * Avoid acquiring the list_lru lock if already
+                * untracked.  The list_empty() test is safe as
+                * node->private_list is protected by
+                * mapping->tree_lock.
+                */
+               if (!list_empty(&node->private_list))
+                       list_lru_del(&workingset_shadow_nodes,
+                                    &node->private_list);
+       }
+       return 0;
+}
+
 static void page_cache_tree_delete(struct address_space *mapping,
                                   struct page *page, void *shadow)
 {
@@ -561,7 +617,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 
                spin_lock_irqsave(&mapping->tree_lock, flags);
                __delete_from_page_cache(old, NULL);
-               error = radix_tree_insert(&mapping->page_tree, offset, new);
+               error = page_cache_tree_insert(mapping, new, NULL);
                BUG_ON(error);
                mapping->nrpages++;
 
@@ -584,62 +640,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL_GPL(replace_page_cache_page);
 
-static int page_cache_tree_insert(struct address_space *mapping,
-                                 struct page *page, void **shadowp)
-{
-       struct radix_tree_node *node;
-       void **slot;
-       int error;
-
-       error = __radix_tree_create(&mapping->page_tree, page->index, 0,
-                                   &node, &slot);
-       if (error)
-               return error;
-       if (*slot) {
-               void *p;
-
-               p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
-               if (!radix_tree_exceptional_entry(p))
-                       return -EEXIST;
-
-               mapping->nrexceptional--;
-               if (!dax_mapping(mapping)) {
-                       if (shadowp)
-                               *shadowp = p;
-                       if (node)
-                               workingset_node_shadows_dec(node);
-               } else {
-                       /* DAX can replace empty locked entry with a hole */
-                       WARN_ON_ONCE(p !=
-                               (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
-                                        RADIX_DAX_ENTRY_LOCK));
-                       /* DAX accounts exceptional entries as normal pages */
-                       if (node)
-                               workingset_node_pages_dec(node);
-                       /* Wakeup waiters for exceptional entry lock */
-                       dax_wake_mapping_entry_waiter(mapping, page->index,
-                                                     false);
-               }
-       }
-       radix_tree_replace_slot(slot, page);
-       mapping->nrpages++;
-       if (node) {
-               workingset_node_pages_inc(node);
-               /*
-                * Don't track node that contains actual pages.
-                *
-                * Avoid acquiring the list_lru lock if already
-                * untracked.  The list_empty() test is safe as
-                * node->private_list is protected by
-                * mapping->tree_lock.
-                */
-               if (!list_empty(&node->private_list))
-                       list_lru_del(&workingset_shadow_nodes,
-                                    &node->private_list);
-       }
-       return 0;
-}
-
 static int __add_to_page_cache_locked(struct page *page,
                                      struct address_space *mapping,
                                      pgoff_t offset, gfp_t gfp_mask,
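
This filemap.c change is purely a move: page_cache_tree_insert() is now defined above replace_page_cache_page(), which must call it instead of raw radix_tree_insert() so that shadow and exceptional entries are handled. In C a function must be declared before use, so the alternatives are moving the definition up, as done here, or a forward declaration, as in this toy sketch (names are hypothetical):

#include <stdio.h>

static int tree_insert(int key);        /* forward declaration */

static int replace_page(int key)
{
        return tree_insert(key);        /* legal: declaration already seen */
}

static int tree_insert(int key)
{
        return key + 1;
}

int main(void)
{
        printf("%d\n", replace_page(41));   /* 42 */
        return 0;
}
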
index a6abd76baa725d56eb1e9adeb8ee1b6fca86cb80..53ae6d00656aced3b21ebe1af8215fc1112af5eb 100644 (file)
@@ -1138,9 +1138,6 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
        bool was_writable;
        int flags = 0;
 
-       /* A PROT_NONE fault should not end up here */
-       BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
-
        fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);
        if (unlikely(!pmd_same(pmd, *fe->pmd)))
                goto out_unlock;
index 73d43bafd9fbc41ad322b9d26e80e548481a23b4..5048083b60f23bb2f0f78af969d6aa5df39aebfd 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -283,7 +283,8 @@ static inline struct rmap_item *alloc_rmap_item(void)
 {
        struct rmap_item *rmap_item;
 
-       rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
+       rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
+                                               __GFP_NORETRY | __GFP_NOWARN);
        if (rmap_item)
                ksm_rmap_items++;
        return rmap_item;
index 83be99d9d8a15006e2cb267a3dd46e10e470f66e..793fe0f9841c09ce87b1d0e0a43f1b416ead6390 100644 (file)
@@ -3351,9 +3351,6 @@ static int do_numa_page(struct fault_env *fe, pte_t pte)
        bool was_writable = pte_write(pte);
        int flags = 0;
 
-       /* A PROT_NONE fault should not end up here */
-       BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
-
        /*
        * The "pte" at this point cannot be used safely without
        * validation through pte_unmap_same(). It's of NUMA type but
@@ -3458,6 +3455,11 @@ static int wp_huge_pmd(struct fault_env *fe, pmd_t orig_pmd)
        return VM_FAULT_FALLBACK;
 }
 
+static inline bool vma_is_accessible(struct vm_area_struct *vma)
+{
+       return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE);
+}
+
 /*
  * These routines also need to handle stuff like marking pages dirty
  * and/or accessed for architectures that don't do it in hardware (most
@@ -3524,7 +3526,7 @@ static int handle_pte_fault(struct fault_env *fe)
        if (!pte_present(entry))
                return do_swap_page(fe, entry);
 
-       if (pte_protnone(entry))
+       if (pte_protnone(entry) && vma_is_accessible(fe->vma))
                return do_numa_page(fe, entry);
 
        fe->ptl = pte_lockptr(fe->vma->vm_mm, fe->pmd);
@@ -3590,7 +3592,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 
                barrier();
                if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
-                       if (pmd_protnone(orig_pmd))
+                       if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
                                return do_huge_pmd_numa_page(&fe, orig_pmd);
 
                        if ((fe.flags & FAULT_FLAG_WRITE) &&
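
Rather than letting a PROT_NONE fault reach the NUMA handlers and trip the BUG_ON assertions removed above (and in huge_memory.c), the accessibility test moves into the callers, so inaccessible VMAs simply skip the NUMA path. A sketch of turning an assertion into a caller-side guard, with simplified flag values:

#include <stdio.h>

#define VM_READ  0x1
#define VM_WRITE 0x2
#define VM_EXEC  0x4

struct vma { unsigned long flags; };

static int accessible(const struct vma *v)
{
        return v->flags & (VM_READ | VM_WRITE | VM_EXEC);
}

static const char *handle_fault(const struct vma *v, int protnone)
{
        if (protnone && accessible(v))
                return "numa path";     /* the old code asserted here instead */
        return "normal path";
}

int main(void)
{
        struct vma none = { 0 }, rw = { VM_READ | VM_WRITE };
        printf("%s / %s\n", handle_fault(&none, 1), handle_fault(&rw, 1));
        return 0;
}
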
index b58906b6215cbb3d2b43abbcdb758e0809c42c7f..9d29ba0f7192b6c71afc3e2e6bc6b9beff6ed09f 100644 (file)
@@ -1555,8 +1555,8 @@ static struct page *new_node_page(struct page *page, unsigned long private,
 {
        gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
        int nid = page_to_nid(page);
-       nodemask_t nmask = node_online_map;
-       struct page *new_page;
+       nodemask_t nmask = node_states[N_MEMORY];
+       struct page *new_page = NULL;
 
        /*
         * TODO: allocate a destination hugepage from a nearest neighbor node,
@@ -1567,14 +1567,14 @@ static struct page *new_node_page(struct page *page, unsigned long private,
                return alloc_huge_page_node(page_hstate(compound_head(page)),
                                        next_node_in(nid, nmask));
 
-       if (nid != next_node_in(nid, nmask))
-               node_clear(nid, nmask);
+       node_clear(nid, nmask);
 
        if (PageHighMem(page)
            || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
                gfp_mask |= __GFP_HIGHMEM;
 
-       new_page = __alloc_pages_nodemask(gfp_mask, 0,
+       if (!nodes_empty(nmask))
+               new_page = __alloc_pages_nodemask(gfp_mask, 0,
                                        node_zonelist(nid, gfp_mask), &nmask);
        if (!new_page)
                new_page = __alloc_pages(gfp_mask, 0,
index fd8b2b5741b141a7bc4457d93929c100d988a639..971fc83e6402858dd558d0f04609ddad38373eb4 100644 (file)
@@ -270,7 +270,7 @@ bool shmem_charge(struct inode *inode, long pages)
                info->alloced -= pages;
                shmem_recalc_inode(inode);
                spin_unlock_irqrestore(&info->lock, flags);
-
+               shmem_unacct_blocks(info->flags, pages);
                return false;
        }
        percpu_counter_add(&sbinfo->used_blocks, pages);
@@ -291,6 +291,7 @@ void shmem_uncharge(struct inode *inode, long pages)
 
        if (sbinfo->max_blocks)
                percpu_counter_sub(&sbinfo->used_blocks, pages);
+       shmem_unacct_blocks(info->flags, pages);
 }
 
 /*
@@ -1980,7 +1981,7 @@ unsigned long shmem_get_unmapped_area(struct file *file,
                                return addr;
                        sb = shm_mnt->mnt_sb;
                }
-               if (SHMEM_SB(sb)->huge != SHMEM_HUGE_NEVER)
+               if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
                        return addr;
        }
 
index b1e12a1ea9cfcc6ece1690305fc2d44bfdcf5b37..0fe8b7113868fa01f308331c48ebf36a4c1c87ac 100644 (file)
@@ -2303,23 +2303,6 @@ out:
        }
 }
 
-#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-static void init_tlb_ubc(void)
-{
-       /*
-        * This deliberately does not clear the cpumask as it's expensive
-        * and unnecessary. If there happens to be data in there then the
-        * first SWAP_CLUSTER_MAX pages will send an unnecessary IPI and
-        * then will be cleared.
-        */
-       current->tlb_ubc.flush_required = false;
-}
-#else
-static inline void init_tlb_ubc(void)
-{
-}
-#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
-
 /*
  * This is a basic per-node page freer.  Used by both kswapd and direct reclaim.
  */
@@ -2355,8 +2338,6 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
        scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
                         sc->priority == DEF_PRIORITY);
 
-       init_tlb_ubc();
-
        blk_start_plug(&plug);
        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
                                        nr[LRU_INACTIVE_FILE]) {
index 69551cfae97bd50bece343da57e2d12a79c2b74f..617475f529f42aed1e1ba34565e034ed7cad8157 100644 (file)
@@ -418,21 +418,19 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
         * no pages, so we expect to be able to remove them all and
         * delete and free the empty node afterwards.
         */
-
-       BUG_ON(!node->count);
-       BUG_ON(node->count & RADIX_TREE_COUNT_MASK);
+       BUG_ON(!workingset_node_shadows(node));
+       BUG_ON(workingset_node_pages(node));
 
        for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
                if (node->slots[i]) {
                        BUG_ON(!radix_tree_exceptional_entry(node->slots[i]));
                        node->slots[i] = NULL;
-                       BUG_ON(node->count < (1U << RADIX_TREE_COUNT_SHIFT));
-                       node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
+                       workingset_node_shadows_dec(node);
                        BUG_ON(!mapping->nrexceptional);
                        mapping->nrexceptional--;
                }
        }
-       BUG_ON(node->count);
+       BUG_ON(workingset_node_shadows(node));
        inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM);
        if (!__radix_tree_delete_node(&mapping->page_tree, node))
                BUG();
index 42396a74405df03002001d8f972599fa827bb3bf..a68f03133df9bd30543c7d7190f6e2b8d285c628 100644 (file)
@@ -363,6 +363,7 @@ is_mcounted_section_name(char const *const txtname)
                strcmp(".sched.text",    txtname) == 0 ||
                strcmp(".spinlock.text", txtname) == 0 ||
                strcmp(".irqentry.text", txtname) == 0 ||
+               strcmp(".softirqentry.text", txtname) == 0 ||
                strcmp(".kprobes.text", txtname) == 0 ||
                strcmp(".text.unlikely", txtname) == 0;
 }
index 96e2486a6fc479559eba03662317c82f802a0f43..2d48011bc36294b39602752c6141fe88075a0135 100755 (executable)
@@ -134,6 +134,7 @@ my %text_sections = (
      ".sched.text" => 1,
      ".spinlock.text" => 1,
      ".irqentry.text" => 1,
+     ".softirqentry.text" => 1,
      ".kprobes.text" => 1,
      ".text.unlikely" => 1,
 );
index 5adbfc32242f81b0f6396fd7d0656c5aa4eca6ff..17a06105ccb616ea91aaa5635341136dda1fc915 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/rcupdate.h>
 #include <linux/scatterlist.h>
 #include <linux/ctype.h>
+#include <crypto/aes.h>
 #include <crypto/hash.h>
 #include <crypto/sha.h>
 #include <crypto/skcipher.h>
@@ -478,6 +479,7 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        unsigned int encrypted_datalen;
+       u8 iv[AES_BLOCK_SIZE];
        unsigned int padlen;
        char pad[16];
        int ret;
@@ -500,8 +502,8 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
        sg_init_table(sg_out, 1);
        sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen);
 
-       skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen,
-                                  epayload->iv);
+       memcpy(iv, epayload->iv, sizeof(iv));
+       skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
        ret = crypto_skcipher_encrypt(req);
        tfm = crypto_skcipher_reqtfm(req);
        skcipher_request_free(req);
@@ -581,6 +583,7 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        unsigned int encrypted_datalen;
+       u8 iv[AES_BLOCK_SIZE];
        char pad[16];
        int ret;
 
@@ -599,8 +602,8 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
                   epayload->decrypted_datalen);
        sg_set_buf(&sg_out[1], pad, sizeof pad);
 
-       skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen,
-                                  epayload->iv);
+       memcpy(iv, epayload->iv, sizeof(iv));
+       skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
        ret = crypto_skcipher_decrypt(req);
        tfm = crypto_skcipher_reqtfm(req);
        skcipher_request_free(req);
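
The cipher may write back into the IV buffer it is handed (CBC updates it as it runs), so passing epayload->iv directly let the operation clobber the stored IV. Copying it into a stack buffer of AES_BLOCK_SIZE first keeps the payload intact. A standalone sketch of working on a copy before calling a routine that mutates its iv argument:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define AES_BLOCK_SIZE 16

/* stand-in for a CBC primitive: like the real API, it rewrites iv in place */
static void cbc_step(uint8_t iv[AES_BLOCK_SIZE])
{
        memset(iv, 0xff, AES_BLOCK_SIZE);
}

int main(void)
{
        uint8_t stored_iv[AES_BLOCK_SIZE] = { 1, 2, 3 };   /* epayload->iv */
        uint8_t iv[AES_BLOCK_SIZE];

        memcpy(iv, stored_iv, sizeof(iv));  /* work on a copy */
        cbc_step(iv);
        printf("%d\n", stored_iv[0]);       /* 1: the original IV survives */
        return 0;
}
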
index dd48f421844c7902773526d8c0845109ab9c5e55..f64c57bf1d4baace6f988a5fd0b95b894ce2fb49 100644 (file)
@@ -603,7 +603,8 @@ static int nfit_test0_alloc(struct nfit_test *t)
                        return -ENOMEM;
                sprintf(t->label[i], "label%d", i);
 
-               t->flush[i] = test_alloc(t, sizeof(u64) * NUM_HINTS,
+               t->flush[i] = test_alloc(t, max(PAGE_SIZE,
+                                       sizeof(u64) * NUM_HINTS),
                                &t->flush_dma[i]);
                if (!t->flush[i])
                        return -ENOMEM;
index 3b530467148ec44d4b329e5bece6c5d9b9c056d4..9d0919ed52a4088d237ba47aa00219e1e1dee47b 100644 (file)
@@ -1,5 +1,5 @@
 
-CFLAGS += -I. -g -Wall -D_LGPL_SOURCE
+CFLAGS += -I. -g -O2 -Wall -D_LGPL_SOURCE
 LDFLAGS += -lpthread -lurcu
 TARGETS = main
 OFILES = main.o radix-tree.o linux.o test.o tag_check.o find_next_bit.o \
index 39d9b9568fe2291661e8ea49879670af5dc1a27d..05d7bc4889711d44d57d9b58224b21cd079b6c45 100644 (file)
@@ -124,6 +124,8 @@ static void multiorder_check(unsigned long index, int order)
        unsigned long i;
        unsigned long min = index & ~((1UL << order) - 1);
        unsigned long max = min + (1UL << order);
+       void **slot;
+       struct item *item2 = item_create(min);
        RADIX_TREE(tree, GFP_KERNEL);
 
        printf("Multiorder index %ld, order %d\n", index, order);
@@ -139,13 +141,19 @@ static void multiorder_check(unsigned long index, int order)
                item_check_absent(&tree, i);
        for (i = max; i < 2*max; i++)
                item_check_absent(&tree, i);
+       for (i = min; i < max; i++)
+               assert(radix_tree_insert(&tree, i, item2) == -EEXIST);
+
+       slot = radix_tree_lookup_slot(&tree, index);
+       free(*slot);
+       radix_tree_replace_slot(slot, item2);
        for (i = min; i < max; i++) {
-               static void *entry = (void *)
-                                       (0xA0 | RADIX_TREE_EXCEPTIONAL_ENTRY);
-               assert(radix_tree_insert(&tree, i, entry) == -EEXIST);
+               struct item *item = item_lookup(&tree, i);
+               assert(item != 0);
+               assert(item->index == min);
        }
 
-       assert(item_delete(&tree, index) != 0);
+       assert(item_delete(&tree, min) != 0);
 
        for (i = 0; i < 2*max; i++)
                item_check_absent(&tree, i);