Merge tag 'drm-psr-fixes-for-v4.8' of git://people.freedesktop.org/~airlied/linux
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 2 Aug 2016 23:29:03 +0000 (19:29 -0400)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 2 Aug 2016 23:29:03 +0000 (19:29 -0400)
Pull i915 drm fixes from Dave Airlie:
 "These are the two fixes from Ville for the bug you are seeing on your
  HSW laptop.

  They pretty much disable PSR in some cases where the panel reports a
  setup time that would cause issues, like you seem to have"

* tag 'drm-psr-fixes-for-v4.8' of git://people.freedesktop.org/~airlied/linux:
  drm/i915: Check PSR setup time vs. vblank length
  drm/dp: Add drm_dp_psr_setup_time()

133 files changed:
Documentation/PCI/MSI-HOWTO.txt
Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
Documentation/devicetree/bindings/mtd/atmel-quadspi.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
Documentation/devicetree/bindings/mtd/cadence-quadspi.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mtd/gpmc-nand.txt
Documentation/devicetree/bindings/mtd/hisilicon,fmc-spi-nor.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mtd/mtk-nand.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mtd/sunxi-nand.txt
Documentation/devicetree/bindings/pci/aardvark-pci.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pci/axis,artpec6-pcie.txt [new file with mode: 0644]
Documentation/kernel-parameters.txt
MAINTAINERS
arch/arm/Kconfig
arch/arm/include/asm/mach/pci.h
arch/arm/kernel/bios32.c
arch/arm64/Kconfig
arch/arm64/boot/dts/marvell/armada-3720-db.dts
arch/arm64/boot/dts/marvell/armada-37xx.dtsi
arch/arm64/kernel/pci.c
arch/cris/arch-v10/drivers/axisflashmap.c
arch/cris/arch-v32/drivers/axisflashmap.c
arch/microblaze/include/asm/pci.h
arch/microblaze/pci/pci-common.c
arch/mips/include/asm/pci.h
arch/mips/pci/pci.c
arch/powerpc/include/asm/pci.h
arch/powerpc/kernel/pci-common.c
arch/sparc/include/asm/pci_64.h
arch/sparc/kernel/pci.c
arch/unicore32/kernel/pci.c
arch/x86/pci/common.c
arch/x86/pci/vmd.c
drivers/acpi/Kconfig
drivers/acpi/Makefile
drivers/acpi/pci_mcfg.c [new file with mode: 0644]
drivers/acpi/pci_root.c
drivers/irqchip/Kconfig
drivers/memory/Kconfig
drivers/memory/fsl_ifc.c
drivers/misc/genwqe/card_base.c
drivers/mtd/chips/cfi_cmdset_0020.c
drivers/mtd/devices/Kconfig
drivers/mtd/devices/m25p80.c
drivers/mtd/maps/physmap_of.c
drivers/mtd/maps/pmcmsp-flash.c
drivers/mtd/maps/sa1100-flash.c
drivers/mtd/nand/Kconfig
drivers/mtd/nand/Makefile
drivers/mtd/nand/brcmnand/brcmnand.c
drivers/mtd/nand/jz4780_bch.c
drivers/mtd/nand/jz4780_nand.c
drivers/mtd/nand/mtk_ecc.c [new file with mode: 0644]
drivers/mtd/nand/mtk_ecc.h [new file with mode: 0644]
drivers/mtd/nand/mtk_nand.c [new file with mode: 0644]
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/nand_ids.c
drivers/mtd/nand/omap2.c
drivers/mtd/nand/sunxi_nand.c
drivers/mtd/nand/xway_nand.c
drivers/mtd/onenand/onenand_base.c
drivers/mtd/spi-nor/Kconfig
drivers/mtd/spi-nor/Makefile
drivers/mtd/spi-nor/atmel-quadspi.c [new file with mode: 0644]
drivers/mtd/spi-nor/cadence-quadspi.c [new file with mode: 0644]
drivers/mtd/spi-nor/fsl-quadspi.c
drivers/mtd/spi-nor/hisi-sfc.c [new file with mode: 0644]
drivers/mtd/spi-nor/mtk-quadspi.c
drivers/mtd/spi-nor/nxp-spifi.c
drivers/mtd/spi-nor/spi-nor.c
drivers/mtd/ssfdc.c
drivers/mtd/tests/nandbiterrs.c
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/nvme/host/pci.c
drivers/pci/Kconfig
drivers/pci/bus.c
drivers/pci/ecam.c
drivers/pci/ecam.h [deleted file]
drivers/pci/host/Kconfig
drivers/pci/host/Makefile
drivers/pci/host/pci-aardvark.c [new file with mode: 0644]
drivers/pci/host/pci-dra7xx.c
drivers/pci/host/pci-host-common.c
drivers/pci/host/pci-host-generic.c
drivers/pci/host/pci-hyperv.c
drivers/pci/host/pci-keystone.c
drivers/pci/host/pci-layerscape.c
drivers/pci/host/pci-mvebu.c
drivers/pci/host/pci-rcar-gen2.c
drivers/pci/host/pci-tegra.c
drivers/pci/host/pci-thunder-ecam.c
drivers/pci/host/pci-thunder-pem.c
drivers/pci/host/pci-versatile.c
drivers/pci/host/pci-xgene.c
drivers/pci/host/pcie-altera.c
drivers/pci/host/pcie-armada8k.c
drivers/pci/host/pcie-artpec6.c [new file with mode: 0644]
drivers/pci/host/pcie-designware-plat.c
drivers/pci/host/pcie-designware.c
drivers/pci/host/pcie-hisi.c
drivers/pci/host/pcie-iproc.c
drivers/pci/host/pcie-rcar.c
drivers/pci/host/pcie-xilinx-nwl.c
drivers/pci/host/pcie-xilinx.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/hotplug/pciehp_hpc.c
drivers/pci/msi.c
drivers/pci/pci-driver.c
drivers/pci/pci-sysfs.c
drivers/pci/pci.c
drivers/pci/pci.h
drivers/pci/pcie/Kconfig
drivers/pci/pcie/aspm.c
drivers/pci/pcie/pcie-dpc.c
drivers/pci/pcie/portdrv_core.c
drivers/pci/pcie/portdrv_pci.c
drivers/pci/probe.c
drivers/pci/proc.c
drivers/pci/quirks.c
drivers/pci/remove.c
drivers/pci/setup-bus.c
drivers/scsi/lpfc/lpfc_init.c
drivers/usb/host/xhci-pci.c
include/linux/mtd/nand.h
include/linux/mtd/spi-nor.h
include/linux/pci-acpi.h
include/linux/pci-ecam.h [new file with mode: 0644]
include/linux/pci.h

index 1179850f453c66849c1808f83b0955a63cd33ad1..c55df2911136c90d37944616cfeebaeca4954a96 100644 (file)
@@ -78,422 +78,111 @@ CONFIG_PCI_MSI option.
 
 4.2 Using MSI
 
-Most of the hard work is done for the driver in the PCI layer.  It simply
-has to request that the PCI layer set up the MSI capability for this
+Most of the hard work is done for the driver in the PCI layer.  The driver
+simply has to request that the PCI layer set up the MSI capability for this
 device.
 
-4.2.1 pci_enable_msi
+To automatically use MSI or MSI-X interrupt vectors, use the following
+function:
 
-int pci_enable_msi(struct pci_dev *dev)
+  int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+               unsigned int max_vecs, unsigned int flags);
 
-A successful call allocates ONE interrupt to the device, regardless
-of how many MSIs the device supports.  The device is switched from
-pin-based interrupt mode to MSI mode.  The dev->irq number is changed
-to a new number which represents the message signaled interrupt;
-consequently, this function should be called before the driver calls
-request_irq(), because an MSI is delivered via a vector that is
-different from the vector of a pin-based interrupt.
+which allocates up to max_vecs interrupt vectors for a PCI device.  It
+returns the number of vectors allocated or a negative error.  If the device
+has a requirement for a minimum number of vectors the driver can pass a
+min_vecs argument set to this limit, and the PCI core will return -ENOSPC
+if it can't meet the minimum number of vectors.
 
-4.2.2 pci_enable_msi_range
+The flags argument should normally be set to 0, but can be used to pass the
+PCI_IRQ_NOMSI and PCI_IRQ_NOMSIX flags in case a device claims to support
+MSI or MSI-X, but the support is broken, or to pass PCI_IRQ_NOLEGACY in
+case the device does not support legacy interrupt lines.
 
-int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
+By default this function will spread the interrupts around the available
+CPUs, but this feature can be disabled by passing the PCI_IRQ_NOAFFINITY
+flag.
 
-This function allows a device driver to request any number of MSI
-interrupts within specified range from 'minvec' to 'maxvec'.
+To get the Linux IRQ numbers to pass to request_irq() and free_irq() for the
+allocated vectors, use the following function:
 
-If this function returns a positive number it indicates the number of
-MSI interrupts that have been successfully allocated.  In this case
-the device is switched from pin-based interrupt mode to MSI mode and
-updates dev->irq to be the lowest of the new interrupts assigned to it.
-The other interrupts assigned to the device are in the range dev->irq
-to dev->irq + returned value - 1.  Device driver can use the returned
-number of successfully allocated MSI interrupts to further allocate
-and initialize device resources.
+  int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
 
-If this function returns a negative number, it indicates an error and
-the driver should not attempt to request any more MSI interrupts for
-this device.
+Any allocated resources should be freed before removing the device using
+the following function:
 
-This function should be called before the driver calls request_irq(),
-because MSI interrupts are delivered via vectors that are different
-from the vector of a pin-based interrupt.
+  void pci_free_irq_vectors(struct pci_dev *dev);
 
-It is ideal if drivers can cope with a variable number of MSI interrupts;
-there are many reasons why the platform may not be able to provide the
-exact number that a driver asks for.
+If a device supports both MSI-X and MSI capabilities, this API will use the
+MSI-X facilities in preference to the MSI facilities.  MSI-X supports any
+number of interrupts between 1 and 2048.  In contrast, MSI is restricted to
+a maximum of 32 interrupts (and must be a power of two).  In addition, the
+MSI interrupt vectors must be allocated consecutively, so the system might
+not be able to allocate as many vectors for MSI as it could for MSI-X.  On
+some platforms, MSI interrupts must all be targeted at the same set of CPUs
+whereas MSI-X interrupts can all be targeted at different CPUs.
 
-There could be devices that can not operate with just any number of MSI
-interrupts within a range.  See chapter 4.3.1.3 to get the idea how to
-handle such devices for MSI-X - the same logic applies to MSI.
+If a device supports neither MSI-X nor MSI it will fall back to a single
+legacy IRQ vector.
 
-4.2.1.1 Maximum possible number of MSI interrupts
+The typical usage of MSI or MSI-X interrupts is to allocate as many vectors
+as possible, likely up to the limit supported by the device.  If nvec is
+larger than the number supported by the device it will automatically be
+capped to the supported limit, so there is no need to query the number of
+vectors supported beforehand:
 
-The typical usage of MSI interrupts is to allocate as many vectors as
-possible, likely up to the limit returned by pci_msi_vec_count() function:
-
-static int foo_driver_enable_msi(struct pci_dev *pdev, int nvec)
-{
-       return pci_enable_msi_range(pdev, 1, nvec);
-}
-
-Note the value of 'minvec' parameter is 1.  As 'minvec' is inclusive,
-the value of 0 would be meaningless and could result in error.
-
-Some devices have a minimal limit on number of MSI interrupts.
-In this case the function could look like this:
-
-static int foo_driver_enable_msi(struct pci_dev *pdev, int nvec)
-{
-       return pci_enable_msi_range(pdev, FOO_DRIVER_MINIMUM_NVEC, nvec);
-}
-
-4.2.1.2 Exact number of MSI interrupts
+       nvec = pci_alloc_irq_vectors(pdev, 1, nvec, 0);
+       if (nvec < 0)
+               goto out_err;
 
 If a driver is unable or unwilling to deal with a variable number of MSI
-interrupts it could request a particular number of interrupts by passing
-that number to pci_enable_msi_range() function as both 'minvec' and 'maxvec'
-parameters:
-
-static int foo_driver_enable_msi(struct pci_dev *pdev, int nvec)
-{
-       return pci_enable_msi_range(pdev, nvec, nvec);
-}
-
-Note, unlike pci_enable_msi_exact() function, which could be also used to
-enable a particular number of MSI-X interrupts, pci_enable_msi_range()
-returns either a negative errno or 'nvec' (not negative errno or 0 - as
-pci_enable_msi_exact() does).
-
-4.2.1.3 Single MSI mode
-
-The most notorious example of the request type described above is
-enabling the single MSI mode for a device.  It could be done by passing
-two 1s as 'minvec' and 'maxvec':
-
-static int foo_driver_enable_single_msi(struct pci_dev *pdev)
-{
-       return pci_enable_msi_range(pdev, 1, 1);
-}
-
-Note, unlike pci_enable_msi() function, which could be also used to
-enable the single MSI mode, pci_enable_msi_range() returns either a
-negative errno or 1 (not negative errno or 0 - as pci_enable_msi()
-does).
-
-4.2.3 pci_enable_msi_exact
-
-int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
-
-This variation on pci_enable_msi_range() call allows a device driver to
-request exactly 'nvec' MSIs.
-
-If this function returns a negative number, it indicates an error and
-the driver should not attempt to request any more MSI interrupts for
-this device.
-
-By contrast with pci_enable_msi_range() function, pci_enable_msi_exact()
-returns zero in case of success, which indicates MSI interrupts have been
-successfully allocated.
-
-4.2.4 pci_disable_msi
-
-void pci_disable_msi(struct pci_dev *dev)
-
-This function should be used to undo the effect of pci_enable_msi_range().
-Calling it restores dev->irq to the pin-based interrupt number and frees
-the previously allocated MSIs.  The interrupts may subsequently be assigned
-to another device, so drivers should not cache the value of dev->irq.
-
-Before calling this function, a device driver must always call free_irq()
-on any interrupt for which it previously called request_irq().
-Failure to do so results in a BUG_ON(), leaving the device with
-MSI enabled and thus leaking its vector.
-
-4.2.4 pci_msi_vec_count
-
-int pci_msi_vec_count(struct pci_dev *dev)
-
-This function could be used to retrieve the number of MSI vectors the
-device requested (via the Multiple Message Capable register). The MSI
-specification only allows the returned value to be a power of two,
-up to a maximum of 2^5 (32).
-
-If this function returns a negative number, it indicates the device is
-not capable of sending MSIs.
-
-If this function returns a positive number, it indicates the maximum
-number of MSI interrupt vectors that could be allocated.
-
-4.3 Using MSI-X
-
-The MSI-X capability is much more flexible than the MSI capability.
-It supports up to 2048 interrupts, each of which can be controlled
-independently.  To support this flexibility, drivers must use an array of
-`struct msix_entry':
-
-struct msix_entry {
-       u16     vector; /* kernel uses to write alloc vector */
-       u16     entry; /* driver uses to specify entry */
-};
-
-This allows for the device to use these interrupts in a sparse fashion;
-for example, it could use interrupts 3 and 1027 and yet allocate only a
-two-element array.  The driver is expected to fill in the 'entry' value
-in each element of the array to indicate for which entries the kernel
-should assign interrupts; it is invalid to fill in two entries with the
-same number.
-
-4.3.1 pci_enable_msix_range
-
-int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
-                         int minvec, int maxvec)
-
-Calling this function asks the PCI subsystem to allocate any number of
-MSI-X interrupts within specified range from 'minvec' to 'maxvec'.
-The 'entries' argument is a pointer to an array of msix_entry structs
-which should be at least 'maxvec' entries in size.
-
-On success, the device is switched into MSI-X mode and the function
-returns the number of MSI-X interrupts that have been successfully
-allocated.  In this case the 'vector' member in entries numbered from
-0 to the returned value - 1 is populated with the interrupt number;
-the driver should then call request_irq() for each 'vector' that it
-decides to use.  The device driver is responsible for keeping track of the
-interrupts assigned to the MSI-X vectors so it can free them again later.
-Device driver can use the returned number of successfully allocated MSI-X
-interrupts to further allocate and initialize device resources.
-
-If this function returns a negative number, it indicates an error and
-the driver should not attempt to allocate any more MSI-X interrupts for
-this device.
-
-This function, in contrast with pci_enable_msi_range(), does not adjust
-dev->irq.  The device will not generate interrupts for this interrupt
-number once MSI-X is enabled.
-
-Device drivers should normally call this function once per device
-during the initialization phase.
-
-It is ideal if drivers can cope with a variable number of MSI-X interrupts;
-there are many reasons why the platform may not be able to provide the
-exact number that a driver asks for.
-
-There could be devices that can not operate with just any number of MSI-X
-interrupts within a range.  E.g., an network adapter might need let's say
-four vectors per each queue it provides.  Therefore, a number of MSI-X
-interrupts allocated should be a multiple of four.  In this case interface
-pci_enable_msix_range() can not be used alone to request MSI-X interrupts
-(since it can allocate any number within the range, without any notion of
-the multiple of four) and the device driver should master a custom logic
-to request the required number of MSI-X interrupts.
-
-4.3.1.1 Maximum possible number of MSI-X interrupts
-
-The typical usage of MSI-X interrupts is to allocate as many vectors as
-possible, likely up to the limit returned by pci_msix_vec_count() function:
-
-static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
-{
-       return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
-                                    1, nvec);
-}
-
-Note the value of 'minvec' parameter is 1.  As 'minvec' is inclusive,
-the value of 0 would be meaningless and could result in error.
-
-Some devices have a minimal limit on number of MSI-X interrupts.
-In this case the function could look like this:
-
-static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
-{
-       return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
-                                    FOO_DRIVER_MINIMUM_NVEC, nvec);
-}
-
-4.3.1.2 Exact number of MSI-X interrupts
-
-If a driver is unable or unwilling to deal with a variable number of MSI-X
-interrupts it could request a particular number of interrupts by passing
-that number to pci_enable_msix_range() function as both 'minvec' and 'maxvec'
-parameters:
-
-static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
-{
-       return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
-                                    nvec, nvec);
-}
-
-Note, unlike pci_enable_msix_exact() function, which could be also used to
-enable a particular number of MSI-X interrupts, pci_enable_msix_range()
-returns either a negative errno or 'nvec' (not negative errno or 0 - as
-pci_enable_msix_exact() does).
-
-4.3.1.3 Specific requirements to the number of MSI-X interrupts
-
-As noted above, there could be devices that can not operate with just any
-number of MSI-X interrupts within a range.  E.g., let's assume a device that
-is only capable sending the number of MSI-X interrupts which is a power of
-two.  A routine that enables MSI-X mode for such device might look like this:
-
-/*
- * Assume 'minvec' and 'maxvec' are non-zero
- */
-static int foo_driver_enable_msix(struct foo_adapter *adapter,
-                                 int minvec, int maxvec)
-{
-       int rc;
-
-       minvec = roundup_pow_of_two(minvec);
-       maxvec = rounddown_pow_of_two(maxvec);
-
-       if (minvec > maxvec)
-               return -ERANGE;
-
-retry:
-       rc = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
-                                  maxvec, maxvec);
-       /*
-        * -ENOSPC is the only error code allowed to be analyzed
-        */
-       if (rc == -ENOSPC) {
-               if (maxvec == 1)
-                       return -ENOSPC;
-
-               maxvec /= 2;
-
-               if (minvec > maxvec)
-                       return -ENOSPC;
-
-               goto retry;
-       }
-
-       return rc;
-}
-
-Note how pci_enable_msix_range() return value is analyzed for a fallback -
-any error code other than -ENOSPC indicates a fatal error and should not
-be retried.
-
-4.3.2 pci_enable_msix_exact
-
-int pci_enable_msix_exact(struct pci_dev *dev,
-                         struct msix_entry *entries, int nvec)
-
-This variation on pci_enable_msix_range() call allows a device driver to
-request exactly 'nvec' MSI-Xs.
-
-If this function returns a negative number, it indicates an error and
-the driver should not attempt to allocate any more MSI-X interrupts for
-this device.
-
-By contrast with pci_enable_msix_range() function, pci_enable_msix_exact()
-returns zero in case of success, which indicates MSI-X interrupts have been
-successfully allocated.
-
-Another version of a routine that enables MSI-X mode for a device with
-specific requirements described in chapter 4.3.1.3 might look like this:
-
-/*
- * Assume 'minvec' and 'maxvec' are non-zero
- */
-static int foo_driver_enable_msix(struct foo_adapter *adapter,
-                                 int minvec, int maxvec)
-{
-       int rc;
-
-       minvec = roundup_pow_of_two(minvec);
-       maxvec = rounddown_pow_of_two(maxvec);
-
-       if (minvec > maxvec)
-               return -ERANGE;
-
-retry:
-       rc = pci_enable_msix_exact(adapter->pdev,
-                                  adapter->msix_entries, maxvec);
-
-       /*
-        * -ENOSPC is the only error code allowed to be analyzed
-        */
-       if (rc == -ENOSPC) {
-               if (maxvec == 1)
-                       return -ENOSPC;
-
-               maxvec /= 2;
-
-               if (minvec > maxvec)
-                       return -ENOSPC;
-
-               goto retry;
-       } else if (rc < 0) {
-               return rc;
-       }
-
-       return maxvec;
-}
-
-4.3.3 pci_disable_msix
-
-void pci_disable_msix(struct pci_dev *dev)
-
-This function should be used to undo the effect of pci_enable_msix_range().
-It frees the previously allocated MSI-X interrupts. The interrupts may
-subsequently be assigned to another device, so drivers should not cache
-the value of the 'vector' elements over a call to pci_disable_msix().
-
-Before calling this function, a device driver must always call free_irq()
-on any interrupt for which it previously called request_irq().
-Failure to do so results in a BUG_ON(), leaving the device with
-MSI-X enabled and thus leaking its vector.
-
-4.3.3 The MSI-X Table
-
-The MSI-X capability specifies a BAR and offset within that BAR for the
-MSI-X Table.  This address is mapped by the PCI subsystem, and should not
-be accessed directly by the device driver.  If the driver wishes to
-mask or unmask an interrupt, it should call disable_irq() / enable_irq().
+interrupts it can request a particular number of interrupts by passing that
+number to the pci_alloc_irq_vectors() function as both 'min_vecs' and
+'max_vecs' parameters:
 
-4.3.4 pci_msix_vec_count
+       ret = pci_alloc_irq_vectors(pdev, nvec, nvec, 0);
+       if (ret < 0)
+               goto out_err;
 
-int pci_msix_vec_count(struct pci_dev *dev)
+The most notorious example of the request type described above is enabling
+the single MSI mode for a device.  It could be done by passing two 1s as
+'min_vecs' and 'max_vecs':
 
-This function could be used to retrieve number of entries in the device
-MSI-X table.
+       ret = pci_alloc_irq_vectors(pdev, 1, 1, 0);
+       if (ret < 0)
+               goto out_err;
 
-If this function returns a negative number, it indicates the device is
-not capable of sending MSI-Xs.
+Some devices might not support using legacy line interrupts, in which case
+the PCI_IRQ_NOLEGACY flag can be used to fail the request if the platform
+can't provide MSI or MSI-X interrupts:
 
-If this function returns a positive number, it indicates the maximum
-number of MSI-X interrupt vectors that could be allocated.
+       nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_NOLEGACY);
+       if (nvec < 0)
+               goto out_err;
 
-4.4 Handling devices implementing both MSI and MSI-X capabilities
+4.3 Legacy APIs
 
-If a device implements both MSI and MSI-X capabilities, it can
-run in either MSI mode or MSI-X mode, but not both simultaneously.
-This is a requirement of the PCI spec, and it is enforced by the
-PCI layer.  Calling pci_enable_msi_range() when MSI-X is already
-enabled or pci_enable_msix_range() when MSI is already enabled
-results in an error.  If a device driver wishes to switch between MSI
-and MSI-X at runtime, it must first quiesce the device, then switch
-it back to pin-interrupt mode, before calling pci_enable_msi_range()
-or pci_enable_msix_range() and resuming operation.  This is not expected
-to be a common operation but may be useful for debugging or testing
-during development.
+The following old APIs to enable and disable MSI or MSI-X interrupts should
+not be used in new code:
 
-4.5 Considerations when using MSIs
+  pci_enable_msi()             /* deprecated */
+  pci_enable_msi_range()       /* deprecated */
+  pci_enable_msi_exact()       /* deprecated */
+  pci_disable_msi()            /* deprecated */
+  pci_enable_msix_range()      /* deprecated */
+  pci_enable_msix_exact()      /* deprecated */
+  pci_disable_msix()           /* deprecated */
 
-4.5.1 Choosing between MSI-X and MSI
+Additionally there are APIs to provide the number of supported MSI or MSI-X
+vectors: pci_msi_vec_count() and pci_msix_vec_count().  In general these
+should be avoided in favor of letting pci_alloc_irq_vectors() cap the
+number of vectors.  If you have a legitimate special use case for the count
+of vectors we might have to revisit that decision and add a
+pci_nr_irq_vectors() helper that handles MSI and MSI-X transparently.
 
-If your device supports both MSI-X and MSI capabilities, you should use
-the MSI-X facilities in preference to the MSI facilities.  As mentioned
-above, MSI-X supports any number of interrupts between 1 and 2048.
-In contrast, MSI is restricted to a maximum of 32 interrupts (and
-must be a power of two).  In addition, the MSI interrupt vectors must
-be allocated consecutively, so the system might not be able to allocate
-as many vectors for MSI as it could for MSI-X.  On some platforms, MSI
-interrupts must all be targeted at the same set of CPUs whereas MSI-X
-interrupts can all be targeted at different CPUs.
+4.4 Considerations when using MSIs
 
-4.5.2 Spinlocks
+4.4.1 Spinlocks
 
 Most device drivers have a per-device spinlock which is taken in the
 interrupt handler.  With pin-based interrupts or a single MSI, it is not
@@ -505,7 +194,7 @@ acquire the spinlock.  Such deadlocks can be avoided by using
 spin_lock_irqsave() or spin_lock_irq() which disable local interrupts
 and acquire the lock (see Documentation/DocBook/kernel-locking).
 
-4.6 How to tell whether MSI/MSI-X is enabled on a device
+4.5 How to tell whether MSI/MSI-X is enabled on a device
 
 Using 'lspci -v' (as root) may show some devices with "MSI", "Message
 Signalled Interrupts" or "MSI-X" capabilities.  Each of these capabilities
index 21055e21023406e9076bc12d213e57dc3740eaac..c1359f4d48d709efe12d7c081ee45878d0f2676e 100644 (file)
@@ -46,6 +46,10 @@ Required properties:
                        0 maps to GPMC_WAIT0 pin.
  - gpio-cells:         Must be set to 2
 
+Required properties when using NAND prefetch dma:
+ - dmas                        GPMC NAND prefetch dma channel
+ - dma-names           Must be set to "rxtx"
+
 Timing properties for child nodes. All are optional and default to 0.
 
  - gpmc,sync-clk-ps:   Minimum clock period for synchronous mode, in picoseconds
@@ -137,7 +141,8 @@ Example for an AM33xx board:
                ti,hwmods = "gpmc";
                reg = <0x50000000 0x2000>;
                interrupts = <100>;
-
+               dmas = <&edma 52 0>;
+               dma-names = "rxtx";
                gpmc,num-cs = <8>;
                gpmc,num-waitpins = <2>;
                #address-cells = <2>;
diff --git a/Documentation/devicetree/bindings/mtd/atmel-quadspi.txt b/Documentation/devicetree/bindings/mtd/atmel-quadspi.txt
new file mode 100644 (file)
index 0000000..4898070
--- /dev/null
@@ -0,0 +1,32 @@
+* Atmel Quad Serial Peripheral Interface (QSPI)
+
+Required properties:
+- compatible:     Should be "atmel,sama5d2-qspi".
+- reg:            Should contain the locations and lengths of the base registers
+                  and the mapped memory.
+- reg-names:      Should contain the resource reg names:
+                  - qspi_base: configuration register address space
+                  - qspi_mmap: memory mapped address space
+- interrupts:     Should contain the interrupt for the device.
+- clocks:         The phandle of the clock needed by the QSPI controller.
+- #address-cells: Should be <1>.
+- #size-cells:    Should be <0>.
+
+Example:
+
+spi@f0020000 {
+       compatible = "atmel,sama5d2-qspi";
+       reg = <0xf0020000 0x100>, <0xd0000000 0x8000000>;
+       reg-names = "qspi_base", "qspi_mmap";
+       interrupts = <52 IRQ_TYPE_LEVEL_HIGH 7>;
+       clocks = <&spi0_clk>;
+       #address-cells = <1>;
+       #size-cells = <0>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_spi0_default>;
+       status = "okay";
+
+       m25p80@0 {
+               ...
+       };
+};
index 7066597c9a81850af6db19424b4c7449baf828b1..b40f3a49280057e79530d3d72c90b464a203c044 100644 (file)
@@ -27,6 +27,7 @@ Required properties:
                          brcm,brcmnand-v6.2
                          brcm,brcmnand-v7.0
                          brcm,brcmnand-v7.1
+                         brcm,brcmnand-v7.2
                          brcm,brcmnand
 - reg              : the register start and length for NAND register region.
                      (optional) Flash DMA register range (if present)
diff --git a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
new file mode 100644 (file)
index 0000000..f248056
--- /dev/null
@@ -0,0 +1,56 @@
+* Cadence Quad SPI controller
+
+Required properties:
+- compatible : Should be "cdns,qspi-nor".
+- reg : Contains two entries, each of which is a tuple consisting of a
+       physical address and length. The first entry is the address and
+       length of the controller register set. The second entry is the
+       address and length of the QSPI Controller data area.
+- interrupts : Unit interrupt specifier for the controller interrupt.
+- clocks : phandle to the Quad SPI clock.
+- cdns,fifo-depth : Size of the data FIFO in words.
+- cdns,fifo-width : Bus width of the data FIFO in bytes.
+- cdns,trigger-address : 32-bit indirect AHB trigger address.
+
+Optional properties:
+- cdns,is-decoded-cs : Flag to indicate whether decoder is used or not.
+
+Optional subnodes:
+Subnodes of the Cadence Quad SPI controller are spi slave nodes with additional
+custom properties:
+- cdns,read-delay : Delay for read capture logic, in clock cycles
+- cdns,tshsl-ns : Delay in nanoseconds for the length that the master
+                  mode chip select outputs are de-asserted between
+                 transactions.
+- cdns,tsd2d-ns : Delay in nanoseconds between one chip select being
+                  de-activated and the activation of another.
+- cdns,tchsh-ns : Delay in nanoseconds between last bit of current
+                  transaction and deasserting the device chip select
+                 (qspi_n_ss_out).
+- cdns,tslch-ns : Delay in nanoseconds between setting qspi_n_ss_out low
+                  and first bit transfer.
+
+Example:
+
+       qspi: spi@ff705000 {
+               compatible = "cdns,qspi-nor";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               reg = <0xff705000 0x1000>,
+                     <0xffa00000 0x1000>;
+               interrupts = <0 151 4>;
+               clocks = <&qspi_clk>;
+               cdns,is-decoded-cs;
+               cdns,fifo-depth = <128>;
+               cdns,fifo-width = <4>;
+               cdns,trigger-address = <0x00000000>;
+
+               flash0: n25q00@0 {
+                       ...
+                       cdns,read-delay = <4>;
+                       cdns,tshsl-ns = <50>;
+                       cdns,tsd2d-ns = <50>;
+                       cdns,tchsh-ns = <4>;
+                       cdns,tslch-ns = <4>;
+               };
+       };
index 3ee7e202657cdb83f7e430ff7b00a29ba69ed097..174f68c26c1b2a66a09c1b88dc7762d16ee54380 100644 (file)
@@ -39,7 +39,7 @@ Optional properties:
 
                "prefetch-polled"       Prefetch polled mode (default)
                "polled"                Polled mode, without prefetch
-               "prefetch-dma"          Prefetch enabled sDMA mode
+               "prefetch-dma"          Prefetch enabled DMA mode
                "prefetch-irq"          Prefetch enabled irq mode
 
  - elm_id:     <deprecated> use "ti,elm-id" instead
diff --git a/Documentation/devicetree/bindings/mtd/hisilicon,fmc-spi-nor.txt b/Documentation/devicetree/bindings/mtd/hisilicon,fmc-spi-nor.txt
new file mode 100644 (file)
index 0000000..7498152
--- /dev/null
@@ -0,0 +1,24 @@
+HiSilicon SPI-NOR Flash Controller
+
+Required properties:
+- compatible : Should be "hisilicon,fmc-spi-nor" and one of the following strings:
+               "hisilicon,hi3519-spi-nor"
+- #address-cells : Should be 1.
+- #size-cells : Should be 0.
+- reg : Offset and length of the register set for the controller device.
+- reg-names : Must include the following two entries: "control", "memory".
+- clocks : phandle to the SPI-NOR flash controller clock.
+
+Example:
+spi-nor-controller@10000000 {
+       compatible = "hisilicon,hi3519-spi-nor", "hisilicon,fmc-spi-nor";
+       #address-cells = <1>;
+       #size-cells = <0>;
+       reg = <0x10000000 0x1000>, <0x14000000 0x1000000>;
+       reg-names = "control", "memory";
+       clocks = <&clock HI3519_FMC_CLK>;
+       spi-nor@0 {
+               compatible = "jedec,spi-nor";
+               reg = <0>;
+       };
+};
diff --git a/Documentation/devicetree/bindings/mtd/mtk-nand.txt b/Documentation/devicetree/bindings/mtd/mtk-nand.txt
new file mode 100644 (file)
index 0000000..069c192
--- /dev/null
@@ -0,0 +1,160 @@
+MTK SoCs NAND FLASH controller (NFC) DT binding
+
+This file documents the device tree bindings for MTK SoCs NAND controllers.
+The functional split of the controller requires two drivers to operate:
+the nand controller interface driver and the ECC engine driver.
+
+The hardware description for both devices must be captured as device
+tree nodes.
+
+1) NFC NAND Controller Interface (NFI):
+=======================================
+
+The first part of NFC is NAND Controller Interface (NFI) HW.
+Required NFI properties:
+- compatible:                  Should be "mediatek,mtxxxx-nfc".
+- reg:                         Base physical address and size of NFI.
+- interrupts:                  Interrupts of NFI.
+- clocks:                      NFI required clocks.
+- clock-names:                 NFI clocks internal name.
+- status:                      Disabled by default; the platform sets it to "okay".
+- ecc-engine:                  Required ECC Engine node.
+- #address-cells:              NAND chip index, should be 1.
+- #size-cells:                 Should be 0.
+
+Example:
+
+       nandc: nfi@1100d000 {
+               compatible = "mediatek,mt2701-nfc";
+               reg = <0 0x1100d000 0 0x1000>;
+               interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_LOW>;
+               clocks = <&pericfg CLK_PERI_NFI>,
+                        <&pericfg CLK_PERI_NFI_PAD>;
+               clock-names = "nfi_clk", "pad_clk";
+               status = "disabled";
+               ecc-engine = <&bch>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+        };
+
+Platform related properties, should be set in {platform_name}.dts:
+- child nodes:         NAND chips.
+
+Child node properties:
+- reg:                 Chip Select Signal, default 0.
+                       Set as reg = <0>, <1> when need 2 CS.
+Optional:
+- nand-on-flash-bbt:   Store BBT on NAND Flash.
+- nand-ecc-mode:       the NAND ECC mode (check driver for supported modes)
+- nand-ecc-step-size:  Number of data bytes covered by a single ECC step.
+                       valid values: 512 and 1024.
+                       1024 is recommended for large page NANDs.
+- nand-ecc-strength:   Number of bits to correct per ECC step.
+                       The valid values that the controller supports are: 4, 6,
+                       8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, 40, 44,
+                       48, 52, 56, 60.
+                       The strength should be calculated as follows:
+                       E = (S - F) * 8 / 14
+                       S = O / (P / Q)
+                               E :     nand-ecc-strength.
+                               S :     spare size per sector.
+                               F :     FDM size, should be in the range [1,8].
+                                       It is used to store free oob data.
+                               O :     oob size.
+                               P :     page size.
+                               Q :     nand-ecc-step-size.
+                       If the result does not match any one of the listed
+                       choices above, please select the smaller valid value from
+                       the list.
+                       (otherwise the driver will do the adjustment at runtime)
+- pinctrl-names:       Default NAND pin GPIO setting name.
+- pinctrl-0:           GPIO setting node.
+
+Example:
+       &pio {
+               nand_pins_default: nanddefault {
+                       pins_dat {
+                               pinmux = <MT2701_PIN_111_MSDC0_DAT7__FUNC_NLD7>,
+                                        <MT2701_PIN_112_MSDC0_DAT6__FUNC_NLD6>,
+                                        <MT2701_PIN_114_MSDC0_DAT4__FUNC_NLD4>,
+                                        <MT2701_PIN_118_MSDC0_DAT3__FUNC_NLD3>,
+                                        <MT2701_PIN_121_MSDC0_DAT0__FUNC_NLD0>,
+                                        <MT2701_PIN_120_MSDC0_DAT1__FUNC_NLD1>,
+                                        <MT2701_PIN_113_MSDC0_DAT5__FUNC_NLD5>,
+                                        <MT2701_PIN_115_MSDC0_RSTB__FUNC_NLD8>,
+                                        <MT2701_PIN_119_MSDC0_DAT2__FUNC_NLD2>;
+                               input-enable;
+                               drive-strength = <MTK_DRIVE_8mA>;
+                               bias-pull-up;
+                       };
+
+                       pins_we {
+                               pinmux = <MT2701_PIN_117_MSDC0_CLK__FUNC_NWEB>;
+                               drive-strength = <MTK_DRIVE_8mA>;
+                               bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
+                       };
+
+                       pins_ale {
+                               pinmux = <MT2701_PIN_116_MSDC0_CMD__FUNC_NALE>;
+                               drive-strength = <MTK_DRIVE_8mA>;
+                               bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+                       };
+               };
+       };
+
+       &nandc {
+               status = "okay";
+               pinctrl-names = "default";
+               pinctrl-0 = <&nand_pins_default>;
+               nand@0 {
+                       reg = <0>;
+                       nand-on-flash-bbt;
+                       nand-ecc-mode = "hw";
+                       nand-ecc-strength = <24>;
+                       nand-ecc-step-size = <1024>;
+               };
+       };
+
+NAND chip optional subnodes:
+- Partitions, see Documentation/devicetree/bindings/mtd/partition.txt
+
+Example:
+       nand@0 {
+               partitions {
+                       compatible = "fixed-partitions";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+
+                       preloader@0 {
+                               label = "pl";
+                               read-only;
+                               reg = <0x00000000 0x00400000>;
+                       };
+                       android@0x00400000 {
+                               label = "android";
+                               reg = <0x00400000 0x12c00000>;
+                       };
+               };
+       };
+
+2) ECC Engine:
+==============
+
+Required BCH properties:
+- compatible:  Should be "mediatek,mtxxxx-ecc".
+- reg:         Base physical address and size of ECC.
+- interrupts:  Interrupts of ECC.
+- clocks:      ECC required clocks.
+- clock-names: ECC clocks internal name.
+- status:      Disabled by default; the platform sets it to "okay".
+
+Example:
+
+       bch: ecc@1100e000 {
+               compatible = "mediatek,mt2701-ecc";
+               reg = <0 0x1100e000 0 0x1000>;
+               interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_LOW>;
+               clocks = <&pericfg CLK_PERI_NFI_ECC>;
+               clock-names = "nfiecc_clk";
+               status = "disabled";
+       };
index 086d6f44c4b976b73b364d491fc290a97b00bf61..f322f56aef74eab778244c6ccaa5a5d4a4463e32 100644 (file)
@@ -11,10 +11,16 @@ Required properties:
     * "ahb" : AHB gating clock
     * "mod" : nand controller clock
 
+Optional properties:
+- dmas : shall reference DMA channel associated to the NAND controller.
+- dma-names : shall be "rxtx".
+
 Optional children nodes:
 Children nodes represent the available nand chips.
 
 Optional properties:
+- reset : phandle + reset specifier pair
+- reset-names : must contain "ahb"
 - allwinner,rb : shall contain the native Ready/Busy ids.
  or
 - rb-gpios : shall contain the gpios used as R/B pins.
diff --git a/Documentation/devicetree/bindings/pci/aardvark-pci.txt b/Documentation/devicetree/bindings/pci/aardvark-pci.txt
new file mode 100644 (file)
index 0000000..bbcd9f4
--- /dev/null
@@ -0,0 +1,56 @@
+Aardvark PCIe controller
+
+This PCIe controller is used on the Marvell Armada 3700 ARM64 SoC.
+
+The Device Tree node describing an Aardvark PCIe controller must
+contain the following properties:
+
+ - compatible: Should be "marvell,armada-3700-pcie"
+ - reg: range of registers for the PCIe controller
+ - interrupts: the interrupt line of the PCIe controller
+ - #address-cells: set to <3>
+ - #size-cells: set to <2>
+ - device_type: set to "pci"
+ - ranges: ranges for the PCI memory and I/O regions
+ - #interrupt-cells: set to <1>
+ - msi-controller: indicates that the PCIe controller can itself
+   handle MSI interrupts
+ - msi-parent: pointer to the MSI controller to be used
+ - interrupt-map-mask and interrupt-map: standard PCI properties to
+   define the mapping of the PCIe interface to interrupt numbers.
+ - bus-range: PCI bus numbers covered
+
+In addition, the Device Tree describing an Aardvark PCIe controller
+must include a sub-node that describes the legacy interrupt controller
+built into the PCIe controller. This sub-node must have the following
+properties:
+
+ - interrupt-controller
+ - #interrupt-cells: set to <1>
+
+Example:
+
+       pcie0: pcie@d0070000 {
+               compatible = "marvell,armada-3700-pcie";
+               device_type = "pci";
+               status = "disabled";
+               reg = <0 0xd0070000 0 0x20000>;
+               #address-cells = <3>;
+               #size-cells = <2>;
+               bus-range = <0x00 0xff>;
+               interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+               #interrupt-cells = <1>;
+               msi-controller;
+               msi-parent = <&pcie0>;
+               ranges = <0x82000000 0 0xe8000000   0 0xe8000000 0 0x1000000 /* Port 0 MEM */
+                         0x81000000 0 0xe9000000   0 0xe9000000 0 0x10000>; /* Port 0 IO */
+               interrupt-map-mask = <0 0 0 7>;
+               interrupt-map = <0 0 0 1 &pcie_intc 0>,
+                               <0 0 0 2 &pcie_intc 1>,
+                               <0 0 0 3 &pcie_intc 2>,
+                               <0 0 0 4 &pcie_intc 3>;
+               pcie_intc: interrupt-controller {
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+               };
+       };
diff --git a/Documentation/devicetree/bindings/pci/axis,artpec6-pcie.txt b/Documentation/devicetree/bindings/pci/axis,artpec6-pcie.txt
new file mode 100644 (file)
index 0000000..330a45b
--- /dev/null
@@ -0,0 +1,46 @@
+* Axis ARTPEC-6 PCIe interface
+
+This PCIe host controller is based on the Synopsys DesignWare PCIe IP
+and thus inherits all the common properties defined in designware-pcie.txt.
+
+Required properties:
+- compatible: "axis,artpec6-pcie", "snps,dw-pcie"
+- reg: base addresses and lengths of the PCIe controller (DBI),
+       the phy controller, and configuration address space.
+- reg-names: Must include the following entries:
+       - "dbi"
+       - "phy"
+       - "config"
+- interrupts: A list of interrupt outputs of the controller. Must contain an
+  entry for each entry in the interrupt-names property.
+- interrupt-names: Must include the following entries:
+       - "msi": The interrupt that is asserted when an MSI is received
+- axis,syscon-pcie: A phandle pointing to the ARTPEC-6 system controller,
+       used to enable and control the Synopsys IP.
+
+Example:
+
+       pcie@f8050000 {
+               compatible = "axis,artpec6-pcie", "snps,dw-pcie";
+               reg = <0xf8050000 0x2000
+                      0xf8040000 0x1000
+                      0xc0000000 0x1000>;
+               reg-names = "dbi", "phy", "config";
+               #address-cells = <3>;
+               #size-cells = <2>;
+               device_type = "pci";
+                         /* downstream I/O */
+               ranges = <0x81000000 0 0x00010000 0xc0010000 0 0x00010000
+                         /* non-prefetchable memory */
+                         0x82000000 0 0xc0020000 0xc0020000 0 0x1ffe0000>;
+               num-lanes = <2>;
+               interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-names = "msi";
+               #interrupt-cells = <1>;
+               interrupt-map-mask = <0 0 0 0x7>;
+               interrupt-map = <0 0 0 1 &intc GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 0 2 &intc GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 0 3 &intc GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 0 4 &intc GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+               axis,syscon-pcie = <&syscon>;
+       };
index e24aa11e8f8a1db6fe0ce50a846019c9c91ca3cd..6420290120593a009742c330c1b0e315874dad9c 100644 (file)
@@ -3021,6 +3021,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                resource_alignment=
                                Format:
                                [<order of align>@][<domain>:]<bus>:<slot>.<func>[; ...]
+                               [<order of align>@]pci:<vendor>:<device>\
+                                               [:<subvendor>:<subdevice>][; ...]
                                Specifies alignment and device to reassign
                                aligned memory resources.
                                If <order of align> is not specified,
@@ -3039,6 +3041,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                hpmemsize=nn[KMG]       The fixed amount of bus space which is
                                reserved for hotplug bridge's memory window.
                                Default size is 2 megabytes.
+               hpbussize=nn    The minimum amount of additional bus numbers
+                               reserved for buses below a hotplug bridge.
+                               Default is 1.
                realloc=        Enable/disable reallocating PCI bridge resources
                                if allocations done by BIOS are too small to
                                accommodate resources required by all child
@@ -3070,6 +3075,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                compat  Treat PCIe ports as PCI-to-PCI bridges, disable the PCIe
                        ports driver.
 
+       pcie_port_pm=   [PCIE] PCIe port power management handling:
+               off     Disable power management of all PCIe ports
+               force   Forcibly enable power management of all PCIe ports
+
        pcie_pme=       [PCIE,PM] Native PCIe PME signaling options:
                nomsi   Do not use MSI for native PCIe PME signaling (this makes
                        all PCIe root ports use INTx for all services).
index 8f950e2752dee6afccafa3a26c58663e5f8d5eb5..10074ff03c574322526868e38aa8d949d852541c 100644 (file)
@@ -8883,6 +8883,7 @@ L:        linux-pci@vger.kernel.org
 Q:     http://patchwork.ozlabs.org/project/linux-pci/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git
 S:     Supported
+F:     Documentation/devicetree/bindings/pci/
 F:     Documentation/PCI/
 F:     drivers/pci/
 F:     include/linux/pci*
@@ -8946,6 +8947,13 @@ L:       linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     drivers/pci/host/*mvebu*
 
+PCI DRIVER FOR AARDVARK (Marvell Armada 3700)
+M:     Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+L:     linux-pci@vger.kernel.org
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:     Maintained
+F:     drivers/pci/host/pci-aardvark.c
+
 PCI DRIVER FOR NVIDIA TEGRA
 M:     Thierry Reding <thierry.reding@gmail.com>
 L:     linux-tegra@vger.kernel.org
@@ -9028,6 +9036,15 @@ S:       Maintained
 F:     Documentation/devicetree/bindings/pci/xgene-pci-msi.txt
 F:     drivers/pci/host/pci-xgene-msi.c
 
+PCIE DRIVER FOR AXIS ARTPEC
+M:     Niklas Cassel <niklas.cassel@axis.com>
+M:     Jesper Nilsson <jesper.nilsson@axis.com>
+L:     linux-arm-kernel@axis.com
+L:     linux-pci@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/pci/axis,artpec*
+F:     drivers/pci/host/*artpec*
+
 PCIE DRIVER FOR HISILICON
 M:     Zhou Wang <wangzhou1@hisilicon.com>
 M:     Gabriele Paoloni <gabriele.paoloni@huawei.com>
index fc3dc0b90be2e0d23bffe6f334cac4565b936bcf..2d601d769a1cdddae7bbba2bb22571731e3d4f5e 100644 (file)
@@ -700,7 +700,7 @@ config ARCH_VIRT
        depends on ARCH_MULTI_V7
        select ARM_AMBA
        select ARM_GIC
-       select ARM_GIC_V2M if PCI_MSI
+       select ARM_GIC_V2M if PCI
        select ARM_GIC_V3
        select ARM_PSCI
        select HAVE_ARM_ARCH_TIMER
index 0070e8520cd447932ece57098d091c511ed1b895..2d88af5be45fd6313126b1a6001643bcdac117c9 100644 (file)
@@ -22,6 +22,7 @@ struct hw_pci {
        struct msi_controller *msi_ctrl;
        struct pci_ops  *ops;
        int             nr_controllers;
+       unsigned int    io_optional:1;
        void            **private_data;
        int             (*setup)(int nr, struct pci_sys_data *);
        struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
index 05e61a2eeabe9e24aaa5011d2f05dd769019a485..2f0e07735d1d4715234d94d6bd0e0d7c49f442c0 100644 (file)
@@ -410,7 +410,8 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
        return irq;
 }
 
-static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
+static int pcibios_init_resource(int busnr, struct pci_sys_data *sys,
+                                int io_optional)
 {
        int ret;
        struct resource_entry *window;
@@ -420,6 +421,14 @@ static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
                         &iomem_resource, sys->mem_offset);
        }
 
+       /*
+        * If a platform says I/O port support is optional, we don't add
+        * the default I/O space.  The platform is responsible for adding
+        * any I/O space it needs.
+        */
+       if (io_optional)
+               return 0;
+
        resource_list_for_each_entry(window, &sys->resources)
                if (resource_type(window->res) == IORESOURCE_IO)
                        return 0;
@@ -466,7 +475,7 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
                if (ret > 0) {
                        struct pci_host_bridge *host_bridge;
 
-                       ret = pcibios_init_resources(nr, sys);
+                       ret = pcibios_init_resource(nr, sys, hw->io_optional);
                        if (ret)  {
                                kfree(sys);
                                break;
@@ -515,25 +524,23 @@ void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
        list_for_each_entry(sys, &head, node) {
                struct pci_bus *bus = sys->bus;
 
-               if (!pci_has_flag(PCI_PROBE_ONLY)) {
+               /*
+                * We insert PCI resources into the iomem_resource and
+                * ioport_resource trees in either pci_bus_claim_resources()
+                * or pci_bus_assign_resources().
+                */
+               if (pci_has_flag(PCI_PROBE_ONLY)) {
+                       pci_bus_claim_resources(bus);
+               } else {
                        struct pci_bus *child;
 
-                       /*
-                        * Size the bridge windows.
-                        */
                        pci_bus_size_bridges(bus);
-
-                       /*
-                        * Assign resources.
-                        */
                        pci_bus_assign_resources(bus);
 
                        list_for_each_entry(child, &bus->children, node)
                                pcie_bus_configure_settings(child);
                }
-               /*
-                * Tell drivers about devices found.
-                */
+
                pci_bus_add_devices(bus);
        }
 }
@@ -590,18 +597,6 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
        return start;
 }
 
-/**
- * pcibios_enable_device - Enable I/O and memory.
- * @dev: PCI device to be enabled
- */
-int pcibios_enable_device(struct pci_dev *dev, int mask)
-{
-       if (pci_has_flag(PCI_PROBE_ONLY))
-               return 0;
-
-       return pci_enable_resources(dev, mask);
-}
-
 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
                        enum pci_mmap_state mmap_state, int write_combine)
 {
index 1b06db571fffb899b9f1149afde2c54ddd92f4aa..69c8787bec7d3f3e343b592b01997667f3f8c53d 100644 (file)
@@ -3,6 +3,7 @@ config ARM64
        select ACPI_CCA_REQUIRED if ACPI
        select ACPI_GENERIC_GSI if ACPI
        select ACPI_REDUCED_HARDWARE_ONLY if ACPI
+       select ACPI_MCFG if ACPI
        select ARCH_HAS_DEVMEM_IS_ALLOWED
        select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
@@ -22,9 +23,9 @@ config ARM64
        select ARM_ARCH_TIMER
        select ARM_GIC
        select AUDIT_ARCH_COMPAT_GENERIC
-       select ARM_GIC_V2M if PCI_MSI
+       select ARM_GIC_V2M if PCI
        select ARM_GIC_V3
-       select ARM_GIC_V3_ITS if PCI_MSI
+       select ARM_GIC_V3_ITS if PCI
        select ARM_PSCI_FW
        select BUILDTIME_EXTABLE_SORT
        select CLONE_BACKWARDS
@@ -102,6 +103,7 @@ config ARM64
        select OF_EARLY_FLATTREE
        select OF_NUMA if NUMA && OF
        select OF_RESERVED_MEM
+       select PCI_ECAM if ACPI
        select PERF_USE_VMALLOC
        select POWER_RESET
        select POWER_SUPPLY
index 86110a6ae33095ac5af652a80052ac566b5f9c7b..1372e9a6aaa457d4c687fcfbd9e05c17adcc9c1f 100644 (file)
@@ -76,3 +76,8 @@
 &usb3 {
        status = "okay";
 };
+
+/* CON17 (PCIe) / CON12 (mini-PCIe) */
+&pcie0 {
+       status = "okay";
+};
index eb29280962d7482ada7a80228cffe2792011c173..c4762538ec0100dadfc8876f4f749d3b7bdc3a34 100644 (file)
                                      <0x1d40000 0x40000>; /* GICR */
                        };
                };
+
+               pcie0: pcie@d0070000 {
+                       compatible = "marvell,armada-3700-pcie";
+                       device_type = "pci";
+                       status = "disabled";
+                       reg = <0 0xd0070000 0 0x20000>;
+                       #address-cells = <3>;
+                       #size-cells = <2>;
+                       bus-range = <0x00 0xff>;
+                       interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+                       #interrupt-cells = <1>;
+                       msi-parent = <&pcie0>;
+                       msi-controller;
+                       ranges = <0x82000000 0 0xe8000000   0 0xe8000000 0 0x1000000 /* Port 0 MEM */
+                                 0x81000000 0 0xe9000000   0 0xe9000000 0 0x10000>; /* Port 0 IO */
+                       interrupt-map-mask = <0 0 0 7>;
+                       interrupt-map = <0 0 0 1 &pcie_intc 0>,
+                                       <0 0 0 2 &pcie_intc 1>,
+                                       <0 0 0 3 &pcie_intc 2>,
+                                       <0 0 0 4 &pcie_intc 3>;
+                       pcie_intc: interrupt-controller {
+                               interrupt-controller;
+                               #interrupt-cells = <1>;
+                       };
+               };
        };
 };
index 3c4e308b40a0ef53fb0e89303bfe75b3c2f0fb6a..acf38722457b1d24a4aad62f167cef415b1538c3 100644 (file)
@@ -17,6 +17,9 @@
 #include <linux/mm.h>
 #include <linux/of_pci.h>
 #include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
 #include <linux/slab.h>
 
 /*
@@ -36,25 +39,17 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
        return res->start;
 }
 
-/**
- * pcibios_enable_device - Enable I/O and memory.
- * @dev: PCI device to be enabled
- * @mask: bitmask of BARs to enable
- */
-int pcibios_enable_device(struct pci_dev *dev, int mask)
-{
-       if (pci_has_flag(PCI_PROBE_ONLY))
-               return 0;
-
-       return pci_enable_resources(dev, mask);
-}
-
 /*
- * Try to assign the IRQ number from DT when adding a new device
+ * Try to assign the IRQ number when probing a new device
  */
-int pcibios_add_device(struct pci_dev *dev)
+int pcibios_alloc_irq(struct pci_dev *dev)
 {
-       dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
+       if (acpi_disabled)
+               dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
+#ifdef CONFIG_ACPI
+       else
+               return acpi_pci_irq_enable(dev);
+#endif
 
        return 0;
 }
@@ -65,13 +60,21 @@ int pcibios_add_device(struct pci_dev *dev)
 int raw_pci_read(unsigned int domain, unsigned int bus,
                  unsigned int devfn, int reg, int len, u32 *val)
 {
-       return -ENXIO;
+       struct pci_bus *b = pci_find_bus(domain, bus);
+
+       if (!b)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+       return b->ops->read(b, devfn, reg, len, val);
 }
 
 int raw_pci_write(unsigned int domain, unsigned int bus,
                unsigned int devfn, int reg, int len, u32 val)
 {
-       return -ENXIO;
+       struct pci_bus *b = pci_find_bus(domain, bus);
+
+       if (!b)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+       return b->ops->write(b, devfn, reg, len, val);
 }
 
 #ifdef CONFIG_NUMA
@@ -85,10 +88,124 @@ EXPORT_SYMBOL(pcibus_to_node);
 #endif
 
 #ifdef CONFIG_ACPI
-/* Root bridge scanning */
+
+struct acpi_pci_generic_root_info {
+       struct acpi_pci_root_info       common;
+       struct pci_config_window        *cfg;   /* config space mapping */
+};
+
+int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
+{
+       struct pci_config_window *cfg = bus->sysdata;
+       struct acpi_device *adev = to_acpi_device(cfg->parent);
+       struct acpi_pci_root *root = acpi_driver_data(adev);
+
+       return root->segment;
+}
+
+int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+       if (!acpi_disabled) {
+               struct pci_config_window *cfg = bridge->bus->sysdata;
+               struct acpi_device *adev = to_acpi_device(cfg->parent);
+               ACPI_COMPANION_SET(&bridge->dev, adev);
+       }
+
+       return 0;
+}
+
+/*
+ * Lookup the bus range for the domain in MCFG, and set up config space
+ * mapping.
+ */
+static struct pci_config_window *
+pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root)
+{
+       struct resource *bus_res = &root->secondary;
+       u16 seg = root->segment;
+       struct pci_config_window *cfg;
+       struct resource cfgres;
+       unsigned int bsz;
+
+       /* Use address from _CBA if present, otherwise lookup MCFG */
+       if (!root->mcfg_addr)
+               root->mcfg_addr = pci_mcfg_lookup(seg, bus_res);
+
+       if (!root->mcfg_addr) {
+               dev_err(&root->device->dev, "%04x:%pR ECAM region not found\n",
+                       seg, bus_res);
+               return NULL;
+       }
+
+       bsz = 1 << pci_generic_ecam_ops.bus_shift;
+       cfgres.start = root->mcfg_addr + bus_res->start * bsz;
+       cfgres.end = cfgres.start + resource_size(bus_res) * bsz - 1;
+       cfgres.flags = IORESOURCE_MEM;
+       cfg = pci_ecam_create(&root->device->dev, &cfgres, bus_res,
+                             &pci_generic_ecam_ops);
+       if (IS_ERR(cfg)) {
+               dev_err(&root->device->dev, "%04x:%pR error %ld mapping ECAM\n",
+                       seg, bus_res, PTR_ERR(cfg));
+               return NULL;
+       }
+
+       return cfg;
+}
+
+/* release_info: free resources allocated by init_info */
+static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci)
+{
+       struct acpi_pci_generic_root_info *ri;
+
+       ri = container_of(ci, struct acpi_pci_generic_root_info, common);
+       pci_ecam_free(ri->cfg);
+       kfree(ri);
+}
+
+static struct acpi_pci_root_ops acpi_pci_root_ops = {
+       .release_info = pci_acpi_generic_release_info,
+};
+
+/* Interface called from ACPI code to setup PCI host controller */
 struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
 {
-       /* TODO: Should be revisited when implementing PCI on ACPI */
-       return NULL;
+       int node = acpi_get_node(root->device->handle);
+       struct acpi_pci_generic_root_info *ri;
+       struct pci_bus *bus, *child;
+
+       ri = kzalloc_node(sizeof(*ri), GFP_KERNEL, node);
+       if (!ri)
+               return NULL;
+
+       ri->cfg = pci_acpi_setup_ecam_mapping(root);
+       if (!ri->cfg) {
+               kfree(ri);
+               return NULL;
+       }
+
+       acpi_pci_root_ops.pci_ops = &ri->cfg->ops->pci_ops;
+       bus = acpi_pci_root_create(root, &acpi_pci_root_ops, &ri->common,
+                                  ri->cfg);
+       if (!bus)
+               return NULL;
+
+       pci_bus_size_bridges(bus);
+       pci_bus_assign_resources(bus);
+
+       list_for_each_entry(child, &bus->children, node)
+               pcie_bus_configure_settings(child);
+
+       return bus;
 }
+
+void pcibios_add_bus(struct pci_bus *bus)
+{
+       acpi_pci_add_bus(bus);
+}
+
+void pcibios_remove_bus(struct pci_bus *bus)
+{
+       acpi_pci_remove_bus(bus);
+}
+
 #endif
index 60d57c5900324619b155a4969d88b162973fa373..bdc25aa4346850b2fe793082dadd17310f2ec44d 100644 (file)
@@ -397,7 +397,7 @@ static int __init init_axis_flash(void)
        if (!romfs_in_flash) {
                /* Create an RAM device for the root partition (romfs). */
 
-#if !defined(CONFIG_MTD_MTDRAM) || (CONFIG_MTDRAM_TOTAL_SIZE != 0) || (CONFIG_MTDRAM_ABS_POS != 0)
+#if !defined(CONFIG_MTD_MTDRAM) || (CONFIG_MTDRAM_TOTAL_SIZE != 0)
                /* No use trying to boot this kernel from RAM. Panic! */
                printk(KERN_EMERG "axisflashmap: Cannot create an MTD RAM "
                       "device due to kernel (mis)configuration!\n");
index bd10d3ba0949029b9831ec8f8a5596d817cc6a37..87656c41fec7a6ba3bd7c34114352aae3e0473d9 100644 (file)
@@ -320,7 +320,7 @@ static int __init init_axis_flash(void)
         * but its size must be configured as 0 so as not to conflict
         * with our usage.
         */
-#if !defined(CONFIG_MTD_MTDRAM) || (CONFIG_MTDRAM_TOTAL_SIZE != 0) || (CONFIG_MTDRAM_ABS_POS != 0)
+#if !defined(CONFIG_MTD_MTDRAM) || (CONFIG_MTDRAM_TOTAL_SIZE != 0)
        if (!romfs_in_flash && !nand_boot) {
                printk(KERN_EMERG "axisflashmap: Cannot create an MTD RAM "
                       "device; configure CONFIG_MTD_MTDRAM with size = 0!\n");
index fc3ecb55f1b23130efa35233e08ee77153e4a237..2a120bb70e544a9502af6615fe7f65bf1f303a53 100644 (file)
@@ -82,9 +82,6 @@ extern pgprot_t       pci_phys_mem_access_prot(struct file *file,
                                         pgprot_t prot);
 
 #define HAVE_ARCH_PCI_RESOURCE_TO_USER
-extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
-                                const struct resource *rsrc,
-                                resource_size_t *start, resource_size_t *end);
 
 extern void pcibios_setup_bus_devices(struct pci_bus *bus);
 extern void pcibios_setup_bus_self(struct pci_bus *bus);
index 14cba600da7ae4fff9573cfcb27465e8f322e73d..81556b843a8ed24228cbf9ed768e9d67649085c8 100644 (file)
@@ -218,33 +218,6 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
        return NULL;
 }
 
-/*
- * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
- * device mapping.
- */
-static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
-                                     pgprot_t protection,
-                                     enum pci_mmap_state mmap_state,
-                                     int write_combine)
-{
-       pgprot_t prot = protection;
-
-       /* Write combine is always 0 on non-memory space mappings. On
-        * memory space, if the user didn't pass 1, we check for a
-        * "prefetchable" resource. This is a bit hackish, but we use
-        * this to workaround the inability of /sysfs to provide a write
-        * combine bit
-        */
-       if (mmap_state != pci_mmap_mem)
-               write_combine = 0;
-       else if (write_combine == 0) {
-               if (rp->flags & IORESOURCE_PREFETCH)
-                       write_combine = 1;
-       }
-
-       return pgprot_noncached(prot);
-}
-
 /*
  * This one is used by /dev/mem and fbdev who have no clue about the
  * PCI device, it tries to find the PCI device first and calls the
@@ -317,9 +290,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
                return -EINVAL;
 
        vma->vm_pgoff = offset >> PAGE_SHIFT;
-       vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
-                                                 vma->vm_page_prot,
-                                                 mmap_state, write_combine);
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                               vma->vm_end - vma->vm_start, vma->vm_page_prot);
@@ -473,39 +444,25 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
                          const struct resource *rsrc,
                          resource_size_t *start, resource_size_t *end)
 {
-       struct pci_controller *hose = pci_bus_to_host(dev->bus);
-       resource_size_t offset = 0;
+       struct pci_bus_region region;
 
-       if (hose == NULL)
+       if (rsrc->flags & IORESOURCE_IO) {
+               pcibios_resource_to_bus(dev->bus, &region,
+                                       (struct resource *) rsrc);
+               *start = region.start;
+               *end = region.end;
                return;
+       }
 
-       if (rsrc->flags & IORESOURCE_IO)
-               offset = (unsigned long)hose->io_base_virt - _IO_BASE;
-
-       /* We pass a fully fixed up address to userland for MMIO instead of
-        * a BAR value because X is lame and expects to be able to use that
-        * to pass to /dev/mem !
+       /* We pass a CPU physical address to userland for MMIO instead of a
+        * BAR value because X is lame and expects to be able to use that
+        * to pass to /dev/mem!
         *
-        * That means that we'll have potentially 64 bits values where some
-        * userland apps only expect 32 (like X itself since it thinks only
-        * Sparc has 64 bits MMIO) but if we don't do that, we break it on
-        * 32 bits CHRPs :-(
-        *
-        * Hopefully, the sysfs insterface is immune to that gunk. Once X
-        * has been fixed (and the fix spread enough), we can re-enable the
-        * 2 lines below and pass down a BAR value to userland. In that case
-        * we'll also have to re-enable the matching code in
-        * __pci_mmap_make_offset().
-        *
-        * BenH.
+        * That means we may have 64-bit values where some apps only expect
+        * 32 (like X itself since it thinks only Sparc has 64-bit MMIO).
         */
-#if 0
-       else if (rsrc->flags & IORESOURCE_MEM)
-               offset = hose->pci_mem_offset;
-#endif
-
-       *start = rsrc->start - offset;
-       *end = rsrc->end - offset;
+       *start = rsrc->start;
+       *end = rsrc->end;
 }
 
 /**
index 86b239d9d75d3e6bfb75ec020d26e8e49bc4798b..9b63cd41213de1290e7a06c31791a2eae74d7487 100644 (file)
@@ -80,16 +80,6 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 
 #define HAVE_ARCH_PCI_RESOURCE_TO_USER
 
-static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
-               const struct resource *rsrc, resource_size_t *start,
-               resource_size_t *end)
-{
-       phys_addr_t size = resource_size(rsrc);
-
-       *start = fixup_bigphys_addr(rsrc->start, size);
-       *end = rsrc->start + size;
-}
-
 /*
  * Dynamic DMA mapping stuff.
  * MIPS has everything mapped statically.
index f1b11f0dea2d8e71e8f5b6cbf65ad4d9e54d0501..b4c02f29663e180aeb5635aba2f4d9eabd24980c 100644 (file)
@@ -112,7 +112,14 @@ static void pcibios_scanbus(struct pci_controller *hose)
                need_domain_info = 1;
        }
 
-       if (!pci_has_flag(PCI_PROBE_ONLY)) {
+       /*
+        * We insert PCI resources into the iomem_resource and
+        * ioport_resource trees in either pci_bus_claim_resources()
+        * or pci_bus_assign_resources().
+        */
+       if (pci_has_flag(PCI_PROBE_ONLY)) {
+               pci_bus_claim_resources(bus);
+       } else {
                pci_bus_size_bridges(bus);
                pci_bus_assign_resources(bus);
        }
@@ -319,6 +326,16 @@ void pcibios_fixup_bus(struct pci_bus *bus)
 EXPORT_SYMBOL(PCIBIOS_MIN_IO);
 EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
 
+void pci_resource_to_user(const struct pci_dev *dev, int bar,
+                         const struct resource *rsrc, resource_size_t *start,
+                         resource_size_t *end)
+{
+       phys_addr_t size = resource_size(rsrc);
+
+       *start = fixup_bigphys_addr(rsrc->start, size);
+       *end = rsrc->start + size;
+}
+
 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
                        enum pci_mmap_state mmap_state, int write_combine)
 {
index a6f3ac0d4602fd8b0ecb2db73ea3d6b52ef46efe..e9bd6cf0212fdbc33e14b0b9b7a192dbc775b5b0 100644 (file)
@@ -136,9 +136,6 @@ extern pgprot_t     pci_phys_mem_access_prot(struct file *file,
                                         pgprot_t prot);
 
 #define HAVE_ARCH_PCI_RESOURCE_TO_USER
-extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
-                                const struct resource *rsrc,
-                                resource_size_t *start, resource_size_t *end);
 
 extern resource_size_t pcibios_io_space_offset(struct pci_controller *hose);
 extern void pcibios_setup_bus_devices(struct pci_bus *bus);
index f93942b4b6a61a661aae7e4da92605eded111bd4..a5c0153ede37f21d6dd8f47beac764587789fbc5 100644 (file)
@@ -411,36 +411,6 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
        return NULL;
 }
 
-/*
- * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
- * device mapping.
- */
-static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
-                                     pgprot_t protection,
-                                     enum pci_mmap_state mmap_state,
-                                     int write_combine)
-{
-
-       /* Write combine is always 0 on non-memory space mappings. On
-        * memory space, if the user didn't pass 1, we check for a
-        * "prefetchable" resource. This is a bit hackish, but we use
-        * this to workaround the inability of /sysfs to provide a write
-        * combine bit
-        */
-       if (mmap_state != pci_mmap_mem)
-               write_combine = 0;
-       else if (write_combine == 0) {
-               if (rp->flags & IORESOURCE_PREFETCH)
-                       write_combine = 1;
-       }
-
-       /* XXX would be nice to have a way to ask for write-through */
-       if (write_combine)
-               return pgprot_noncached_wc(protection);
-       else
-               return pgprot_noncached(protection);
-}
-
 /*
  * This one is used by /dev/mem and fbdev who have no clue about the
  * PCI device, it tries to find the PCI device first and calls the
@@ -514,9 +484,10 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
                return -EINVAL;
 
        vma->vm_pgoff = offset >> PAGE_SHIFT;
-       vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
-                                                 vma->vm_page_prot,
-                                                 mmap_state, write_combine);
+       if (write_combine)
+               vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
+       else
+               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                               vma->vm_end - vma->vm_start, vma->vm_page_prot);
@@ -666,39 +637,25 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
                          const struct resource *rsrc,
                          resource_size_t *start, resource_size_t *end)
 {
-       struct pci_controller *hose = pci_bus_to_host(dev->bus);
-       resource_size_t offset = 0;
+       struct pci_bus_region region;
 
-       if (hose == NULL)
+       if (rsrc->flags & IORESOURCE_IO) {
+               pcibios_resource_to_bus(dev->bus, &region,
+                                       (struct resource *) rsrc);
+               *start = region.start;
+               *end = region.end;
                return;
+       }
 
-       if (rsrc->flags & IORESOURCE_IO)
-               offset = (unsigned long)hose->io_base_virt - _IO_BASE;
-
-       /* We pass a fully fixed up address to userland for MMIO instead of
-        * a BAR value because X is lame and expects to be able to use that
-        * to pass to /dev/mem !
-        *
-        * That means that we'll have potentially 64 bits values where some
-        * userland apps only expect 32 (like X itself since it thinks only
-        * Sparc has 64 bits MMIO) but if we don't do that, we break it on
-        * 32 bits CHRPs :-(
-        *
-        * Hopefully, the sysfs insterface is immune to that gunk. Once X
-        * has been fixed (and the fix spread enough), we can re-enable the
-        * 2 lines below and pass down a BAR value to userland. In that case
-        * we'll also have to re-enable the matching code in
-        * __pci_mmap_make_offset().
+       /* We pass a CPU physical address to userland for MMIO instead of a
+        * BAR value because X is lame and expects to be able to use that
+        * to pass to /dev/mem!
         *
-        * BenH.
+        * That means we may have 64-bit values where some apps only expect
+        * 32 (like X itself since it thinks only Sparc has 64-bit MMIO).
         */
-#if 0
-       else if (rsrc->flags & IORESOURCE_MEM)
-               offset = hose->pci_mem_offset;
-#endif
-
-       *start = rsrc->start - offset;
-       *end = rsrc->end - offset;
+       *start = rsrc->start;
+       *end = rsrc->end;
 }
 
 /**
index 022d16008a00d8a3c82de515f0e0ca8c6f7001c0..2303635158f56fc9b5653e5a5949642328d9cfe5 100644 (file)
@@ -55,9 +55,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 }
 
 #define HAVE_ARCH_PCI_RESOURCE_TO_USER
-void pci_resource_to_user(const struct pci_dev *dev, int bar,
-                         const struct resource *rsrc,
-                         resource_size_t *start, resource_size_t *end);
 #endif /* __KERNEL__ */
 
 #endif /* __SPARC64_PCI_H */
index c2b202d763a16ae74d1d150ed6e7dd940341d7dd..9c1878f4fa9f31a69f0849252ef160550689fcd5 100644 (file)
@@ -986,16 +986,18 @@ void pci_resource_to_user(const struct pci_dev *pdev, int bar,
                          const struct resource *rp, resource_size_t *start,
                          resource_size_t *end)
 {
-       struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
-       unsigned long offset;
-
-       if (rp->flags & IORESOURCE_IO)
-               offset = pbm->io_space.start;
-       else
-               offset = pbm->mem_space.start;
+       struct pci_bus_region region;
 
-       *start = rp->start - offset;
-       *end = rp->end - offset;
+       /*
+        * "User" addresses are shown in /sys/devices/pci.../.../resource
+        * and /proc/bus/pci/devices and used as mmap offsets for
+        * /proc/bus/pci/BB/DD.F files (see proc_bus_pci_mmap()).
+        *
+        * On sparc, these are PCI bus addresses, i.e., raw BAR values.
+        */
+       pcibios_resource_to_bus(pdev->bus, &region, (struct resource *) rp);
+       *start = region.start;
+       *end = region.end;
 }
 
 void pcibios_set_master(struct pci_dev *dev)
index d45fa5f3e9c41dae187ebebf59f18c8b595cef9c..62137d13c6f9d2d75783dca25139217f23b09d54 100644 (file)
@@ -265,10 +265,8 @@ static int __init pci_common_init(void)
 
        pci_fixup_irqs(pci_common_swizzle, pci_puv3_map_irq);
 
-       if (!pci_has_flag(PCI_PROBE_ONLY)) {
-               pci_bus_size_bridges(puv3_bus);
-               pci_bus_assign_resources(puv3_bus);
-       }
+       pci_bus_size_bridges(puv3_bus);
+       pci_bus_assign_resources(puv3_bus);
        pci_bus_add_devices(puv3_bus);
        return 0;
 }
@@ -279,9 +277,6 @@ char * __init pcibios_setup(char *str)
        if (!strcmp(str, "debug")) {
                debug_pci = 1;
                return NULL;
-       } else if (!strcmp(str, "firmware")) {
-               pci_add_flags(PCI_PROBE_ONLY);
-               return NULL;
        }
        return str;
 }
index 8196054fedb0450d56978936d769c7189a86ad85..7b6a9d14c8c0a27f6222982fd90d3224ae366c3e 100644 (file)
@@ -133,7 +133,7 @@ static void pcibios_fixup_device_resources(struct pci_dev *dev)
        if (pci_probe & PCI_NOASSIGN_BARS) {
                /*
                * If the BIOS did not assign the BAR, zero out the
-               * resource so the kernel doesn't attmept to assign
+               * resource so the kernel doesn't attempt to assign
                * it later on in pci_assign_unassigned_resources
                */
                for (bar = 0; bar <= PCI_STD_RESOURCE_END; bar++) {
index 613cac7395c471f0a79ef6cb2254421304264cd0..e88b4176260fea0d0d19fcaa021b56a5d2a330f4 100644 (file)
@@ -119,10 +119,11 @@ static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 static void vmd_irq_enable(struct irq_data *data)
 {
        struct vmd_irq *vmdirq = data->chip_data;
+       unsigned long flags;
 
-       raw_spin_lock(&list_lock);
+       raw_spin_lock_irqsave(&list_lock, flags);
        list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
-       raw_spin_unlock(&list_lock);
+       raw_spin_unlock_irqrestore(&list_lock, flags);
 
        data->chip->irq_unmask(data);
 }
@@ -130,12 +131,14 @@ static void vmd_irq_enable(struct irq_data *data)
 static void vmd_irq_disable(struct irq_data *data)
 {
        struct vmd_irq *vmdirq = data->chip_data;
+       unsigned long flags;
 
        data->chip->irq_mask(data);
 
-       raw_spin_lock(&list_lock);
+       raw_spin_lock_irqsave(&list_lock, flags);
        list_del_rcu(&vmdirq->node);
-       raw_spin_unlock(&list_lock);
+       INIT_LIST_HEAD_RCU(&vmdirq->node);
+       raw_spin_unlock_irqrestore(&list_lock, flags);
 }
 
 /*
@@ -166,16 +169,20 @@ static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
  * XXX: We can be even smarter selecting the best IRQ once we solve the
  * affinity problem.
  */
-static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd)
+static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc)
 {
-       int i, best = 0;
+       int i, best = 1;
+       unsigned long flags;
 
-       raw_spin_lock(&list_lock);
+       if (!desc->msi_attrib.is_msix || vmd->msix_count == 1)
+               return &vmd->irqs[0];
+
+       raw_spin_lock_irqsave(&list_lock, flags);
        for (i = 1; i < vmd->msix_count; i++)
                if (vmd->irqs[i].count < vmd->irqs[best].count)
                        best = i;
        vmd->irqs[best].count++;
-       raw_spin_unlock(&list_lock);
+       raw_spin_unlock_irqrestore(&list_lock, flags);
 
        return &vmd->irqs[best];
 }
@@ -184,14 +191,15 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
                        unsigned int virq, irq_hw_number_t hwirq,
                        msi_alloc_info_t *arg)
 {
-       struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(arg->desc)->bus);
+       struct msi_desc *desc = arg->desc;
+       struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
        struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
 
        if (!vmdirq)
                return -ENOMEM;
 
        INIT_LIST_HEAD(&vmdirq->node);
-       vmdirq->irq = vmd_next_irq(vmd);
+       vmdirq->irq = vmd_next_irq(vmd, desc);
        vmdirq->virq = virq;
 
        irq_domain_set_info(domain, virq, vmdirq->irq->vmd_vector, info->chip,
@@ -203,11 +211,12 @@ static void vmd_msi_free(struct irq_domain *domain,
                        struct msi_domain_info *info, unsigned int virq)
 {
        struct vmd_irq *vmdirq = irq_get_chip_data(virq);
+       unsigned long flags;
 
        /* XXX: Potential optimization to rebalance */
-       raw_spin_lock(&list_lock);
+       raw_spin_lock_irqsave(&list_lock, flags);
        vmdirq->irq->count--;
-       raw_spin_unlock(&list_lock);
+       raw_spin_unlock_irqrestore(&list_lock, flags);
 
        kfree_rcu(vmdirq, rcu);
 }
@@ -261,7 +270,7 @@ static struct device *to_vmd_dev(struct device *dev)
 
 static struct dma_map_ops *vmd_dma_ops(struct device *dev)
 {
-       return to_vmd_dev(dev)->archdata.dma_ops;
+       return get_dma_ops(to_vmd_dev(dev));
 }
 
 static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
@@ -367,7 +376,7 @@ static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
 {
        struct dma_domain *domain = &vmd->dma_domain;
 
-       if (vmd->dev->dev.archdata.dma_ops)
+       if (get_dma_ops(&vmd->dev->dev))
                del_dma_domain(domain);
 }
 
@@ -379,7 +388,7 @@ static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
 
 static void vmd_setup_dma_ops(struct vmd_dev *vmd)
 {
-       const struct dma_map_ops *source = vmd->dev->dev.archdata.dma_ops;
+       const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev);
        struct dma_map_ops *dest = &vmd->dma_ops;
        struct dma_domain *domain = &vmd->dma_domain;
 
@@ -594,7 +603,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd)
        sd->node = pcibus_to_node(vmd->dev->bus);
 
        vmd->irq_domain = pci_msi_create_irq_domain(NULL, &vmd_msi_domain_info,
-                                                   NULL);
+                                                   x86_vector_domain);
        if (!vmd->irq_domain)
                return -ENODEV;
 
index aebd944bdaa125e6c0cd52dc475fe366c0feb4ba..445ce28475b3061d8f19abcff8e28550718e90e1 100644 (file)
@@ -221,6 +221,9 @@ config ACPI_PROCESSOR_IDLE
        bool
        select CPU_IDLE
 
+config ACPI_MCFG
+       bool
+
 config ACPI_CPPC_LIB
        bool
        depends on ACPI_PROCESSOR
index 35a6ccbe302580ecf713d5ec623168dc7ab48a0d..5ae9d85c5159b64bef844a86e74a5ca3c53f2954 100644 (file)
@@ -40,6 +40,7 @@ acpi-$(CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC) += processor_pdc.o
 acpi-y                         += ec.o
 acpi-$(CONFIG_ACPI_DOCK)       += dock.o
 acpi-y                         += pci_root.o pci_link.o pci_irq.o
+obj-$(CONFIG_ACPI_MCFG)                += pci_mcfg.o
 acpi-y                         += acpi_lpss.o acpi_apd.o
 acpi-y                         += acpi_platform.o
 acpi-y                         += acpi_pnp.o
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
new file mode 100644 (file)
index 0000000..b5b376e
--- /dev/null
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2016 Broadcom
+ *     Author: Jayachandran C <jchandra@broadcom.com>
+ * Copyright (C) 2016 Semihalf
+ *     Author: Tomasz Nowicki <tn@semihalf.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+#define pr_fmt(fmt) "ACPI: " fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+
+/* Structure to hold entries from the MCFG table */
+struct mcfg_entry {
+       struct list_head        list;
+       phys_addr_t             addr;
+       u16                     segment;
+       u8                      bus_start;
+       u8                      bus_end;
+};
+
+/* List to save MCFG entries */
+static LIST_HEAD(pci_mcfg_list);
+
+phys_addr_t pci_mcfg_lookup(u16 seg, struct resource *bus_res)
+{
+       struct mcfg_entry *e;
+
+       /*
+        * We expect exact match, unless MCFG entry end bus covers more than
+        * specified by caller.
+        */
+       list_for_each_entry(e, &pci_mcfg_list, list) {
+               if (e->segment == seg && e->bus_start == bus_res->start &&
+                   e->bus_end >= bus_res->end)
+                       return e->addr;
+       }
+
+       return 0;
+}
+
+static __init int pci_mcfg_parse(struct acpi_table_header *header)
+{
+       struct acpi_table_mcfg *mcfg;
+       struct acpi_mcfg_allocation *mptr;
+       struct mcfg_entry *e, *arr;
+       int i, n;
+
+       if (header->length < sizeof(struct acpi_table_mcfg))
+               return -EINVAL;
+
+       n = (header->length - sizeof(struct acpi_table_mcfg)) /
+                                       sizeof(struct acpi_mcfg_allocation);
+       mcfg = (struct acpi_table_mcfg *)header;
+       mptr = (struct acpi_mcfg_allocation *) &mcfg[1];
+
+       arr = kcalloc(n, sizeof(*arr), GFP_KERNEL);
+       if (!arr)
+               return -ENOMEM;
+
+       for (i = 0, e = arr; i < n; i++, mptr++, e++) {
+               e->segment = mptr->pci_segment;
+               e->addr =  mptr->address;
+               e->bus_start = mptr->start_bus_number;
+               e->bus_end = mptr->end_bus_number;
+               list_add(&e->list, &pci_mcfg_list);
+       }
+
+       pr_info("MCFG table detected, %d entries\n", n);
+       return 0;
+}
+
+/* Interface called by ACPI - parse and save MCFG table */
+void __init pci_mmcfg_late_init(void)
+{
+       int err = acpi_table_parse(ACPI_SIG_MCFG, pci_mcfg_parse);
+       if (err)
+               pr_err("Failed to parse MCFG (%d)\n", err);
+}
index ae3fe4e642035b2d51b2e3f6c4d93e68a6cb1bcc..d144168d4ef9dbfb66f45383ee62ab9bf00c0438 100644 (file)
@@ -720,6 +720,36 @@ next:
        }
 }
 
+static void acpi_pci_root_remap_iospace(struct resource_entry *entry)
+{
+#ifdef PCI_IOBASE
+       struct resource *res = entry->res;
+       resource_size_t cpu_addr = res->start;
+       resource_size_t pci_addr = cpu_addr - entry->offset;
+       resource_size_t length = resource_size(res);
+       unsigned long port;
+
+       if (pci_register_io_range(cpu_addr, length))
+               goto err;
+
+       port = pci_address_to_pio(cpu_addr);
+       if (port == (unsigned long)-1)
+               goto err;
+
+       res->start = port;
+       res->end = port + length - 1;
+       entry->offset = port - pci_addr;
+
+       if (pci_remap_iospace(res, cpu_addr) < 0)
+               goto err;
+
+       pr_info("Remapped I/O %pa to %pR\n", &cpu_addr, res);
+       return;
+err:
+       res->flags |= IORESOURCE_DISABLED;
+#endif
+}
+
 int acpi_pci_probe_root_resources(struct acpi_pci_root_info *info)
 {
        int ret;
@@ -740,6 +770,9 @@ int acpi_pci_probe_root_resources(struct acpi_pci_root_info *info)
                        "no IO and memory resources present in _CRS\n");
        else {
                resource_list_for_each_entry_safe(entry, tmp, list) {
+                       if (entry->res->flags & IORESOURCE_IO)
+                               acpi_pci_root_remap_iospace(entry);
+
                        if (entry->res->flags & IORESOURCE_DISABLED)
                                resource_list_destroy_entry(entry);
                        else
@@ -811,6 +844,8 @@ static void acpi_pci_root_release_info(struct pci_host_bridge *bridge)
 
        resource_list_for_each_entry(entry, &bridge->windows) {
                res = entry->res;
+               if (res->flags & IORESOURCE_IO)
+                       pci_unmap_iospace(res);
                if (res->parent &&
                    (res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
                        release_resource(res);
index 5495a5ba80390db482b7cb7e2c06c4cb4e6290ad..7f8728984f4472eeb9efa4303f4d68433570d346 100644 (file)
@@ -21,9 +21,9 @@ config ARM_GIC_MAX_NR
 
 config ARM_GIC_V2M
        bool
-       depends on ARM_GIC
-       depends on PCI && PCI_MSI
-       select PCI_MSI_IRQ_DOMAIN
+       depends on PCI
+       select ARM_GIC
+       select PCI_MSI
 
 config GIC_NON_BANKED
        bool
@@ -37,7 +37,8 @@ config ARM_GIC_V3
 
 config ARM_GIC_V3_ITS
        bool
-       select PCI_MSI_IRQ_DOMAIN
+       depends on PCI
+       depends on PCI_MSI
 
 config ARM_NVIC
        bool
@@ -62,13 +63,13 @@ config ARM_VIC_NR
 config ARMADA_370_XP_IRQ
        bool
        select GENERIC_IRQ_CHIP
-       select PCI_MSI_IRQ_DOMAIN if PCI_MSI
+       select PCI_MSI if PCI
 
 config ALPINE_MSI
        bool
-       depends on PCI && PCI_MSI
+       depends on PCI
+       select PCI_MSI
        select GENERIC_IRQ_CHIP
-       select PCI_MSI_IRQ_DOMAIN
 
 config ATMEL_AIC_IRQ
        bool
@@ -117,7 +118,6 @@ config HISILICON_IRQ_MBIGEN
        bool
        select ARM_GIC_V3
        select ARM_GIC_V3_ITS
-       select GENERIC_MSI_IRQ_DOMAIN
 
 config IMGPDC_IRQ
        bool
@@ -250,12 +250,10 @@ config IRQ_MXS
 
 config MVEBU_ODMI
        bool
-       select GENERIC_MSI_IRQ_DOMAIN
 
 config LS_SCFG_MSI
        def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
        depends on PCI && PCI_MSI
-       select PCI_MSI_IRQ_DOMAIN
 
 config PARTITION_PERCPU
        bool
index 133712346911dd694b28cda4cc80fcb86636fdd9..4b4c0c3c3d2feab9d8958684bcd52d8f2ab64419 100644 (file)
@@ -115,7 +115,7 @@ config FSL_CORENET_CF
 
 config FSL_IFC
        bool
-       depends on FSL_SOC
+       depends on FSL_SOC || ARCH_LAYERSCAPE
 
 config JZ4780_NEMC
        bool "Ingenic JZ4780 SoC NEMC driver"
index 904b4af5f1424ef978d317ba052f3a268d111818..1b182b117f9cf3fbcacfcd9a3e343a3d66e47ab2 100644 (file)
@@ -31,7 +31,9 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/fsl_ifc.h>
-#include <asm/prom.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 
 struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
 EXPORT_SYMBOL(fsl_ifc_ctrl_dev);
index 4cf8f82cfca2567bf7642d4c8ba7f8fafc2e8fea..a70b853fa2c99b330583189e5b4b103771531eed 100644 (file)
@@ -182,7 +182,7 @@ static void genwqe_dev_free(struct genwqe_dev *cd)
  */
 static int genwqe_bus_reset(struct genwqe_dev *cd)
 {
-       int bars, rc = 0;
+       int rc = 0;
        struct pci_dev *pci_dev = cd->pci_dev;
        void __iomem *mmio;
 
@@ -193,8 +193,7 @@ static int genwqe_bus_reset(struct genwqe_dev *cd)
        cd->mmio = NULL;
        pci_iounmap(pci_dev, mmio);
 
-       bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
-       pci_release_selected_regions(pci_dev, bars);
+       pci_release_mem_regions(pci_dev);
 
        /*
         * Firmware/BIOS might change memory mapping during bus reset.
@@ -218,7 +217,7 @@ static int genwqe_bus_reset(struct genwqe_dev *cd)
                            GENWQE_INJECT_GFIR_FATAL |
                            GENWQE_INJECT_GFIR_INFO);
 
-       rc = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name);
+       rc = pci_request_mem_regions(pci_dev, genwqe_driver_name);
        if (rc) {
                dev_err(&pci_dev->dev,
                        "[%s] err: request bars failed (%d)\n", __func__, rc);
@@ -1068,10 +1067,9 @@ static int genwqe_health_check_stop(struct genwqe_dev *cd)
  */
 static int genwqe_pci_setup(struct genwqe_dev *cd)
 {
-       int err, bars;
+       int err;
        struct pci_dev *pci_dev = cd->pci_dev;
 
-       bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
        err = pci_enable_device_mem(pci_dev);
        if (err) {
                dev_err(&pci_dev->dev,
@@ -1080,7 +1078,7 @@ static int genwqe_pci_setup(struct genwqe_dev *cd)
        }
 
        /* Reserve PCI I/O and memory resources */
-       err = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name);
+       err = pci_request_mem_regions(pci_dev, genwqe_driver_name);
        if (err) {
                dev_err(&pci_dev->dev,
                        "[%s] err: request bars failed (%d)\n", __func__, err);
@@ -1142,7 +1140,7 @@ static int genwqe_pci_setup(struct genwqe_dev *cd)
  out_iounmap:
        pci_iounmap(pci_dev, cd->mmio);
  out_release_resources:
-       pci_release_selected_regions(pci_dev, bars);
+       pci_release_mem_regions(pci_dev);
  err_disable_device:
        pci_disable_device(pci_dev);
  err_out:
@@ -1154,14 +1152,12 @@ static int genwqe_pci_setup(struct genwqe_dev *cd)
  */
 static void genwqe_pci_remove(struct genwqe_dev *cd)
 {
-       int bars;
        struct pci_dev *pci_dev = cd->pci_dev;
 
        if (cd->mmio)
                pci_iounmap(pci_dev, cd->mmio);
 
-       bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
-       pci_release_selected_regions(pci_dev, bars);
+       pci_release_mem_regions(pci_dev);
        pci_disable_device(pci_dev);
 }
 
index 9a1a6ffd16b87902d9ee9caf02efab300a014ef5..94d3eb42c4d5f2c55dc6db811ae3a5280e58f95e 100644 (file)
@@ -416,7 +416,7 @@ static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t
        return ret;
 }
 
-static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
+static int do_write_buffer(struct map_info *map, struct flchip *chip,
                                  unsigned long adr, const u_char *buf, int len)
 {
        struct cfi_private *cfi = map->fldrv_priv;
index 64a248556d290b6ac5db0ff09046bea96a77f51f..58329d2dacd1f74da67e9c45e001b01de41d4e94 100644 (file)
@@ -113,12 +113,12 @@ config MTD_SST25L
          if you want to specify device partitioning.
 
 config MTD_BCM47XXSFLASH
-       tristate "R/O support for serial flash on BCMA bus"
+       tristate "Support for serial flash on BCMA bus"
        depends on BCMA_SFLASH && (MIPS || ARM)
        help
          BCMA bus can have various flash memories attached, they are
          registered by bcma as platform devices. This enables driver for
-         serial flash memories (only read-only mode is implemented).
+         serial flash memories.
 
 config MTD_SLRAM
        tristate "Uncached system RAM"
@@ -171,18 +171,6 @@ config MTDRAM_ERASE_SIZE
          as a module, it is also possible to specify this as a parameter when
          loading the module.
 
-#If not a module (I don't want to test it as a module)
-config MTDRAM_ABS_POS
-       hex "SRAM Hexadecimal Absolute position or 0"
-       depends on MTD_MTDRAM=y
-       default "0"
-       help
-         If you have system RAM accessible by the CPU but not used by Linux
-         in normal operation, you can give the physical address at which the
-         available RAM starts, and the MTDRAM driver will use it instead of
-         allocating space from Linux's available memory. Otherwise, leave
-         this set to zero. Most people will want to leave this as zero.
-
 config MTD_BLOCK2MTD
        tristate "MTD using block device"
        depends on BLOCK
index 9d6854467651774182ab8895198d41ed7caaf945..9cf7fcd280340ea2b5f72de499b7bba7e6893f34 100644 (file)
@@ -73,14 +73,15 @@ static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
        return spi_write(spi, flash->command, len + 1);
 }
 
-static void m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
-                       size_t *retlen, const u_char *buf)
+static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
+                           const u_char *buf)
 {
        struct m25p *flash = nor->priv;
        struct spi_device *spi = flash->spi;
        struct spi_transfer t[2] = {};
        struct spi_message m;
        int cmd_sz = m25p_cmdsz(nor);
+       ssize_t ret;
 
        spi_message_init(&m);
 
@@ -98,9 +99,14 @@ static void m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
        t[1].len = len;
        spi_message_add_tail(&t[1], &m);
 
-       spi_sync(spi, &m);
+       ret = spi_sync(spi, &m);
+       if (ret)
+               return ret;
 
-       *retlen += m.actual_length - cmd_sz;
+       ret = m.actual_length - cmd_sz;
+       if (ret < 0)
+               return -EIO;
+       return ret;
 }
 
 static inline unsigned int m25p80_rx_nbits(struct spi_nor *nor)
@@ -119,21 +125,21 @@ static inline unsigned int m25p80_rx_nbits(struct spi_nor *nor)
  * Read an address range from the nor chip.  The address range
  * may be any size provided it is within the physical boundaries.
  */
-static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
-                       size_t *retlen, u_char *buf)
+static ssize_t m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
+                          u_char *buf)
 {
        struct m25p *flash = nor->priv;
        struct spi_device *spi = flash->spi;
        struct spi_transfer t[2];
        struct spi_message m;
        unsigned int dummy = nor->read_dummy;
+       ssize_t ret;
 
        /* convert the dummy cycles to the number of bytes */
        dummy /= 8;
 
        if (spi_flash_read_supported(spi)) {
                struct spi_flash_read_message msg;
-               int ret;
 
                memset(&msg, 0, sizeof(msg));
 
@@ -149,8 +155,9 @@ static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
                msg.data_nbits = m25p80_rx_nbits(nor);
 
                ret = spi_flash_read(spi, &msg);
-               *retlen = msg.retlen;
-               return ret;
+               if (ret < 0)
+                       return ret;
+               return msg.retlen;
        }
 
        spi_message_init(&m);
@@ -165,13 +172,17 @@ static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
 
        t[1].rx_buf = buf;
        t[1].rx_nbits = m25p80_rx_nbits(nor);
-       t[1].len = len;
+       t[1].len = min(len, spi_max_transfer_size(spi));
        spi_message_add_tail(&t[1], &m);
 
-       spi_sync(spi, &m);
+       ret = spi_sync(spi, &m);
+       if (ret)
+               return ret;
 
-       *retlen = m.actual_length - m25p_cmdsz(nor) - dummy;
-       return 0;
+       ret = m.actual_length - m25p_cmdsz(nor) - dummy;
+       if (ret < 0)
+               return -EIO;
+       return ret;
 }
 
 /*
index 22f3858c0364688fb4f6e40a3fa11268ff7c8f49..3fad35942895c0186dfa08d22ab655fc0a4ed37f 100644 (file)
@@ -186,7 +186,7 @@ static int of_flash_probe(struct platform_device *dev)
         * consists internally of 2 non-identical NOR chips on one die.
         */
        p = of_get_property(dp, "reg", &count);
-       if (count % reg_tuple_size != 0) {
+       if (!p || count % reg_tuple_size != 0) {
                dev_err(&dev->dev, "Malformed reg property on %s\n",
                                dev->dev.of_node->full_name);
                err = -EINVAL;
index 744ca5cacc9b2e8b6f10189dc169f29426cad9e9..f9fa3fad728e5e3b0687c5affadd7cf8d0b129b0 100644 (file)
@@ -75,15 +75,15 @@ static int __init init_msp_flash(void)
 
        printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt);
 
-       msp_flash = kmalloc(fcnt * sizeof(struct map_info *), GFP_KERNEL);
+       msp_flash = kcalloc(fcnt, sizeof(*msp_flash), GFP_KERNEL);
        if (!msp_flash)
                return -ENOMEM;
 
-       msp_parts = kmalloc(fcnt * sizeof(struct mtd_partition *), GFP_KERNEL);
+       msp_parts = kcalloc(fcnt, sizeof(*msp_parts), GFP_KERNEL);
        if (!msp_parts)
                goto free_msp_flash;
 
-       msp_maps = kcalloc(fcnt, sizeof(struct mtd_info), GFP_KERNEL);
+       msp_maps = kcalloc(fcnt, sizeof(*msp_maps), GFP_KERNEL);
        if (!msp_maps)
                goto free_msp_parts;
 
index 142fc3d794637366cc4a4996b5e07f882d14b7a2..784c6e1a0391e92c90723e698d8bc148fe3e4916 100644 (file)
@@ -230,8 +230,10 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
 
                info->mtd = mtd_concat_create(cdev, info->num_subdev,
                                              plat->name);
-               if (info->mtd == NULL)
+               if (info->mtd == NULL) {
                        ret = -ENXIO;
+                       goto err;
+               }
        }
        info->mtd->dev.parent = &pdev->dev;
 
index f05e0e9eb2f73e8b807d97fb30cf4638d07286d9..21ff58099f3bfd5c042040d2ee7e4bbcbdb6a074 100644 (file)
@@ -438,7 +438,7 @@ config MTD_NAND_FSL_ELBC
 
 config MTD_NAND_FSL_IFC
        tristate "NAND support for Freescale IFC controller"
-       depends on MTD_NAND && FSL_SOC
+       depends on MTD_NAND && (FSL_SOC || ARCH_LAYERSCAPE)
        select FSL_IFC
        select MEMORY
        help
@@ -539,7 +539,6 @@ config MTD_NAND_FSMC
 config MTD_NAND_XWAY
        tristate "Support for NAND on Lantiq XWAY SoC"
        depends on LANTIQ && SOC_TYPE_XWAY
-       select MTD_NAND_PLATFORM
        help
          Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
          to the External Bus Unit (EBU).
@@ -563,4 +562,11 @@ config MTD_NAND_QCOM
          Enables support for NAND flash chips on SoCs containing the EBI2 NAND
          controller. This controller is found on IPQ806x SoC.
 
+config MTD_NAND_MTK
+       tristate "Support for NAND controller on MTK SoCs"
+       depends on HAS_DMA
+       help
+         Enables support for NAND controller on MTK SoCs.
+         This controller is found on mt27xx, mt81xx, mt65xx SoCs.
+
 endif # MTD_NAND
index f55335373f7c5b755c4363574ed7abc211ec89c7..cafde6f3d95761263d4c5af1395b11bfc000ca9b 100644 (file)
@@ -57,5 +57,6 @@ obj-$(CONFIG_MTD_NAND_SUNXI)          += sunxi_nand.o
 obj-$(CONFIG_MTD_NAND_HISI504)         += hisi504_nand.o
 obj-$(CONFIG_MTD_NAND_BRCMNAND)                += brcmnand/
 obj-$(CONFIG_MTD_NAND_QCOM)            += qcom_nandc.o
+obj-$(CONFIG_MTD_NAND_MTK)             += mtk_nand.o mtk_ecc.o
 
 nand-objs := nand_base.o nand_bbt.o nand_timings.o
index b76ad7c0144f7501e7de328c8c0175690df3a039..8eb2c64df38c333c3da874cac51b9695ac534de8 100644 (file)
@@ -340,6 +340,36 @@ static const u16 brcmnand_regs_v71[] = {
        [BRCMNAND_FC_BASE]              = 0x400,
 };
 
+/* BRCMNAND v7.2 */
+static const u16 brcmnand_regs_v72[] = {
+       [BRCMNAND_CMD_START]            =  0x04,
+       [BRCMNAND_CMD_EXT_ADDRESS]      =  0x08,
+       [BRCMNAND_CMD_ADDRESS]          =  0x0c,
+       [BRCMNAND_INTFC_STATUS]         =  0x14,
+       [BRCMNAND_CS_SELECT]            =  0x18,
+       [BRCMNAND_CS_XOR]               =  0x1c,
+       [BRCMNAND_LL_OP]                =  0x20,
+       [BRCMNAND_CS0_BASE]             =  0x50,
+       [BRCMNAND_CS1_BASE]             =     0,
+       [BRCMNAND_CORR_THRESHOLD]       =  0xdc,
+       [BRCMNAND_CORR_THRESHOLD_EXT]   =  0xe0,
+       [BRCMNAND_UNCORR_COUNT]         =  0xfc,
+       [BRCMNAND_CORR_COUNT]           = 0x100,
+       [BRCMNAND_CORR_EXT_ADDR]        = 0x10c,
+       [BRCMNAND_CORR_ADDR]            = 0x110,
+       [BRCMNAND_UNCORR_EXT_ADDR]      = 0x114,
+       [BRCMNAND_UNCORR_ADDR]          = 0x118,
+       [BRCMNAND_SEMAPHORE]            = 0x150,
+       [BRCMNAND_ID]                   = 0x194,
+       [BRCMNAND_ID_EXT]               = 0x198,
+       [BRCMNAND_LL_RDATA]             = 0x19c,
+       [BRCMNAND_OOB_READ_BASE]        = 0x200,
+       [BRCMNAND_OOB_READ_10_BASE]     =     0,
+       [BRCMNAND_OOB_WRITE_BASE]       = 0x400,
+       [BRCMNAND_OOB_WRITE_10_BASE]    =     0,
+       [BRCMNAND_FC_BASE]              = 0x600,
+};
+
 enum brcmnand_cs_reg {
        BRCMNAND_CS_CFG_EXT = 0,
        BRCMNAND_CS_CFG,
@@ -435,7 +465,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
        }
 
        /* Register offsets */
-       if (ctrl->nand_version >= 0x0701)
+       if (ctrl->nand_version >= 0x0702)
+               ctrl->reg_offsets = brcmnand_regs_v72;
+       else if (ctrl->nand_version >= 0x0701)
                ctrl->reg_offsets = brcmnand_regs_v71;
        else if (ctrl->nand_version >= 0x0600)
                ctrl->reg_offsets = brcmnand_regs_v60;
@@ -480,7 +512,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
        }
 
        /* Maximum spare area sector size (per 512B) */
-       if (ctrl->nand_version >= 0x0600)
+       if (ctrl->nand_version >= 0x0702)
+               ctrl->max_oob = 128;
+       else if (ctrl->nand_version >= 0x0600)
                ctrl->max_oob = 64;
        else if (ctrl->nand_version >= 0x0500)
                ctrl->max_oob = 32;
@@ -583,14 +617,20 @@ static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
        enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD;
        int cs = host->cs;
 
-       if (ctrl->nand_version >= 0x0600)
+       if (ctrl->nand_version >= 0x0702)
+               bits = 7;
+       else if (ctrl->nand_version >= 0x0600)
                bits = 6;
        else if (ctrl->nand_version >= 0x0500)
                bits = 5;
        else
                bits = 4;
 
-       if (ctrl->nand_version >= 0x0600) {
+       if (ctrl->nand_version >= 0x0702) {
+               if (cs >= 4)
+                       reg = BRCMNAND_CORR_THRESHOLD_EXT;
+               shift = (cs % 4) * bits;
+       } else if (ctrl->nand_version >= 0x0600) {
                if (cs >= 5)
                        reg = BRCMNAND_CORR_THRESHOLD_EXT;
                shift = (cs % 5) * bits;
@@ -631,19 +671,28 @@ enum {
 
 static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
 {
-       if (ctrl->nand_version >= 0x0600)
+       if (ctrl->nand_version >= 0x0702)
+               return GENMASK(7, 0);
+       else if (ctrl->nand_version >= 0x0600)
                return GENMASK(6, 0);
        else
                return GENMASK(5, 0);
 }
 
 #define NAND_ACC_CONTROL_ECC_SHIFT     16
+#define NAND_ACC_CONTROL_ECC_EXT_SHIFT 13
 
 static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
 {
        u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;
 
-       return mask << NAND_ACC_CONTROL_ECC_SHIFT;
+       mask <<= NAND_ACC_CONTROL_ECC_SHIFT;
+
+       /* v7.2 includes additional ECC levels */
+       if (ctrl->nand_version >= 0x0702)
+               mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT;
+
+       return mask;
 }
 
 static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
@@ -667,7 +716,9 @@ static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
 
 static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
 {
-       if (ctrl->nand_version >= 0x0600)
+       if (ctrl->nand_version >= 0x0702)
+               return 9;
+       else if (ctrl->nand_version >= 0x0600)
                return 7;
        else if (ctrl->nand_version >= 0x0500)
                return 6;
@@ -773,10 +824,16 @@ enum brcmnand_llop_type {
  * Internal support functions
  ***********************************************************************/
 
-static inline bool is_hamming_ecc(struct brcmnand_cfg *cfg)
+static inline bool is_hamming_ecc(struct brcmnand_controller *ctrl,
+                                 struct brcmnand_cfg *cfg)
 {
-       return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 &&
-               cfg->ecc_level == 15;
+       if (ctrl->nand_version <= 0x0701)
+               return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 &&
+                       cfg->ecc_level == 15;
+       else
+               return cfg->sector_size_1k == 0 && ((cfg->spare_area_size == 16 &&
+                       cfg->ecc_level == 15) ||
+                       (cfg->spare_area_size == 28 && cfg->ecc_level == 16));
 }
 
 /*
@@ -931,7 +988,7 @@ static int brcmstb_choose_ecc_layout(struct brcmnand_host *host)
        if (p->sector_size_1k)
                ecc_level <<= 1;
 
-       if (is_hamming_ecc(p)) {
+       if (is_hamming_ecc(host->ctrl, p)) {
                ecc->bytes = 3 * sectors;
                mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops);
                return 0;
@@ -1108,7 +1165,7 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
        ctrl->cmd_pending = cmd;
 
        intfc = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
-       BUG_ON(!(intfc & INTFC_CTLR_READY));
+       WARN_ON(!(intfc & INTFC_CTLR_READY));
 
        mb(); /* flush previous writes */
        brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
@@ -1545,6 +1602,56 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
        return ret;
 }
 
+/*
+ * Check a page to see if it is erased (w/ bitflips) after an uncorrectable ECC
+ * error
+ *
+ * Because the HW ECC signals an ECC error if an erased page has even a single
+ * bitflip, we must check each ECC error to see if it is actually an erased
+ * page with bitflips, not a truly corrupted page.
+ *
+ * On a real error, return a negative error code (-EBADMSG for ECC error), and
+ * buf will contain raw data.
+ * Otherwise, buf gets filled with 0xffs and we return the maximum number of
+ * bitflips-per-ECC-sector to the caller.
+ *
+ */
+static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
+                 struct nand_chip *chip, void *buf, u64 addr)
+{
+       int i, sas;
+       void *oob = chip->oob_poi;
+       int bitflips = 0;
+       int page = addr >> chip->page_shift;
+       int ret;
+
+       if (!buf) {
+               buf = chip->buffers->databuf;
+               /* Invalidate page cache */
+               chip->pagebuf = -1;
+       }
+
+       sas = mtd->oobsize / chip->ecc.steps;
+
+       /* read without ecc for verification */
+       chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+       ret = chip->ecc.read_page_raw(mtd, chip, buf, true, page);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < chip->ecc.steps; i++, oob += sas) {
+               ret = nand_check_erased_ecc_chunk(buf, chip->ecc.size,
+                                                 oob, sas, NULL, 0,
+                                                 chip->ecc.strength);
+               if (ret < 0)
+                       return ret;
+
+               bitflips = max(bitflips, ret);
+       }
+
+       return bitflips;
+}
+
 static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
                         u64 addr, unsigned int trans, u32 *buf, u8 *oob)
 {
@@ -1552,9 +1659,11 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
        struct brcmnand_controller *ctrl = host->ctrl;
        u64 err_addr = 0;
        int err;
+       bool retry = true;
 
        dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
 
+try_dmaread:
        brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0);
 
        if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
@@ -1575,6 +1684,34 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
        }
 
        if (mtd_is_eccerr(err)) {
+               /*
+                * On controller versions 7.0 and 7.1, a DMA read issued after
+                * a prior PIO read that reported an uncorrectable error can
+                * have that stale error captured by the DMA engine; it is
+                * cleared only on a subsequent DMA read, so just retry once
+                * to clear a possible false error reported for the current
+                * DMA read
+                */
+               if ((ctrl->nand_version == 0x0700) ||
+                   (ctrl->nand_version == 0x0701)) {
+                       if (retry) {
+                               retry = false;
+                               goto try_dmaread;
+                       }
+               }
+
+               /*
+                * Controller version 7.2 has hw encoder to detect erased page
+                * bitflips, apply sw verification for older controllers only
+                */
+               if (ctrl->nand_version < 0x0702) {
+                       err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
+                                                             addr);
+                       /* erased page bitflips corrected */
+                       if (err > 0)
+                               return err;
+               }
+
                dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
                        (unsigned long long)err_addr);
                mtd->ecc_stats.failed++;
@@ -1857,7 +1994,8 @@ static int brcmnand_set_cfg(struct brcmnand_host *host,
        return 0;
 }
 
-static void brcmnand_print_cfg(char *buf, struct brcmnand_cfg *cfg)
+static void brcmnand_print_cfg(struct brcmnand_host *host,
+                              char *buf, struct brcmnand_cfg *cfg)
 {
        buf += sprintf(buf,
                "%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit",
@@ -1868,7 +2006,7 @@ static void brcmnand_print_cfg(char *buf, struct brcmnand_cfg *cfg)
                cfg->spare_area_size, cfg->device_width);
 
        /* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */
-       if (is_hamming_ecc(cfg))
+       if (is_hamming_ecc(host->ctrl, cfg))
                sprintf(buf, ", Hamming ECC");
        else if (cfg->sector_size_1k)
                sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1);
@@ -1987,7 +2125,7 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
 
        brcmnand_set_ecc_enabled(host, 1);
 
-       brcmnand_print_cfg(msg, cfg);
+       brcmnand_print_cfg(host, msg, cfg);
        dev_info(ctrl->dev, "detected %s\n", msg);
 
        /* Configure ACC_CONTROL */
@@ -1995,6 +2133,10 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
        tmp = nand_readreg(ctrl, offs);
        tmp &= ~ACC_CONTROL_PARTIAL_PAGE;
        tmp &= ~ACC_CONTROL_RD_ERASED;
+
+       /* We need to turn on reads from erased pages protected by ECC */
+       if (ctrl->nand_version >= 0x0702)
+               tmp |= ACC_CONTROL_RD_ERASED;
        tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
        if (ctrl->features & BRCMNAND_HAS_PREFETCH) {
                /*
@@ -2195,6 +2337,7 @@ static const struct of_device_id brcmnand_of_match[] = {
        { .compatible = "brcm,brcmnand-v6.2" },
        { .compatible = "brcm,brcmnand-v7.0" },
        { .compatible = "brcm,brcmnand-v7.1" },
+       { .compatible = "brcm,brcmnand-v7.2" },
        {},
 };
 MODULE_DEVICE_TABLE(of, brcmnand_of_match);
index d74f4ba4a6f49b45859de1b04e0a3654ca5f2d7d..731c6051d91e0fcc49f69ac74c8f6ccdfddad2f8 100644 (file)
@@ -375,6 +375,6 @@ static struct platform_driver jz4780_bch_driver = {
 module_platform_driver(jz4780_bch_driver);
 
 MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
-MODULE_AUTHOR("Harvey Hunt <harvey.hunt@imgtec.com>");
+MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>");
 MODULE_DESCRIPTION("Ingenic JZ4780 BCH error correction driver");
 MODULE_LICENSE("GPL v2");
index daf3c4217f4deb034a4e952cb75a237f07c7bf43..175f67da25af01912b7b8f5d45f0ff3ada5d5a68 100644 (file)
@@ -412,6 +412,6 @@ static struct platform_driver jz4780_nand_driver = {
 module_platform_driver(jz4780_nand_driver);
 
 MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
-MODULE_AUTHOR("Harvey Hunt <harvey.hunt@imgtec.com>");
+MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>");
 MODULE_DESCRIPTION("Ingenic JZ4780 NAND driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
new file mode 100644 (file)
index 0000000..25a4fbd
--- /dev/null
@@ -0,0 +1,530 @@
+/*
+ * MTK ECC controller driver.
+ * Copyright (C) 2016  MediaTek Inc.
+ * Authors:    Xiaolei Li              <xiaolei.li@mediatek.com>
+ *             Jorge Ramirez-Ortiz     <jorge.ramirez-ortiz@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/mutex.h>
+
+#include "mtk_ecc.h"
+
+#define ECC_IDLE_MASK          BIT(0)
+#define ECC_IRQ_EN             BIT(0)
+#define ECC_OP_ENABLE          (1)
+#define ECC_OP_DISABLE         (0)
+
+#define ECC_ENCCON             (0x00)
+#define ECC_ENCCNFG            (0x04)
+#define                ECC_CNFG_4BIT           (0)
+#define                ECC_CNFG_6BIT           (1)
+#define                ECC_CNFG_8BIT           (2)
+#define                ECC_CNFG_10BIT          (3)
+#define                ECC_CNFG_12BIT          (4)
+#define                ECC_CNFG_14BIT          (5)
+#define                ECC_CNFG_16BIT          (6)
+#define                ECC_CNFG_18BIT          (7)
+#define                ECC_CNFG_20BIT          (8)
+#define                ECC_CNFG_22BIT          (9)
+#define                ECC_CNFG_24BIT          (0xa)
+#define                ECC_CNFG_28BIT          (0xb)
+#define                ECC_CNFG_32BIT          (0xc)
+#define                ECC_CNFG_36BIT          (0xd)
+#define                ECC_CNFG_40BIT          (0xe)
+#define                ECC_CNFG_44BIT          (0xf)
+#define                ECC_CNFG_48BIT          (0x10)
+#define                ECC_CNFG_52BIT          (0x11)
+#define                ECC_CNFG_56BIT          (0x12)
+#define                ECC_CNFG_60BIT          (0x13)
+#define                ECC_MODE_SHIFT          (5)
+#define                ECC_MS_SHIFT            (16)
+#define ECC_ENCDIADDR          (0x08)
+#define ECC_ENCIDLE            (0x0C)
+#define ECC_ENCPAR(x)          (0x10 + (x) * sizeof(u32))
+#define ECC_ENCIRQ_EN          (0x80)
+#define ECC_ENCIRQ_STA         (0x84)
+#define ECC_DECCON             (0x100)
+#define ECC_DECCNFG            (0x104)
+#define                DEC_EMPTY_EN            BIT(31)
+#define                DEC_CNFG_CORRECT        (0x3 << 12)
+#define ECC_DECIDLE            (0x10C)
+#define ECC_DECENUM0           (0x114)
+#define                ERR_MASK                (0x3f)
+#define ECC_DECDONE            (0x124)
+#define ECC_DECIRQ_EN          (0x200)
+#define ECC_DECIRQ_STA         (0x204)
+
+#define ECC_TIMEOUT            (500000)
+
+#define ECC_IDLE_REG(op)       ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE)
+#define ECC_CTL_REG(op)                ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON)
+#define ECC_IRQ_REG(op)                ((op) == ECC_ENCODE ? \
+                                       ECC_ENCIRQ_EN : ECC_DECIRQ_EN)
+
+struct mtk_ecc {
+       struct device *dev;
+       void __iomem *regs;
+       struct clk *clk;
+
+       struct completion done;
+       struct mutex lock;
+       u32 sectors;
+};
+
+static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
+                                    enum mtk_ecc_operation op)
+{
+       struct device *dev = ecc->dev;
+       u32 val;
+       int ret;
+
+       ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(op), val,
+                                       val & ECC_IDLE_MASK,
+                                       10, ECC_TIMEOUT);
+       if (ret)
+               dev_warn(dev, "%s NOT idle\n",
+                        op == ECC_ENCODE ? "encoder" : "decoder");
+}
+
+static irqreturn_t mtk_ecc_irq(int irq, void *id)
+{
+       struct mtk_ecc *ecc = id;
+       enum mtk_ecc_operation op;
+       u32 dec, enc;
+
+       dec = readw(ecc->regs + ECC_DECIRQ_STA) & ECC_IRQ_EN;
+       if (dec) {
+               op = ECC_DECODE;
+               dec = readw(ecc->regs + ECC_DECDONE);
+               if (dec & ecc->sectors) {
+                       ecc->sectors = 0;
+                       complete(&ecc->done);
+               } else {
+                       return IRQ_HANDLED;
+               }
+       } else {
+               enc = readl(ecc->regs + ECC_ENCIRQ_STA) & ECC_IRQ_EN;
+               if (enc) {
+                       op = ECC_ENCODE;
+                       complete(&ecc->done);
+               } else {
+                       return IRQ_NONE;
+               }
+       }
+
+       writel(0, ecc->regs + ECC_IRQ_REG(op));
+
+       return IRQ_HANDLED;
+}
+
+static void mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
+{
+       u32 ecc_bit = ECC_CNFG_4BIT, dec_sz, enc_sz;
+       u32 reg;
+
+       switch (config->strength) {
+       case 4:
+               ecc_bit = ECC_CNFG_4BIT;
+               break;
+       case 6:
+               ecc_bit = ECC_CNFG_6BIT;
+               break;
+       case 8:
+               ecc_bit = ECC_CNFG_8BIT;
+               break;
+       case 10:
+               ecc_bit = ECC_CNFG_10BIT;
+               break;
+       case 12:
+               ecc_bit = ECC_CNFG_12BIT;
+               break;
+       case 14:
+               ecc_bit = ECC_CNFG_14BIT;
+               break;
+       case 16:
+               ecc_bit = ECC_CNFG_16BIT;
+               break;
+       case 18:
+               ecc_bit = ECC_CNFG_18BIT;
+               break;
+       case 20:
+               ecc_bit = ECC_CNFG_20BIT;
+               break;
+       case 22:
+               ecc_bit = ECC_CNFG_22BIT;
+               break;
+       case 24:
+               ecc_bit = ECC_CNFG_24BIT;
+               break;
+       case 28:
+               ecc_bit = ECC_CNFG_28BIT;
+               break;
+       case 32:
+               ecc_bit = ECC_CNFG_32BIT;
+               break;
+       case 36:
+               ecc_bit = ECC_CNFG_36BIT;
+               break;
+       case 40:
+               ecc_bit = ECC_CNFG_40BIT;
+               break;
+       case 44:
+               ecc_bit = ECC_CNFG_44BIT;
+               break;
+       case 48:
+               ecc_bit = ECC_CNFG_48BIT;
+               break;
+       case 52:
+               ecc_bit = ECC_CNFG_52BIT;
+               break;
+       case 56:
+               ecc_bit = ECC_CNFG_56BIT;
+               break;
+       case 60:
+               ecc_bit = ECC_CNFG_60BIT;
+               break;
+       default:
+               dev_err(ecc->dev, "invalid strength %d, default to 4 bits\n",
+                       config->strength);
+       }
+
+       if (config->op == ECC_ENCODE) {
+               /* configure ECC encoder (in bits) */
+               enc_sz = config->len << 3;
+
+               reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
+               reg |= (enc_sz << ECC_MS_SHIFT);
+               writel(reg, ecc->regs + ECC_ENCCNFG);
+
+               if (config->mode != ECC_NFI_MODE)
+                       writel(lower_32_bits(config->addr),
+                              ecc->regs + ECC_ENCDIADDR);
+
+       } else {
+               /* configure ECC decoder (in bits) */
+               dec_sz = (config->len << 3) +
+                                       config->strength * ECC_PARITY_BITS;
+
+               reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
+               reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT;
+               reg |= DEC_EMPTY_EN;
+               writel(reg, ecc->regs + ECC_DECCNFG);
+
+               if (config->sectors)
+                       ecc->sectors = 1 << (config->sectors - 1);
+       }
+}
+
+void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
+                      int sectors)
+{
+       u32 offset, i, err;
+       u32 bitflips = 0;
+
+       stats->corrected = 0;
+       stats->failed = 0;
+
+       for (i = 0; i < sectors; i++) {
+               offset = (i >> 2) << 2;
+               err = readl(ecc->regs + ECC_DECENUM0 + offset);
+               err = err >> ((i % 4) * 8);
+               err &= ERR_MASK;
+               if (err == ERR_MASK) {
+                       /* uncorrectable errors */
+                       stats->failed++;
+                       continue;
+               }
+
+               stats->corrected += err;
+               bitflips = max_t(u32, bitflips, err);
+       }
+
+       stats->bitflips = bitflips;
+}
+EXPORT_SYMBOL(mtk_ecc_get_stats);
+
+void mtk_ecc_release(struct mtk_ecc *ecc)
+{
+       clk_disable_unprepare(ecc->clk);
+       put_device(ecc->dev);
+}
+EXPORT_SYMBOL(mtk_ecc_release);
+
+static void mtk_ecc_hw_init(struct mtk_ecc *ecc)
+{
+       mtk_ecc_wait_idle(ecc, ECC_ENCODE);
+       writew(ECC_OP_DISABLE, ecc->regs + ECC_ENCCON);
+
+       mtk_ecc_wait_idle(ecc, ECC_DECODE);
+       writel(ECC_OP_DISABLE, ecc->regs + ECC_DECCON);
+}
+
+static struct mtk_ecc *mtk_ecc_get(struct device_node *np)
+{
+       struct platform_device *pdev;
+       struct mtk_ecc *ecc;
+
+       pdev = of_find_device_by_node(np);
+       if (!pdev || !platform_get_drvdata(pdev))
+               return ERR_PTR(-EPROBE_DEFER);
+
+       get_device(&pdev->dev);
+       ecc = platform_get_drvdata(pdev);
+       clk_prepare_enable(ecc->clk);
+       mtk_ecc_hw_init(ecc);
+
+       return ecc;
+}
+
+struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node)
+{
+       struct mtk_ecc *ecc = NULL;
+       struct device_node *np;
+
+       np = of_parse_phandle(of_node, "ecc-engine", 0);
+       if (np) {
+               ecc = mtk_ecc_get(np);
+               of_node_put(np);
+       }
+
+       return ecc;
+}
+EXPORT_SYMBOL(of_mtk_ecc_get);
+
+int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
+{
+       enum mtk_ecc_operation op = config->op;
+       int ret;
+
+       ret = mutex_lock_interruptible(&ecc->lock);
+       if (ret) {
+               dev_err(ecc->dev, "interrupted when attempting to lock\n");
+               return ret;
+       }
+
+       mtk_ecc_wait_idle(ecc, op);
+       mtk_ecc_config(ecc, config);
+       writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op));
+
+       init_completion(&ecc->done);
+       writew(ECC_IRQ_EN, ecc->regs + ECC_IRQ_REG(op));
+
+       return 0;
+}
+EXPORT_SYMBOL(mtk_ecc_enable);
+
+/*
+ * Stop whichever operation is currently running, mask its interrupt and
+ * release the engine lock taken by mtk_ecc_enable().
+ */
+void mtk_ecc_disable(struct mtk_ecc *ecc)
+{
+       enum mtk_ecc_operation op = ECC_ENCODE;
+
+       /* find out the running operation */
+       if (readw(ecc->regs + ECC_CTL_REG(op)) != ECC_OP_ENABLE)
+               op = ECC_DECODE;
+
+       /* disable it */
+       mtk_ecc_wait_idle(ecc, op);
+       writew(0, ecc->regs + ECC_IRQ_REG(op));
+       writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
+
+       mutex_unlock(&ecc->lock);
+}
+EXPORT_SYMBOL(mtk_ecc_disable);
+
+/*
+ * Wait (up to 500ms) for the interrupt handler to signal that the
+ * encode/decode operation finished.  Returns 0 on completion or
+ * -ETIMEDOUT when the interrupt never arrived.
+ */
+int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op)
+{
+       int ret;
+
+       ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500));
+       if (!ret) {
+               dev_err(ecc->dev, "%s timeout - interrupt did not arrive)\n",
+                       (op == ECC_ENCODE) ? "encoder" : "decoder");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(mtk_ecc_wait_done);
+
+/*
+ * DMA the sector data (@data, @bytes long) through the encoder and append
+ * the generated parity bytes right after it (per sector oob layout:
+ * FDM + ECC + SPARE).  The caller must size the buffer for at least
+ * @bytes plus the parity length of config->strength.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
+                  u8 *data, u32 bytes)
+{
+       dma_addr_t addr;
+       u32 len, i, val;
+       int ret = 0;
+
+       addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
+       ret = dma_mapping_error(ecc->dev, addr);
+       if (ret) {
+               dev_err(ecc->dev, "dma mapping error\n");
+               return -EINVAL;
+       }
+
+       config->op = ECC_ENCODE;
+       config->addr = addr;
+       ret = mtk_ecc_enable(ecc, config);
+       if (ret) {
+               dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
+               return ret;
+       }
+
+       ret = mtk_ecc_wait_done(ecc, ECC_ENCODE);
+       if (ret)
+               goto timeout;
+
+       mtk_ecc_wait_idle(ecc, ECC_ENCODE);
+
+       /* parity length in BYTES for the configured strength */
+       len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
+
+       /*
+        * Copy the parity back to the OOB region one register word at a
+        * time, storing only the valid bytes of the last word.  The old
+        * "p[i] = readl(ECC_ENCPAR(i))" loop stored `len` 32-bit WORDS
+        * (4 * len bytes), overrunning the OOB buffer and making possibly
+        * unaligned u32 stores.
+        */
+       for (i = 0; i < len; i += 4) {
+               val = readl(ecc->regs + ECC_ENCPAR(i / 4));
+               memcpy(data + bytes + i, &val,
+                      (len - i) < 4 ? (len - i) : 4);
+       }
+timeout:
+
+       dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
+       mtk_ecc_disable(ecc);
+
+       return ret;
+}
+EXPORT_SYMBOL(mtk_ecc_encode);
+
+/*
+ * Clamp *p to a correction strength the engine supports: values below
+ * the minimum are rounded up to it, values above the maximum are clamped
+ * to it, and anything in between is rounded DOWN to the nearest
+ * supported value (never up, so the HW capability is not exceeded).
+ */
+void mtk_ecc_adjust_strength(u32 *p)
+{
+       static const u32 supported[] = {4, 6, 8, 10, 12, 14, 16, 18, 20, 22,
+                                       24, 28, 32, 36, 40, 44, 48, 52, 56, 60};
+       int i;
+
+       /* below the smallest supported strength: round up */
+       if (*p <= supported[0]) {
+               *p = supported[0];
+               return;
+       }
+
+       /* otherwise round down to the closest supported value */
+       for (i = ARRAY_SIZE(supported) - 1; i >= 0; i--) {
+               if (*p >= supported[i]) {
+                       *p = supported[i];
+                       return;
+               }
+       }
+}
+EXPORT_SYMBOL(mtk_ecc_adjust_strength);
+
+/*
+ * Map the register space, fetch clock and interrupt, and publish the
+ * engine via drvdata so mtk_ecc_get() can find it.  The clock is NOT
+ * enabled here; users enable it through mtk_ecc_get()/resume.
+ */
+static int mtk_ecc_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct mtk_ecc *ecc;
+       struct resource *res;
+       int irq, ret;
+
+       ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
+       if (!ecc)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       ecc->regs = devm_ioremap_resource(dev, res);
+       if (IS_ERR(ecc->regs)) {
+               dev_err(dev, "failed to map regs: %ld\n", PTR_ERR(ecc->regs));
+               return PTR_ERR(ecc->regs);
+       }
+
+       ecc->clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(ecc->clk)) {
+               dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk));
+               return PTR_ERR(ecc->clk);
+       }
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(dev, "failed to get irq\n");
+               /* propagate the real error (e.g. -EPROBE_DEFER), not -EINVAL */
+               return irq;
+       }
+
+       ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+       if (ret) {
+               dev_err(dev, "failed to set DMA mask\n");
+               return ret;
+       }
+
+       ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc);
+       if (ret) {
+               dev_err(dev, "failed to request irq\n");
+               /* propagate the real error instead of masking it as -EINVAL */
+               return ret;
+       }
+
+       ecc->dev = dev;
+       mutex_init(&ecc->lock);
+       platform_set_drvdata(pdev, ecc);
+       dev_info(dev, "probed\n");
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* System-sleep suspend: just gate the engine clock. */
+static int mtk_ecc_suspend(struct device *dev)
+{
+       struct mtk_ecc *ecc = dev_get_drvdata(dev);
+
+       clk_disable_unprepare(ecc->clk);
+
+       return 0;
+}
+
+/* System-sleep resume: re-enable the clock and reset the engine state. */
+static int mtk_ecc_resume(struct device *dev)
+{
+       struct mtk_ecc *ecc = dev_get_drvdata(dev);
+       int ret;
+
+       ret = clk_prepare_enable(ecc->clk);
+       if (ret) {
+               dev_err(dev, "failed to enable clk\n");
+               return ret;
+       }
+
+       /* registers lose state across suspend; reprogram a sane default */
+       mtk_ecc_hw_init(ecc);
+
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume);
+#endif
+
+static const struct of_device_id mtk_ecc_dt_match[] = {
+       { .compatible = "mediatek,mt2701-ecc" },
+       {},
+};
+
+MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match);
+
+static struct platform_driver mtk_ecc_driver = {
+       .probe  = mtk_ecc_probe,
+       .driver = {
+               .name  = "mtk-ecc",
+               .of_match_table = of_match_ptr(mtk_ecc_dt_match),
+#ifdef CONFIG_PM_SLEEP
+               .pm = &mtk_ecc_pm_ops,
+#endif
+       },
+};
+
+module_platform_driver(mtk_ecc_driver);
+
+MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
+MODULE_DESCRIPTION("MTK Nand ECC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/mtk_ecc.h b/drivers/mtd/nand/mtk_ecc.h
new file mode 100644 (file)
index 0000000..cbeba5c
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * MTK SDG1 ECC controller
+ *
+ * Copyright (c) 2016 Mediatek
+ * Authors:    Xiaolei Li              <xiaolei.li@mediatek.com>
+ *             Jorge Ramirez-Ortiz     <jorge.ramirez-ortiz@linaro.org>
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__
+#define __DRIVERS_MTD_NAND_MTK_ECC_H__
+
+#include <linux/types.h>
+
+#define ECC_PARITY_BITS                (14)
+
+enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1};
+enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE};
+
+struct device_node;
+struct mtk_ecc;
+
+/* Decode statistics reported by the engine (see mtk_ecc_get_stats()). */
+struct mtk_ecc_stats {
+       u32 corrected;          /* number of corrected bitflips */
+       u32 bitflips;           /* max bitflips seen in a single sector */
+       u32 failed;             /* number of uncorrectable sectors */
+};
+
+/* Per-operation configuration handed to mtk_ecc_enable()/mtk_ecc_encode(). */
+struct mtk_ecc_config {
+       enum mtk_ecc_operation op;      /* ECC_ENCODE or ECC_DECODE */
+       enum mtk_ecc_mode mode;         /* ECC_DMA_MODE or ECC_NFI_MODE */
+       dma_addr_t addr;                /* DMA address of the data buffer */
+       u32 strength;                   /* correction strength, in bits per sector */
+       u32 sectors;                    /* number of sectors to process */
+       u32 len;                        /* data length per sector, in bytes */
+};
+
+int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32);
+void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int);
+int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation);
+int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
+void mtk_ecc_disable(struct mtk_ecc *);
+void mtk_ecc_adjust_strength(u32 *);
+
+struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
+void mtk_ecc_release(struct mtk_ecc *);
+
+#endif
diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
new file mode 100644 (file)
index 0000000..ddaa2ac
--- /dev/null
@@ -0,0 +1,1526 @@
+/*
+ * MTK NAND Flash controller driver.
+ * Copyright (C) 2016 MediaTek Inc.
+ * Authors:    Xiaolei Li              <xiaolei.li@mediatek.com>
+ *             Jorge Ramirez-Ortiz     <jorge.ramirez-ortiz@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/mtd.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include "mtk_ecc.h"
+
+/* NAND controller register definition */
+#define NFI_CNFG               (0x00)
+#define                CNFG_AHB                BIT(0)
+#define                CNFG_READ_EN            BIT(1)
+#define                CNFG_DMA_BURST_EN       BIT(2)
+#define                CNFG_BYTE_RW            BIT(6)
+#define                CNFG_HW_ECC_EN          BIT(8)
+#define                CNFG_AUTO_FMT_EN        BIT(9)
+#define                CNFG_OP_CUST            (6 << 12)
+#define NFI_PAGEFMT            (0x04)
+#define                PAGEFMT_FDM_ECC_SHIFT   (12)
+#define                PAGEFMT_FDM_SHIFT       (8)
+#define                PAGEFMT_SPARE_16        (0)
+#define                PAGEFMT_SPARE_26        (1)
+#define                PAGEFMT_SPARE_27        (2)
+#define                PAGEFMT_SPARE_28        (3)
+#define                PAGEFMT_SPARE_32        (4)
+#define                PAGEFMT_SPARE_36        (5)
+#define                PAGEFMT_SPARE_40        (6)
+#define                PAGEFMT_SPARE_44        (7)
+#define                PAGEFMT_SPARE_48        (8)
+#define                PAGEFMT_SPARE_49        (9)
+#define                PAGEFMT_SPARE_50        (0xa)
+#define                PAGEFMT_SPARE_51        (0xb)
+#define                PAGEFMT_SPARE_52        (0xc)
+#define                PAGEFMT_SPARE_62        (0xd)
+#define                PAGEFMT_SPARE_63        (0xe)
+#define                PAGEFMT_SPARE_64        (0xf)
+#define                PAGEFMT_SPARE_SHIFT     (4)
+#define                PAGEFMT_SEC_SEL_512     BIT(2)
+#define                PAGEFMT_512_2K          (0)
+#define                PAGEFMT_2K_4K           (1)
+#define                PAGEFMT_4K_8K           (2)
+#define                PAGEFMT_8K_16K          (3)
+/* NFI control */
+#define NFI_CON                        (0x08)
+#define                CON_FIFO_FLUSH          BIT(0)
+#define                CON_NFI_RST             BIT(1)
+#define                CON_BRD                 BIT(8)  /* burst  read */
+#define                CON_BWR                 BIT(9)  /* burst  write */
+#define                CON_SEC_SHIFT           (12)
+/* Timming control register */
+#define NFI_ACCCON             (0x0C)
+#define NFI_INTR_EN            (0x10)
+#define                INTR_AHB_DONE_EN        BIT(6)
+#define NFI_INTR_STA           (0x14)
+#define NFI_CMD                        (0x20)
+#define NFI_ADDRNOB            (0x30)
+#define NFI_COLADDR            (0x34)
+#define NFI_ROWADDR            (0x38)
+#define NFI_STRDATA            (0x40)
+#define                STAR_EN                 (1)
+#define                STAR_DE                 (0)
+#define NFI_CNRNB              (0x44)
+#define NFI_DATAW              (0x50)
+#define NFI_DATAR              (0x54)
+#define NFI_PIO_DIRDY          (0x58)
+#define                PIO_DI_RDY              (0x01)
+#define NFI_STA                        (0x60)
+#define                STA_CMD                 BIT(0)
+#define                STA_ADDR                BIT(1)
+#define                STA_BUSY                BIT(8)
+#define                STA_EMP_PAGE            BIT(12)
+#define                NFI_FSM_CUSTDATA        (0xe << 16)
+#define                NFI_FSM_MASK            (0xf << 16)
+#define NFI_ADDRCNTR           (0x70)
+#define                CNTR_MASK               GENMASK(16, 12)
+#define NFI_STRADDR            (0x80)
+#define NFI_BYTELEN            (0x84)
+#define NFI_CSEL               (0x90)
+#define NFI_FDML(x)            (0xA0 + (x) * sizeof(u32) * 2)
+#define NFI_FDMM(x)            (0xA4 + (x) * sizeof(u32) * 2)
+#define NFI_FDM_MAX_SIZE       (8)
+#define NFI_FDM_MIN_SIZE       (1)
+#define NFI_MASTER_STA         (0x224)
+#define                MASTER_STA_MASK         (0x0FFF)
+#define NFI_EMPTY_THRESH       (0x23C)
+
+#define MTK_NAME               "mtk-nand"
+#define KB(x)                  ((x) * 1024UL)
+#define MB(x)                  (KB(x) * 1024UL)
+
+#define MTK_TIMEOUT            (500000)
+#define MTK_RESET_TIMEOUT      (1000000)
+#define MTK_MAX_SECTOR         (16)
+#define MTK_NAND_MAX_NSELS     (2)
+
+/*
+ * Bad-block-marker handling: @bm_swap exchanges the marker byte with the
+ * data byte that the controller layout places at the marker position;
+ * @sec and @pos locate that byte (sector index, offset within sector).
+ */
+struct mtk_nfc_bad_mark_ctl {
+       void (*bm_swap)(struct mtd_info *, u8 *buf, int raw);
+       u32 sec;
+       u32 pos;
+};
+
+/*
+ * FDM: region used to store free OOB data
+ */
+struct mtk_nfc_fdm {
+       u32 reg_size;   /* FDM bytes per sector exposed to the OOB */
+       u32 ecc_size;   /* FDM bytes per sector that are ECC protected */
+};
+
+/*
+ * Per-chip state: list linkage into the controller's chip list, the base
+ * nand_chip, bad-mark swap control, FDM layout, spare bytes per sector
+ * and the chip-select lines this chip uses.
+ */
+struct mtk_nfc_nand_chip {
+       struct list_head node;
+       struct nand_chip nand;
+
+       struct mtk_nfc_bad_mark_ctl bad_mark;
+       struct mtk_nfc_fdm fdm;
+       u32 spare_per_sector;
+
+       int nsels;
+       u8 sels[0];     /* flexible array of CS lines - must stay last */
+       /* nothing after this field */
+};
+
+/* Clocks required by the NFI controller. */
+struct mtk_nfc_clk {
+       struct clk *nfi_clk;
+       struct clk *pad_clk;
+};
+
+/*
+ * Controller state: shared nand_hw_control, the ECC engine handle and
+ * its current configuration, clocks, register base, a completion for
+ * AHB-done interrupts, the list of attached chips and a page-sized
+ * bounce buffer used for raw/formatted page accesses.
+ */
+struct mtk_nfc {
+       struct nand_hw_control controller;
+       struct mtk_ecc_config ecc_cfg;
+       struct mtk_nfc_clk clk;
+       struct mtk_ecc *ecc;
+
+       struct device *dev;
+       void __iomem *regs;
+
+       struct completion done;
+       struct list_head chips;
+
+       u8 *buffer;     /* writesize + oobsize scratch buffer */
+};
+
+/* Upcast a nand_chip embedded in a mtk_nfc_nand_chip. */
+static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand)
+{
+       return container_of(nand, struct mtk_nfc_nand_chip, nand);
+}
+
+/* Address of sector @i's data inside the caller-provided page buffer @p. */
+static inline u8 *data_ptr(struct nand_chip *chip, const u8 *p, int i)
+{
+       return (u8 *)p + i * chip->ecc.size;
+}
+
+/*
+ * Address of sector @i's FDM data inside chip->oob_poi.  The bad-mark
+ * sector's FDM is stored first, so sectors before it are shifted by one
+ * FDM slot.
+ */
+static inline u8 *oob_ptr(struct nand_chip *chip, int i)
+{
+       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+       u8 *poi;
+
+       /* map the sector's FDM data to free oob:
+        * the beginning of the oob area stores the FDM data of bad mark sectors
+        */
+
+       if (i < mtk_nand->bad_mark.sec)
+               poi = chip->oob_poi + (i + 1) * mtk_nand->fdm.reg_size;
+       else if (i == mtk_nand->bad_mark.sec)
+               poi = chip->oob_poi;
+       else
+               poi = chip->oob_poi + i * mtk_nand->fdm.reg_size;
+
+       return poi;
+}
+
+/* On-flash length of one sector: data plus its spare bytes. */
+static inline int mtk_data_len(struct nand_chip *chip)
+{
+       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+
+       return chip->ecc.size + mtk_nand->spare_per_sector;
+}
+
+/* Address of sector @i's data inside the controller bounce buffer. */
+static inline u8 *mtk_data_ptr(struct nand_chip *chip,  int i)
+{
+       struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+       return nfc->buffer + i * mtk_data_len(chip);
+}
+
+/* Address of sector @i's OOB area inside the controller bounce buffer. */
+static inline u8 *mtk_oob_ptr(struct nand_chip *chip, int i)
+{
+       struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+       return nfc->buffer + i * mtk_data_len(chip) + chip->ecc.size;
+}
+
+/*
+ * NFI register accessors.  Writes use the ordered writel/writew/writeb
+ * variants while reads use the _relaxed variants - presumably because
+ * read ordering against DMA is not required here; confirm if touching.
+ */
+static inline void nfi_writel(struct mtk_nfc *nfc, u32 val, u32 reg)
+{
+       writel(val, nfc->regs + reg);
+}
+
+static inline void nfi_writew(struct mtk_nfc *nfc, u16 val, u32 reg)
+{
+       writew(val, nfc->regs + reg);
+}
+
+static inline void nfi_writeb(struct mtk_nfc *nfc, u8 val, u32 reg)
+{
+       writeb(val, nfc->regs + reg);
+}
+
+static inline u32 nfi_readl(struct mtk_nfc *nfc, u32 reg)
+{
+       return readl_relaxed(nfc->regs + reg);
+}
+
+static inline u16 nfi_readw(struct mtk_nfc *nfc, u32 reg)
+{
+       return readw_relaxed(nfc->regs + reg);
+}
+
+static inline u8 nfi_readb(struct mtk_nfc *nfc, u32 reg)
+{
+       return readb_relaxed(nfc->regs + reg);
+}
+
+/*
+ * Reset the NFI core: flush the FIFO, terminate the master, wait for it
+ * to go idle (warn but continue on timeout) and clear trigger state.
+ */
+static void mtk_nfc_hw_reset(struct mtk_nfc *nfc)
+{
+       struct device *dev = nfc->dev;
+       u32 val;
+       int ret;
+
+       /* reset all registers and force the NFI master to terminate */
+       nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
+
+       /* wait for the master to finish the last transaction */
+       ret = readl_poll_timeout(nfc->regs + NFI_MASTER_STA, val,
+                                !(val & MASTER_STA_MASK), 50,
+                                MTK_RESET_TIMEOUT);
+       if (ret)
+               dev_warn(dev, "master active in reset [0x%x] = 0x%x\n",
+                        NFI_MASTER_STA, val);
+
+       /* ensure any status register affected by the NFI master is reset */
+       nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
+       nfi_writew(nfc, STAR_DE, NFI_STRDATA);
+}
+
+/*
+ * Issue a NAND command byte and poll until the core leaves command
+ * state.  Returns 0 on success, -EIO on timeout.
+ */
+static int mtk_nfc_send_command(struct mtk_nfc *nfc, u8 command)
+{
+       struct device *dev = nfc->dev;
+       u32 val;
+       int ret;
+
+       nfi_writel(nfc, command, NFI_CMD);
+
+       ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
+                                       !(val & STA_CMD), 10,  MTK_TIMEOUT);
+       if (ret) {
+               dev_warn(dev, "nfi core timed out entering command mode\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/*
+ * Issue one address cycle (column only; row is always 0 here) and poll
+ * until the core leaves address state.  Returns 0 or -EIO on timeout.
+ */
+static int mtk_nfc_send_address(struct mtk_nfc *nfc, int addr)
+{
+       struct device *dev = nfc->dev;
+       u32 val;
+       int ret;
+
+       nfi_writel(nfc, addr, NFI_COLADDR);
+       nfi_writel(nfc, 0, NFI_ROWADDR);
+       nfi_writew(nfc, 1, NFI_ADDRNOB);
+
+       ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
+                                       !(val & STA_ADDR), 10, MTK_TIMEOUT);
+       if (ret) {
+               dev_warn(dev, "nfi core timed out entering address mode\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/*
+ * Program NFI_PAGEFMT (page size class, sector size, spare size, FDM
+ * layout) for the chip behind @mtd and refresh the cached ECC config.
+ * Does nothing until the chip geometry is known (writesize == 0).
+ * Returns 0 or -EINVAL for unsupported page/spare sizes.
+ */
+static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd)
+{
+       struct nand_chip *chip = mtd_to_nand(mtd);
+       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+       struct mtk_nfc *nfc = nand_get_controller_data(chip);
+       u32 fmt, spare;
+
+       if (!mtd->writesize)
+               return 0;
+
+       spare = mtk_nand->spare_per_sector;
+
+       switch (mtd->writesize) {
+       case 512:
+               fmt = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
+               break;
+       case KB(2):
+               if (chip->ecc.size == 512)
+                       fmt = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
+               else
+                       fmt = PAGEFMT_512_2K;
+               break;
+       case KB(4):
+               if (chip->ecc.size == 512)
+                       fmt = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
+               else
+                       fmt = PAGEFMT_2K_4K;
+               break;
+       case KB(8):
+               if (chip->ecc.size == 512)
+                       fmt = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
+               else
+                       fmt = PAGEFMT_4K_8K;
+               break;
+       case KB(16):
+               fmt = PAGEFMT_8K_16K;
+               break;
+       default:
+               dev_err(nfc->dev, "invalid page len: %d\n", mtd->writesize);
+               return -EINVAL;
+       }
+
+       /*
+        * the hardware will double the value for this eccsize, so we need to
+        * halve it
+        */
+       if (chip->ecc.size == 1024)
+               spare >>= 1;
+
+       switch (spare) {
+       case 16:
+               fmt |= (PAGEFMT_SPARE_16 << PAGEFMT_SPARE_SHIFT);
+               break;
+       case 26:
+               fmt |= (PAGEFMT_SPARE_26 << PAGEFMT_SPARE_SHIFT);
+               break;
+       case 27:
+               fmt |= (PAGEFMT_SPARE_27 << PAGEFMT_SPARE_SHIFT);
+               break;
+       case 28:
+               fmt |= (PAGEFMT_SPARE_28 << PAGEFMT_SPARE_SHIFT);
+               break;
+       case 32:
+               fmt |= (PAGEFMT_SPARE_32 << PAGEFMT_SPARE_SHIFT);
+               break;
+       case 36:
+               fmt |= (PAGEFMT_SPARE_36 << PAGEFMT_SPARE_SHIFT);
+               break;
+       case 40:
+               fmt |= (PAGEFMT_SPARE_40 << PAGEFMT_SPARE_SHIFT);
+               break;
+       case 44:
+               fmt |= (PAGEFMT_SPARE_44 << PAGEFMT_SPARE_SHIFT);
+               break;
+       case 48:
+               fmt |= (PAGEFMT_SPARE_48 << PAGEFMT_SPARE_SHIFT);
+               break;
+       case 49:
+               fmt |= (PAGEFMT_SPARE_49 << PAGEFMT_SPARE_SHIFT);
+               break;
+       case 50:
+               fmt |= (PAGEFMT_SPARE_50 << PAGEFMT_SPARE_SHIFT);
+               break;
+       case 51:
+               fmt |= (PAGEFMT_SPARE_51 << PAGEFMT_SPARE_SHIFT);
+               break;
+       case 52:
+               fmt |= (PAGEFMT_SPARE_52 << PAGEFMT_SPARE_SHIFT);
+               break;
+       case 62:
+               fmt |= (PAGEFMT_SPARE_62 << PAGEFMT_SPARE_SHIFT);
+               break;
+       case 63:
+               fmt |= (PAGEFMT_SPARE_63 << PAGEFMT_SPARE_SHIFT);
+               break;
+       case 64:
+               fmt |= (PAGEFMT_SPARE_64 << PAGEFMT_SPARE_SHIFT);
+               break;
+       default:
+               dev_err(nfc->dev, "invalid spare per sector %d\n", spare);
+               return -EINVAL;
+       }
+
+       fmt |= mtk_nand->fdm.reg_size << PAGEFMT_FDM_SHIFT;
+       fmt |= mtk_nand->fdm.ecc_size << PAGEFMT_FDM_ECC_SHIFT;
+       nfi_writew(nfc, fmt, NFI_PAGEFMT);
+
+       /* keep the cached ECC engine config in sync with the chip layout */
+       nfc->ecc_cfg.strength = chip->ecc.strength;
+       nfc->ecc_cfg.len = chip->ecc.size + mtk_nand->fdm.ecc_size;
+
+       return 0;
+}
+
+/*
+ * nand_chip .select_chip hook: reprogram the page format for this chip
+ * and drive its chip-select line.  @chip < 0 means deselect (no-op).
+ * NOTE(review): the return value of mtk_nfc_hw_runtime_config() is
+ * ignored here - a config failure goes unnoticed; confirm intentional.
+ */
+static void mtk_nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+       struct nand_chip *nand = mtd_to_nand(mtd);
+       struct mtk_nfc *nfc = nand_get_controller_data(nand);
+       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand);
+
+       if (chip < 0)
+               return;
+
+       mtk_nfc_hw_runtime_config(mtd);
+
+       nfi_writel(nfc, mtk_nand->sels[chip], NFI_CSEL);
+}
+
+/*
+ * nand_chip .dev_ready hook: the device is ready when the NFI core no
+ * longer reports busy.  Returns 1 when ready, 0 when busy.
+ */
+static int mtk_nfc_dev_ready(struct mtd_info *mtd)
+{
+       struct nand_chip *chip = mtd_to_nand(mtd);
+       struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+       return !(nfi_readl(nfc, NFI_STA) & STA_BUSY);
+}
+
+/*
+ * nand_chip .cmd_ctrl hook: ALE cycles send an address; CLE cycles reset
+ * the core, select custom-operation mode and send the command byte.
+ */
+static void mtk_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+{
+       struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
+
+       if (ctrl & NAND_ALE) {
+               mtk_nfc_send_address(nfc, dat);
+       } else if (ctrl & NAND_CLE) {
+               mtk_nfc_hw_reset(nfc);
+
+               nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG);
+               mtk_nfc_send_command(nfc, dat);
+       }
+}
+
+/*
+ * Poll until the PIO data register is ready for the next byte; logs an
+ * error on timeout but does not propagate it (PIO path is best-effort).
+ */
+static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc)
+{
+       int rc;
+       u8 val;
+
+       rc = readb_poll_timeout_atomic(nfc->regs + NFI_PIO_DIRDY, val,
+                                      val & PIO_DI_RDY, 10, MTK_TIMEOUT);
+       if (rc < 0)
+               dev_err(nfc->dev, "data not ready\n");
+}
+
+/*
+ * PIO read of a single byte.  If the core is not already in custom-data
+ * mode, enable byte-wide burst reads first, then wait for data and
+ * fetch it.
+ */
+static inline u8 mtk_nfc_read_byte(struct mtd_info *mtd)
+{
+       struct nand_chip *chip = mtd_to_nand(mtd);
+       struct mtk_nfc *nfc = nand_get_controller_data(chip);
+       u32 reg;
+
+       /* after each byte read, the NFI_STA reg is reset by the hardware */
+       reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
+       if (reg != NFI_FSM_CUSTDATA) {
+               reg = nfi_readw(nfc, NFI_CNFG);
+               reg |= CNFG_BYTE_RW | CNFG_READ_EN;
+               nfi_writew(nfc, reg, NFI_CNFG);
+
+               /*
+                * set to max sector to allow the HW to continue reading over
+                * unaligned accesses
+                */
+               reg = (MTK_MAX_SECTOR << CON_SEC_SHIFT) | CON_BRD;
+               nfi_writel(nfc, reg, NFI_CON);
+
+               /* trigger to fetch data */
+               nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+       }
+
+       mtk_nfc_wait_ioready(nfc);
+
+       return nfi_readb(nfc, NFI_DATAR);
+}
+
+/* PIO read of @len bytes into @buf, one byte at a time. */
+static void mtk_nfc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+       u8 *p = buf;
+       u8 * const end = buf + len;
+
+       while (p < end)
+               *p++ = mtk_nfc_read_byte(mtd);
+}
+
+/*
+ * PIO write of a single byte.  Mirrors mtk_nfc_read_byte(): switch the
+ * core into byte-wide custom-data write mode if needed, wait for the
+ * data register and push the byte.
+ */
+static void mtk_nfc_write_byte(struct mtd_info *mtd, u8 byte)
+{
+       struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
+       u32 reg;
+
+       reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
+
+       if (reg != NFI_FSM_CUSTDATA) {
+               reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
+               nfi_writew(nfc, reg, NFI_CNFG);
+
+               reg = MTK_MAX_SECTOR << CON_SEC_SHIFT | CON_BWR;
+               nfi_writel(nfc, reg, NFI_CON);
+
+               nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+       }
+
+       mtk_nfc_wait_ioready(nfc);
+       nfi_writeb(nfc, byte, NFI_DATAW);
+}
+
+/* PIO write of @len bytes from @buf, one byte at a time. */
+static void mtk_nfc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+       const u8 *p = buf;
+       const u8 * const end = buf + len;
+
+       while (p < end)
+               mtk_nfc_write_byte(mtd, *p++);
+}
+
+/*
+ * Run the standalone (DMA mode) ECC encoder over one sector's data plus
+ * its FDM bytes, appending the parity to @data.  Returns 0 or a negative
+ * error from mtk_ecc_encode().
+ */
+static int mtk_nfc_sector_encode(struct nand_chip *chip, u8 *data)
+{
+       struct mtk_nfc *nfc = nand_get_controller_data(chip);
+       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+       int size = chip->ecc.size + mtk_nand->fdm.reg_size;
+
+       nfc->ecc_cfg.mode = ECC_DMA_MODE;
+       nfc->ecc_cfg.op = ECC_ENCODE;
+
+       return mtk_ecc_encode(nfc->ecc, &nfc->ecc_cfg, data, size);
+}
+
+/* bm_swap stub for layouts where no bad-mark byte swap is needed. */
+static void mtk_nfc_no_bad_mark_swap(struct mtd_info *a, u8 *b, int c)
+{
+       /* nop */
+}
+
+/*
+ * Swap the bad-block marker byte (oob_poi[0]) with the data byte that
+ * the controller layout places at the marker position.  In raw mode the
+ * position is computed against the on-flash sector layout (data+spare),
+ * otherwise against the plain data layout.
+ */
+static void mtk_nfc_bad_mark_swap(struct mtd_info *mtd, u8 *buf, int raw)
+{
+       struct nand_chip *chip = mtd_to_nand(mtd);
+       struct mtk_nfc_nand_chip *nand = to_mtk_nand(chip);
+       u32 bad_pos = nand->bad_mark.pos;
+
+       if (raw)
+               bad_pos += nand->bad_mark.sec * mtk_data_len(chip);
+       else
+               bad_pos += nand->bad_mark.sec * chip->ecc.size;
+
+       swap(chip->oob_poi[0], buf[bad_pos]);
+}
+
+/*
+ * Build a raw page image in nfc->buffer for a subpage write: all data
+ * sectors are copied, but only the sectors within [offset, offset+len)
+ * get FDM data and software-computed ECC; the rest stay 0xff (no-op on
+ * NAND program).  Returns 0 or a negative ECC error.
+ */
+static int mtk_nfc_format_subpage(struct mtd_info *mtd, u32 offset,
+                                 u32 len, const u8 *buf)
+{
+       struct nand_chip *chip = mtd_to_nand(mtd);
+       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+       struct mtk_nfc *nfc = nand_get_controller_data(chip);
+       struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+       u32 start, end;
+       int i, ret;
+
+       start = offset / chip->ecc.size;
+       end = DIV_ROUND_UP(offset + len, chip->ecc.size);
+
+       memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
+       for (i = 0; i < chip->ecc.steps; i++) {
+               memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
+                      chip->ecc.size);
+
+               /* only the targeted sectors get FDM and parity */
+               if (start > i || i >= end)
+                       continue;
+
+               if (i == mtk_nand->bad_mark.sec)
+                       mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
+
+               memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
+
+               /* program the CRC back to the OOB */
+               ret = mtk_nfc_sector_encode(chip, mtk_data_ptr(chip, i));
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * Build a raw page image in nfc->buffer: per-sector data (or 0xff when
+ * @buf is NULL, e.g. OOB-only writes) followed by the FDM bytes, with
+ * the bad-mark byte swapped into its on-flash position.
+ */
+static void mtk_nfc_format_page(struct mtd_info *mtd, const u8 *buf)
+{
+       struct nand_chip *chip = mtd_to_nand(mtd);
+       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+       struct mtk_nfc *nfc = nand_get_controller_data(chip);
+       struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+       u32 i;
+
+       memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
+       for (i = 0; i < chip->ecc.steps; i++) {
+               if (buf)
+                       memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
+                              chip->ecc.size);
+
+               if (i == mtk_nand->bad_mark.sec)
+                       mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
+
+               memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
+       }
+}
+
+/*
+ * Copy FDM data for @sectors sectors (starting at sector @start) from
+ * the FDML/FDMM registers into the OOB buffer.  Register index counts
+ * from 0 - presumably per transferred sector, not per absolute sector
+ * index; confirm against the controller manual.
+ */
+static inline void mtk_nfc_read_fdm(struct nand_chip *chip, u32 start,
+                                   u32 sectors)
+{
+       struct mtk_nfc *nfc = nand_get_controller_data(chip);
+       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+       struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+       u32 vall, valm;
+       u8 *oobptr;
+       int i, j;
+
+       for (i = 0; i < sectors; i++) {
+               oobptr = oob_ptr(chip, start + i);
+               vall = nfi_readl(nfc, NFI_FDML(i));
+               valm = nfi_readl(nfc, NFI_FDMM(i));
+
+               /* unpack up to 8 FDM bytes from the two 32-bit registers */
+               for (j = 0; j < fdm->reg_size; j++)
+                       oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
+       }
+}
+
+/*
+ * Pack each sector's FDM bytes from the OOB buffer into the FDML/FDMM
+ * registers; unused byte lanes are filled with 0xff (erased value).
+ */
+static inline void mtk_nfc_write_fdm(struct nand_chip *chip)
+{
+       struct mtk_nfc *nfc = nand_get_controller_data(chip);
+       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+       struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+       u32 vall, valm;
+       u8 *oobptr;
+       int i, j;
+
+       for (i = 0; i < chip->ecc.steps; i++) {
+               oobptr = oob_ptr(chip, i);
+               vall = 0;
+               valm = 0;
+               for (j = 0; j < 8; j++) {
+                       if (j < 4)
+                               vall |= (j < fdm->reg_size ? oobptr[j] : 0xff)
+                                               << (j * 8);
+                       else
+                               valm |= (j < fdm->reg_size ? oobptr[j] : 0xff)
+                                               << ((j - 4) * 8);
+               }
+               nfi_writel(nfc, vall, NFI_FDML(i));
+               nfi_writel(nfc, valm, NFI_FDMM(i));
+       }
+}
+
+/*
+ * DMA @len bytes from @buf to the NAND: map the buffer, configure an
+ * AHB burst write of ecc.steps sectors, trigger it, wait for the
+ * AHB-done interrupt and then poll the sector counter until all sectors
+ * went out.  Returns 0, -ETIMEDOUT, or -EINVAL on mapping failure.
+ */
+static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+                                const u8 *buf, int page, int len)
+{
+       struct mtk_nfc *nfc = nand_get_controller_data(chip);
+       struct device *dev = nfc->dev;
+       dma_addr_t addr;
+       u32 reg;
+       int ret;
+
+       addr = dma_map_single(dev, (void *)buf, len, DMA_TO_DEVICE);
+       ret = dma_mapping_error(nfc->dev, addr);
+       if (ret) {
+               dev_err(nfc->dev, "dma mapping error\n");
+               return -EINVAL;
+       }
+
+       reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AHB | CNFG_DMA_BURST_EN;
+       nfi_writew(nfc, reg, NFI_CNFG);
+
+       nfi_writel(nfc, chip->ecc.steps << CON_SEC_SHIFT, NFI_CON);
+       nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
+       nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
+
+       init_completion(&nfc->done);
+
+       /* start the burst write */
+       reg = nfi_readl(nfc, NFI_CON) | CON_BWR;
+       nfi_writel(nfc, reg, NFI_CON);
+       nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+
+       ret = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
+       if (!ret) {
+               dev_err(dev, "program ahb done timeout\n");
+               nfi_writew(nfc, 0, NFI_INTR_EN);
+               ret = -ETIMEDOUT;
+               goto timeout;
+       }
+
+       /* AHB done does not mean all sectors reached the flash; poll it */
+       ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
+                                       (reg & CNTR_MASK) >= chip->ecc.steps,
+                                       10, MTK_TIMEOUT);
+       if (ret)
+               dev_err(dev, "hwecc write timeout\n");
+
+timeout:
+
+       /* common cleanup: label is also reached on the success path */
+       dma_unmap_single(nfc->dev, addr, len, DMA_TO_DEVICE);
+       nfi_writel(nfc, 0, NFI_CON);
+
+       return ret;
+}
+
+/*
+ * Common page-write path.  In ECC mode (!raw) enable NFI-pipelined HW
+ * ECC and auto-format (FDM from registers, parity from the engine),
+ * bounce the data through nfc->buffer with the bad mark swapped, and
+ * load the FDM registers.  In raw mode write @buf as-is, including OOB.
+ */
+static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+                             const u8 *buf, int page, int raw)
+{
+       struct mtk_nfc *nfc = nand_get_controller_data(chip);
+       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+       size_t len;
+       const u8 *bufpoi;
+       u32 reg;
+       int ret;
+
+       if (!raw) {
+               /* OOB => FDM: from register,  ECC: from HW */
+               reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN;
+               nfi_writew(nfc, reg | CNFG_HW_ECC_EN, NFI_CNFG);
+
+               nfc->ecc_cfg.op = ECC_ENCODE;
+               nfc->ecc_cfg.mode = ECC_NFI_MODE;
+               ret = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
+               if (ret) {
+                       /* clear NFI config */
+                       reg = nfi_readw(nfc, NFI_CNFG);
+                       reg &= ~(CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
+                       nfi_writew(nfc, reg, NFI_CNFG);
+
+                       return ret;
+               }
+
+               memcpy(nfc->buffer, buf, mtd->writesize);
+               mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, raw);
+               bufpoi = nfc->buffer;
+
+               /* write OOB into the FDM registers (OOB area in MTK NAND) */
+               mtk_nfc_write_fdm(chip);
+       } else {
+               bufpoi = buf;
+       }
+
+       len = mtd->writesize + (raw ? mtd->oobsize : 0);
+       ret = mtk_nfc_do_write_page(mtd, chip, bufpoi, page, len);
+
+       if (!raw)
+               mtk_ecc_disable(nfc->ecc);
+
+       return ret;
+}
+
+/* ecc.write_page hook: HW-ECC page write (raw = 0). */
+static int mtk_nfc_write_page_hwecc(struct mtd_info *mtd,
+                                   struct nand_chip *chip, const u8 *buf,
+                                   int oob_on, int page)
+{
+       return mtk_nfc_write_page(mtd, chip, buf, page, 0);
+}
+
+/*
+ * ecc.write_page_raw hook: format data + OOB into the controller layout
+ * in the bounce buffer, then write it without HW ECC (raw = 1).
+ */
+static int mtk_nfc_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+                                 const u8 *buf, int oob_on, int pg)
+{
+       struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+       mtk_nfc_format_page(mtd, buf);
+       return mtk_nfc_write_page(mtd, chip, nfc->buffer, pg, 1);
+}
+
+/*
+ * ecc.write_subpage hook: the subpage is formatted (software ECC + FDM)
+ * into the bounce buffer and then programmed as a raw page, since the
+ * controller cannot HW-encode a partial page.
+ */
+static int mtk_nfc_write_subpage_hwecc(struct mtd_info *mtd,
+                                      struct nand_chip *chip, u32 offset,
+                                      u32 data_len, const u8 *buf,
+                                      int oob_on, int page)
+{
+       struct mtk_nfc *nfc = nand_get_controller_data(chip);
+       int ret;
+
+       ret = mtk_nfc_format_subpage(mtd, offset, data_len, buf);
+       if (ret < 0)
+               return ret;
+
+       /* use the data in the private buffer (now with FDM and CRC) */
+       return mtk_nfc_write_page(mtd, chip, nfc->buffer, page, 1);
+}
+
+/*
+ * ecc.write_oob hook: OOB alone cannot be programmed, so a raw page write
+ * with NULL data (bounce buffer pre-filled by the raw path) is issued,
+ * followed by PAGEPROG and a status check.
+ */
+static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
+                                int page)
+{
+       int ret;
+
+       chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+
+       ret = mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
+       if (ret < 0)
+               return -EIO;
+
+       chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+       ret = chip->waitfunc(mtd, chip);
+
+       /* NAND_STATUS_FAIL set by the chip means the program failed */
+       return ret & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+/*
+ * Fold the ECC engine's per-read statistics into mtd->ecc_stats and
+ * return the maximum number of bitflips seen.  An empty (erased) page,
+ * flagged by STA_EMP_PAGE, is rewritten as all-0xff with zero bitflips.
+ */
+static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
+{
+       struct nand_chip *chip = mtd_to_nand(mtd);
+       struct mtk_nfc *nfc = nand_get_controller_data(chip);
+       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+       struct mtk_ecc_stats stats;
+       int rc, i;
+
+       rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
+       if (rc) {
+               /* erased page: present clean 0xff data and FDM to the caller */
+               memset(buf, 0xff, sectors * chip->ecc.size);
+               for (i = 0; i < sectors; i++)
+                       memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
+               return 0;
+       }
+
+       mtk_ecc_get_stats(nfc->ecc, &stats, sectors);
+       mtd->ecc_stats.corrected += stats.corrected;
+       mtd->ecc_stats.failed += stats.failed;
+
+       return stats.bitflips;
+}
+
+/*
+ * DMA-read a range of ECC sectors from the current page.
+ *
+ * The requested byte range is rounded out to whole ECC sectors; a RNDOUT
+ * command repositions the read pointer when the range does not start at
+ * column 0.  In ECC mode the engine decodes in-flight and the FDM (OOB)
+ * bytes are fetched from registers afterwards.  Returns max bitflips on
+ * success or a negative error.
+ */
+static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
+                               u32 data_offs, u32 readlen,
+                               u8 *bufpoi, int page, int raw)
+{
+       struct mtk_nfc *nfc = nand_get_controller_data(chip);
+       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+       u32 spare = mtk_nand->spare_per_sector;
+       u32 column, sectors, start, end, reg;
+       dma_addr_t addr;
+       int bitflips;
+       size_t len;
+       u8 *buf;
+       int rc;
+
+       /* round the byte range out to whole ECC sectors */
+       start = data_offs / chip->ecc.size;
+       end = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
+
+       sectors = end - start;
+       column = start * (chip->ecc.size + spare);
+
+       /* raw reads also transfer each sector's spare area */
+       len = sectors * chip->ecc.size + (raw ? sectors * spare : 0);
+       buf = bufpoi + start * chip->ecc.size;
+
+       if (column != 0)
+               chip->cmdfunc(mtd, NAND_CMD_RNDOUT, column, -1);
+
+       addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE);
+       rc = dma_mapping_error(nfc->dev, addr);
+       if (rc) {
+               dev_err(nfc->dev, "dma mapping error\n");
+
+               return -EINVAL;
+       }
+
+       reg = nfi_readw(nfc, NFI_CNFG);
+       reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_AHB;
+       if (!raw) {
+               reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
+               nfi_writew(nfc, reg, NFI_CNFG);
+
+               nfc->ecc_cfg.mode = ECC_NFI_MODE;
+               nfc->ecc_cfg.sectors = sectors;
+               nfc->ecc_cfg.op = ECC_DECODE;
+               rc = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
+               if (rc) {
+                       dev_err(nfc->dev, "ecc enable\n");
+                       /* clear NFI_CNFG */
+                       reg &= ~(CNFG_DMA_BURST_EN | CNFG_AHB | CNFG_READ_EN |
+                               CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
+                       nfi_writew(nfc, reg, NFI_CNFG);
+                       dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
+
+                       return rc;
+               }
+       } else {
+               nfi_writew(nfc, reg, NFI_CNFG);
+       }
+
+       /* arm the transfer: sector count, AHB-done irq, DMA address */
+       nfi_writel(nfc, sectors << CON_SEC_SHIFT, NFI_CON);
+       nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
+       nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
+
+       init_completion(&nfc->done);
+       reg = nfi_readl(nfc, NFI_CON) | CON_BRD;
+       nfi_writel(nfc, reg, NFI_CON);
+       nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+
+       /* AHB-done timeout is only warned: the byte-count poll decides */
+       rc = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
+       if (!rc)
+               dev_warn(nfc->dev, "read ahb/dma done timeout\n");
+
+       rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
+                                      (reg & CNTR_MASK) >= sectors, 10,
+                                      MTK_TIMEOUT);
+       if (rc < 0) {
+               dev_err(nfc->dev, "subpage done timeout\n");
+               bitflips = -EIO;
+       } else {
+               bitflips = 0;
+               if (!raw) {
+                       rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
+                       bitflips = rc < 0 ? -ETIMEDOUT :
+                               mtk_nfc_update_ecc_stats(mtd, buf, sectors);
+                       mtk_nfc_read_fdm(chip, start, sectors);
+               }
+       }
+
+       dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
+
+       if (raw)
+               goto done;
+
+       mtk_ecc_disable(nfc->ecc);
+
+       /* undo the bad-mark swap if its sector fell inside [start, end] */
+       if (clamp(mtk_nand->bad_mark.sec, start, end) == mtk_nand->bad_mark.sec)
+               mtk_nand->bad_mark.bm_swap(mtd, bufpoi, raw);
+done:
+       nfi_writel(nfc, 0, NFI_CON);
+
+       return bitflips;
+}
+
+/* ecc.read_subpage hook: HW-ECC subpage read (raw = 0). */
+static int mtk_nfc_read_subpage_hwecc(struct mtd_info *mtd,
+                                     struct nand_chip *chip, u32 off,
+                                     u32 len, u8 *p, int pg)
+{
+       return mtk_nfc_read_subpage(mtd, chip, off, len, p, pg, 0);
+}
+
+/* ecc.read_page hook: full-page HW-ECC read is a whole-page subpage read. */
+static int mtk_nfc_read_page_hwecc(struct mtd_info *mtd,
+                                  struct nand_chip *chip, u8 *p,
+                                  int oob_on, int pg)
+{
+       return mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, p, pg, 0);
+}
+
+/*
+ * ecc.read_page_raw hook: read the page (data + spare) without ECC into
+ * the bounce buffer, then de-interleave it from the controller layout —
+ * FDM bytes into oob_poi, sector data into @buf — with the bad-block
+ * marker swapped back to its NAND-standard position.
+ */
+static int mtk_nfc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+                                u8 *buf, int oob_on, int page)
+{
+       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+       struct mtk_nfc *nfc = nand_get_controller_data(chip);
+       struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+       int i, ret;
+
+       memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
+       ret = mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, nfc->buffer,
+                                  page, 1);
+       if (ret < 0)
+               return ret;
+
+       for (i = 0; i < chip->ecc.steps; i++) {
+               memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);
+
+               if (i == mtk_nand->bad_mark.sec)
+                       mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
+
+               /* buf may be NULL when only the OOB is wanted */
+               if (buf)
+                       memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i),
+                              chip->ecc.size);
+       }
+
+       return ret;
+}
+
+/* ecc.read_oob hook: raw page read with NULL data buffer fills oob_poi. */
+static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
+                               int page)
+{
+       chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+       return mtk_nfc_read_page_raw(mtd, chip, NULL, 1, page);
+}
+
+/*
+ * One-time controller setup: program access timings, ready/busy polling,
+ * the widest page format, reset the NFI, and leave interrupts cleared
+ * and disabled.  Also called on resume to reinitialize the hardware.
+ */
+static inline void mtk_nfc_hw_init(struct mtk_nfc *nfc)
+{
+       /*
+        * ACCON: access timing control register
+        * -------------------------------------
+        * 31:28: minimum required time for CS post pulling down after accessing
+        *      the device
+        * 27:22: minimum required time for CS pre pulling down before accessing
+        *      the device
+        * 21:16: minimum required time from NCEB low to NREB low
+        * 15:12: minimum required time from NWEB high to NREB low.
+        * 11:08: write enable hold time
+        * 07:04: write wait states
+        * 03:00: read wait states
+        */
+       nfi_writel(nfc, 0x10804211, NFI_ACCCON);
+
+       /*
+        * CNRNB: nand ready/busy register
+        * -------------------------------
+        * 7:4: timeout register for polling the NAND busy/ready signal
+        * 0  : poll the status of the busy/ready signal after [7:4]*16 cycles.
+        */
+       nfi_writew(nfc, 0xf1, NFI_CNRNB);
+       nfi_writew(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT);
+
+       mtk_nfc_hw_reset(nfc);
+
+       /* read to clear any stale status, then mask all interrupts */
+       nfi_readl(nfc, NFI_INTR_STA);
+       nfi_writel(nfc, 0, NFI_INTR_EN);
+}
+
+/*
+ * NFI interrupt handler: acknowledge by masking the asserted sources
+ * (status is read-to-clear via NFI_INTR_STA) and complete the waiter.
+ */
+static irqreturn_t mtk_nfc_irq(int irq, void *id)
+{
+       struct mtk_nfc *nfc = id;
+       u16 sta, ien;
+
+       sta = nfi_readw(nfc, NFI_INTR_STA);
+       ien = nfi_readw(nfc, NFI_INTR_EN);
+
+       /* not ours unless an enabled source is pending */
+       if (!(sta & ien))
+               return IRQ_NONE;
+
+       /* disable the sources that fired; the next op re-enables them */
+       nfi_writew(nfc, ~sta & ien, NFI_INTR_EN);
+       complete(&nfc->done);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Enable the NFI and pad clocks; on pad-clock failure the already-enabled
+ * NFI clock is rolled back so the pair is all-or-nothing.
+ */
+static int mtk_nfc_enable_clk(struct device *dev, struct mtk_nfc_clk *clk)
+{
+       int ret;
+
+       ret = clk_prepare_enable(clk->nfi_clk);
+       if (ret) {
+               dev_err(dev, "failed to enable nfi clk\n");
+               return ret;
+       }
+
+       ret = clk_prepare_enable(clk->pad_clk);
+       if (ret) {
+               dev_err(dev, "failed to enable pad clk\n");
+               clk_disable_unprepare(clk->nfi_clk);
+               return ret;
+       }
+
+       return 0;
+}
+
+/* Counterpart of mtk_nfc_enable_clk(): gate both controller clocks. */
+static void mtk_nfc_disable_clk(struct mtk_nfc_clk *clk)
+{
+       clk_disable_unprepare(clk->nfi_clk);
+       clk_disable_unprepare(clk->pad_clk);
+}
+
+/*
+ * mtd_ooblayout free-bytes callback: each ECC step exposes its FDM bytes
+ * minus the FDM-ECC-protected prefix (which holds the bad-block mark).
+ */
+static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
+                                 struct mtd_oob_region *oob_region)
+{
+       struct nand_chip *chip = mtd_to_nand(mtd);
+       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+       struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+       u32 eccsteps;
+
+       eccsteps = mtd->writesize / chip->ecc.size;
+
+       /* one free region per ECC step */
+       if (section >= eccsteps)
+               return -ERANGE;
+
+       oob_region->length = fdm->reg_size - fdm->ecc_size;
+       oob_region->offset = section * fdm->reg_size + fdm->ecc_size;
+
+       return 0;
+}
+
+/*
+ * mtd_ooblayout ECC-bytes callback: a single region covering everything
+ * after the FDM areas of all ECC steps.
+ */
+static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
+                                struct mtd_oob_region *oob_region)
+{
+       struct nand_chip *chip = mtd_to_nand(mtd);
+       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+       u32 eccsteps;
+
+       /* only one ECC region */
+       if (section)
+               return -ERANGE;
+
+       eccsteps = mtd->writesize / chip->ecc.size;
+       oob_region->offset = mtk_nand->fdm.reg_size * eccsteps;
+       oob_region->length = mtd->oobsize - oob_region->offset;
+
+       return 0;
+}
+
+static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = {
+       .free = mtk_nfc_ooblayout_free,
+       .ecc = mtk_nfc_ooblayout_ecc,
+};
+
+/*
+ * Size the per-sector FDM area: the spare bytes left after ECC parity,
+ * capped at the hardware maximum, with one ECC-protected byte reserved
+ * for the bad-block mark.
+ */
+static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd)
+{
+       struct nand_chip *nand = mtd_to_nand(mtd);
+       struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand);
+       u32 ecc_bytes;
+
+       ecc_bytes = DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8);
+
+       fdm->reg_size = chip->spare_per_sector - ecc_bytes;
+       if (fdm->reg_size > NFI_FDM_MAX_SIZE)
+               fdm->reg_size = NFI_FDM_MAX_SIZE;
+
+       /* bad block mark storage */
+       fdm->ecc_size = 1;
+}
+
+/*
+ * Select the bad-block-mark swap strategy: 512-byte pages need no swap;
+ * larger pages compute which sector/offset of the controller's interleaved
+ * layout lands on the standard bad-mark position.
+ */
+static void mtk_nfc_set_bad_mark_ctl(struct mtk_nfc_bad_mark_ctl *bm_ctl,
+                                    struct mtd_info *mtd)
+{
+       struct nand_chip *nand = mtd_to_nand(mtd);
+
+       if (mtd->writesize == 512) {
+               bm_ctl->bm_swap = mtk_nfc_no_bad_mark_swap;
+       } else {
+               bm_ctl->bm_swap = mtk_nfc_bad_mark_swap;
+               bm_ctl->sec = mtd->writesize / mtk_data_len(nand);
+               bm_ctl->pos = mtd->writesize % mtk_data_len(nand);
+       }
+}
+
+/*
+ * Pick the controller-supported spare-per-sector size: the available OOB
+ * per ECC step is snapped to the nearest entry of the hardware table not
+ * exceeding it (values are per 512-byte sector, hence the halving/doubling
+ * around 1024-byte ECC steps).
+ */
+static void mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd)
+{
+       struct nand_chip *nand = mtd_to_nand(mtd);
+       /* spare sizes the NFI supports, per 512-byte sector */
+       u32 spare[] = {16, 26, 27, 28, 32, 36, 40, 44,
+                       48, 49, 50, 51, 52, 62, 63, 64};
+       u32 eccsteps, i;
+
+       eccsteps = mtd->writesize / nand->ecc.size;
+       *sps = mtd->oobsize / eccsteps;
+
+       /* table is per 512B sector; normalize 1KB steps down first */
+       if (nand->ecc.size == 1024)
+               *sps >>= 1;
+
+       for (i = 0; i < ARRAY_SIZE(spare); i++) {
+               if (*sps <= spare[i]) {
+                       if (!i)
+                               *sps = spare[i];
+                       else if (*sps != spare[i])
+                               *sps = spare[i - 1];
+                       break;
+               }
+       }
+
+       /* larger than any table entry: clamp to the maximum */
+       if (i >= ARRAY_SIZE(spare))
+               *sps = spare[ARRAY_SIZE(spare) - 1];
+
+       if (nand->ecc.size == 1024)
+               *sps <<= 1;
+}
+
+/*
+ * Finalize ECC size/strength for the chip.  If DT did not fix them, the
+ * datasheet values are taken, snapped to the controller's 512/1024-byte
+ * sector sizes, and the strength is rebalanced against the available OOB
+ * so that parity plus FDM fit.  Only NAND_ECC_HW is supported.
+ */
+static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
+{
+       struct nand_chip *nand = mtd_to_nand(mtd);
+       u32 spare;
+       int free;
+
+       /* support only ecc hw mode */
+       if (nand->ecc.mode != NAND_ECC_HW) {
+               dev_err(dev, "ecc.mode not supported\n");
+               return -EINVAL;
+       }
+
+       /* if optional dt settings not present */
+       if (!nand->ecc.size || !nand->ecc.strength) {
+               /* use datasheet requirements */
+               nand->ecc.strength = nand->ecc_strength_ds;
+               nand->ecc.size = nand->ecc_step_ds;
+
+               /*
+                * align eccstrength and eccsize
+                * this controller only supports 512 and 1024 sizes
+                */
+               if (nand->ecc.size < 1024) {
+                       if (mtd->writesize > 512) {
+                               /* doubling the step also doubles strength */
+                               nand->ecc.size = 1024;
+                               nand->ecc.strength <<= 1;
+                       } else {
+                               nand->ecc.size = 512;
+                       }
+               } else {
+                       nand->ecc.size = 1024;
+               }
+
+               mtk_nfc_set_spare_per_sector(&spare, mtd);
+
+               /* calculate oob bytes except ecc parity data */
+               free = ((nand->ecc.strength * ECC_PARITY_BITS) + 7) >> 3;
+               free = spare - free;
+
+               /*
+                * enhance ecc strength if oob left is bigger than max FDM size
+                * or reduce ecc strength if oob size is not enough for ecc
+                * parity data.
+                */
+               if (free > NFI_FDM_MAX_SIZE) {
+                       spare -= NFI_FDM_MAX_SIZE;
+                       nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
+               } else if (free < 0) {
+                       spare -= NFI_FDM_MIN_SIZE;
+                       nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
+               }
+       }
+
+       /* round down to a strength the ECC engine implements */
+       mtk_ecc_adjust_strength(&nand->ecc.strength);
+
+       dev_info(dev, "eccsize %d eccstrength %d\n",
+                nand->ecc.size, nand->ecc.strength);
+
+       return 0;
+}
+
+/*
+ * Instantiate one NAND chip from a DT child node: parse its CS lines
+ * ("reg"), wire up the nand_chip ops, identify the flash, derive the
+ * ECC/FDM/bad-mark geometry, allocate the bounce buffer, and register
+ * the resulting mtd device.
+ */
+static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
+                                 struct device_node *np)
+{
+       struct mtk_nfc_nand_chip *chip;
+       struct nand_chip *nand;
+       struct mtd_info *mtd;
+       int nsels, len;
+       u32 tmp;
+       int ret;
+       int i;
+
+       if (!of_get_property(np, "reg", &nsels))
+               return -ENODEV;
+
+       /* "reg" holds one u32 chip-select per die */
+       nsels /= sizeof(u32);
+       if (!nsels || nsels > MTK_NAND_MAX_NSELS) {
+               dev_err(dev, "invalid reg property size %d\n", nsels);
+               return -EINVAL;
+       }
+
+       /* trailing flexible array of u8 chip selects */
+       chip = devm_kzalloc(dev, sizeof(*chip) + nsels * sizeof(u8),
+                           GFP_KERNEL);
+       if (!chip)
+               return -ENOMEM;
+
+       chip->nsels = nsels;
+       for (i = 0; i < nsels; i++) {
+               ret = of_property_read_u32_index(np, "reg", i, &tmp);
+               if (ret) {
+                       dev_err(dev, "reg property failure : %d\n", ret);
+                       return ret;
+               }
+               chip->sels[i] = tmp;
+       }
+
+       nand = &chip->nand;
+       nand->controller = &nfc->controller;
+
+       nand_set_flash_node(nand, np);
+       nand_set_controller_data(nand, nfc);
+
+       nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ;
+       nand->dev_ready = mtk_nfc_dev_ready;
+       nand->select_chip = mtk_nfc_select_chip;
+       nand->write_byte = mtk_nfc_write_byte;
+       nand->write_buf = mtk_nfc_write_buf;
+       nand->read_byte = mtk_nfc_read_byte;
+       nand->read_buf = mtk_nfc_read_buf;
+       nand->cmd_ctrl = mtk_nfc_cmd_ctrl;
+
+       /* set default mode in case dt entry is missing */
+       nand->ecc.mode = NAND_ECC_HW;
+
+       nand->ecc.write_subpage = mtk_nfc_write_subpage_hwecc;
+       nand->ecc.write_page_raw = mtk_nfc_write_page_raw;
+       nand->ecc.write_page = mtk_nfc_write_page_hwecc;
+       nand->ecc.write_oob_raw = mtk_nfc_write_oob_std;
+       nand->ecc.write_oob = mtk_nfc_write_oob_std;
+
+       nand->ecc.read_subpage = mtk_nfc_read_subpage_hwecc;
+       nand->ecc.read_page_raw = mtk_nfc_read_page_raw;
+       nand->ecc.read_page = mtk_nfc_read_page_hwecc;
+       nand->ecc.read_oob_raw = mtk_nfc_read_oob_std;
+       nand->ecc.read_oob = mtk_nfc_read_oob_std;
+
+       mtd = nand_to_mtd(nand);
+       mtd->owner = THIS_MODULE;
+       mtd->dev.parent = dev;
+       mtd->name = MTK_NAME;
+       mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops);
+
+       mtk_nfc_hw_init(nfc);
+
+       ret = nand_scan_ident(mtd, nsels, NULL);
+       if (ret)
+               return -ENODEV;
+
+       /* store bbt magic in page, cause OOB is not protected */
+       if (nand->bbt_options & NAND_BBT_USE_FLASH)
+               nand->bbt_options |= NAND_BBT_NO_OOB;
+
+       ret = mtk_nfc_ecc_init(dev, mtd);
+       if (ret)
+               return -EINVAL;
+
+       if (nand->options & NAND_BUSWIDTH_16) {
+               dev_err(dev, "16bits buswidth not supported");
+               return -EINVAL;
+       }
+
+       /* geometry depends on the ECC settings fixed just above */
+       mtk_nfc_set_spare_per_sector(&chip->spare_per_sector, mtd);
+       mtk_nfc_set_fdm(&chip->fdm, mtd);
+       mtk_nfc_set_bad_mark_ctl(&chip->bad_mark, mtd);
+
+       /* bounce buffer big enough for a raw page (data + OOB) */
+       len = mtd->writesize + mtd->oobsize;
+       nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL);
+       if (!nfc->buffer)
+               return  -ENOMEM;
+
+       ret = nand_scan_tail(mtd);
+       if (ret)
+               return -ENODEV;
+
+       ret = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
+       if (ret) {
+               dev_err(dev, "mtd parse partition error\n");
+               nand_release(mtd);
+               return ret;
+       }
+
+       list_add_tail(&chip->node, &nfc->chips);
+
+       return 0;
+}
+
+/*
+ * Initialize every NAND chip child node of the controller; stop at the
+ * first failure (dropping the iterator's node reference).
+ */
+static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc)
+{
+       struct device_node *np = dev->of_node;
+       struct device_node *nand_np;
+       int ret;
+
+       for_each_child_of_node(np, nand_np) {
+               ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np);
+               if (ret) {
+                       /* balance the reference held by the iterator */
+                       of_node_put(nand_np);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Platform probe: acquire the ECC engine (may defer), map registers,
+ * enable clocks, hook the NFI interrupt, set the 32-bit DMA mask and
+ * bring up all chips described in DT.  Error paths unwind clocks and
+ * the ECC engine reference; other resources are devm-managed.
+ */
+static int mtk_nfc_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       struct mtk_nfc *nfc;
+       struct resource *res;
+       int ret, irq;
+
+       nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
+       if (!nfc)
+               return -ENOMEM;
+
+       spin_lock_init(&nfc->controller.lock);
+       init_waitqueue_head(&nfc->controller.wq);
+       INIT_LIST_HEAD(&nfc->chips);
+
+       /* probe defer if not ready */
+       nfc->ecc = of_mtk_ecc_get(np);
+       if (IS_ERR(nfc->ecc))
+               return PTR_ERR(nfc->ecc);
+       else if (!nfc->ecc)
+               return -ENODEV;
+
+       nfc->dev = dev;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       nfc->regs = devm_ioremap_resource(dev, res);
+       if (IS_ERR(nfc->regs)) {
+               ret = PTR_ERR(nfc->regs);
+               dev_err(dev, "no nfi base\n");
+               goto release_ecc;
+       }
+
+       nfc->clk.nfi_clk = devm_clk_get(dev, "nfi_clk");
+       if (IS_ERR(nfc->clk.nfi_clk)) {
+               dev_err(dev, "no clk\n");
+               ret = PTR_ERR(nfc->clk.nfi_clk);
+               goto release_ecc;
+       }
+
+       nfc->clk.pad_clk = devm_clk_get(dev, "pad_clk");
+       if (IS_ERR(nfc->clk.pad_clk)) {
+               dev_err(dev, "no pad clk\n");
+               ret = PTR_ERR(nfc->clk.pad_clk);
+               goto release_ecc;
+       }
+
+       ret = mtk_nfc_enable_clk(dev, &nfc->clk);
+       if (ret)
+               goto release_ecc;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(dev, "no nfi irq resource\n");
+               ret = -EINVAL;
+               goto clk_disable;
+       }
+
+       ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc);
+       if (ret) {
+               dev_err(dev, "failed to request nfi irq\n");
+               goto clk_disable;
+       }
+
+       ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+       if (ret) {
+               dev_err(dev, "failed to set dma mask\n");
+               goto clk_disable;
+       }
+
+       platform_set_drvdata(pdev, nfc);
+
+       ret = mtk_nfc_nand_chips_init(dev, nfc);
+       if (ret) {
+               dev_err(dev, "failed to init nand chips\n");
+               goto clk_disable;
+       }
+
+       return 0;
+
+clk_disable:
+       mtk_nfc_disable_clk(&nfc->clk);
+
+release_ecc:
+       mtk_ecc_release(nfc->ecc);
+
+       return ret;
+}
+
+/*
+ * Platform remove: unregister and unlink every chip, then drop the ECC
+ * engine reference and gate the clocks.
+ */
+static int mtk_nfc_remove(struct platform_device *pdev)
+{
+       struct mtk_nfc *nfc = platform_get_drvdata(pdev);
+       struct mtk_nfc_nand_chip *chip;
+
+       while (!list_empty(&nfc->chips)) {
+               chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip,
+                                       node);
+               nand_release(nand_to_mtd(&chip->nand));
+               list_del(&chip->node);
+       }
+
+       mtk_ecc_release(nfc->ecc);
+       mtk_nfc_disable_clk(&nfc->clk);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* Suspend: only the clocks need gating; register state is rebuilt on resume. */
+static int mtk_nfc_suspend(struct device *dev)
+{
+       struct mtk_nfc *nfc = dev_get_drvdata(dev);
+
+       mtk_nfc_disable_clk(&nfc->clk);
+
+       return 0;
+}
+
+/*
+ * Resume: re-enable clocks, reprogram the controller, and issue a RESET
+ * to every die in case its VCC was cut during suspend.
+ */
+static int mtk_nfc_resume(struct device *dev)
+{
+       struct mtk_nfc *nfc = dev_get_drvdata(dev);
+       struct mtk_nfc_nand_chip *chip;
+       struct nand_chip *nand;
+       struct mtd_info *mtd;
+       int ret;
+       u32 i;
+
+       /* settle time after power-up before touching the hardware */
+       udelay(200);
+
+       ret = mtk_nfc_enable_clk(dev, &nfc->clk);
+       if (ret)
+               return ret;
+
+       mtk_nfc_hw_init(nfc);
+
+       /* reset NAND chip if VCC was powered off */
+       list_for_each_entry(chip, &nfc->chips, node) {
+               nand = &chip->nand;
+               mtd = nand_to_mtd(nand);
+               for (i = 0; i < chip->nsels; i++) {
+                       nand->select_chip(mtd, i);
+                       nand->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+               }
+       }
+
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume);
+#endif
+
+/* DT match table: currently only the MT2701 NAND flash controller. */
+static const struct of_device_id mtk_nfc_id_table[] = {
+       { .compatible = "mediatek,mt2701-nfc" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, mtk_nfc_id_table);
+
+static struct platform_driver mtk_nfc_driver = {
+       .probe  = mtk_nfc_probe,
+       .remove = mtk_nfc_remove,
+       .driver = {
+               .name  = MTK_NAME,
+               .of_match_table = mtk_nfc_id_table,
+#ifdef CONFIG_PM_SLEEP
+               .pm = &mtk_nfc_pm_ops,
+#endif
+       },
+};
+
+module_platform_driver(mtk_nfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
+MODULE_DESCRIPTION("MTK Nand Flash Controller Driver");
index 0b0dc29d2af78b59ca36bb7f706b1139e065f55a..77533f7f242937ae72e43a78e6256fb68982552f 100644 (file)
@@ -2610,7 +2610,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
                int cached = writelen > bytes && page != blockmask;
                uint8_t *wbuf = buf;
                int use_bufpoi;
-               int part_pagewr = (column || writelen < (mtd->writesize - 1));
+               int part_pagewr = (column || writelen < mtd->writesize);
 
                if (part_pagewr)
                        use_bufpoi = 1;
index ccc05f5b2695f935aa374ce4f3bb7b50e07aa879..2af9869a115e97fb72f34e19ce0b6751b30ddd11 100644 (file)
@@ -168,6 +168,7 @@ struct nand_flash_dev nand_flash_ids[] = {
 /* Manufacturer IDs */
 struct nand_manufacturers nand_manuf_ids[] = {
        {NAND_MFR_TOSHIBA, "Toshiba"},
+       {NAND_MFR_ESMT, "ESMT"},
        {NAND_MFR_SAMSUNG, "Samsung"},
        {NAND_MFR_FUJITSU, "Fujitsu"},
        {NAND_MFR_NATIONAL, "National"},
index a136da8df6fe897d4f908d2723d4a95d8b152933..a59361c36f404ff08fa7bc314777203b7dc22a93 100644 (file)
 #define        PREFETCH_STATUS_FIFO_CNT(val)   ((val >> 24) & 0x7F)
 #define        STATUS_BUFF_EMPTY               0x00000001
 
-#define OMAP24XX_DMA_GPMC              4
-
 #define SECTOR_BYTES           512
 /* 4 bit padding to make byte aligned, 56 = 52 + 4 */
 #define BCH4_BIT_PAD           4
@@ -1811,7 +1809,6 @@ static int omap_nand_probe(struct platform_device *pdev)
        struct nand_chip                *nand_chip;
        int                             err;
        dma_cap_mask_t                  mask;
-       unsigned                        sig;
        struct resource                 *res;
        struct device                   *dev = &pdev->dev;
        int                             min_oobbytes = BADBLOCK_MARKER_LENGTH;
@@ -1924,11 +1921,11 @@ static int omap_nand_probe(struct platform_device *pdev)
        case NAND_OMAP_PREFETCH_DMA:
                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);
-               sig = OMAP24XX_DMA_GPMC;
-               info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig);
-               if (!info->dma) {
+               info->dma = dma_request_chan(pdev->dev.parent, "rxtx");
+
+               if (IS_ERR(info->dma)) {
                        dev_err(&pdev->dev, "DMA engine request failed\n");
-                       err = -ENXIO;
+                       err = PTR_ERR(info->dma);
                        goto return_error;
                } else {
                        struct dma_slave_config cfg;
index a83a690688b45067a318ef14ed97970c588bb018..e414b31b71c17111e923825ec8c178a3c40e8948 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/iopoll.h>
+#include <linux/reset.h>
 
 #define NFC_REG_CTL            0x0000
 #define NFC_REG_ST             0x0004
 
 /* define bit use in NFC_ECC_ST */
 #define NFC_ECC_ERR(x)         BIT(x)
+#define NFC_ECC_ERR_MSK                GENMASK(15, 0)
 #define NFC_ECC_PAT_FOUND(x)   BIT(x + 16)
 #define NFC_ECC_ERR_CNT(b, x)  (((x) >> (((b) % 4) * 8)) & 0xff)
 
@@ -269,10 +271,12 @@ struct sunxi_nfc {
        void __iomem *regs;
        struct clk *ahb_clk;
        struct clk *mod_clk;
+       struct reset_control *reset;
        unsigned long assigned_cs;
        unsigned long clk_rate;
        struct list_head chips;
        struct completion complete;
+       struct dma_chan *dmac;
 };
 
 static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_hw_control *ctrl)
@@ -365,6 +369,67 @@ static int sunxi_nfc_rst(struct sunxi_nfc *nfc)
        return ret;
 }
 
+/*
+ * Map @buf (nchunks * chunksize bytes) as a single-entry scatterlist,
+ * build and submit a slave-DMA descriptor on the NFC channel, and switch
+ * the controller into DMA mode (NFC_RAM_METHOD) with the chunk geometry
+ * programmed.  On any failure the RAM-method flag and the mapping are
+ * rolled back.  Paired with sunxi_nfc_dma_op_cleanup().
+ */
+static int sunxi_nfc_dma_op_prepare(struct mtd_info *mtd, const void *buf,
+                                   int chunksize, int nchunks,
+                                   enum dma_data_direction ddir,
+                                   struct scatterlist *sg)
+{
+       struct nand_chip *nand = mtd_to_nand(mtd);
+       struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+       struct dma_async_tx_descriptor *dmad;
+       enum dma_transfer_direction tdir;
+       dma_cookie_t dmat;
+       int ret;
+
+       if (ddir == DMA_FROM_DEVICE)
+               tdir = DMA_DEV_TO_MEM;
+       else
+               tdir = DMA_MEM_TO_DEV;
+
+       sg_init_one(sg, buf, nchunks * chunksize);
+       /* dma_map_sg() returns the number of mapped entries; 0 = failure */
+       ret = dma_map_sg(nfc->dev, sg, 1, ddir);
+       if (!ret)
+               return -ENOMEM;
+
+       dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK);
+       if (!dmad) {
+               ret = -EINVAL;
+               goto err_unmap_buf;
+       }
+
+       writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
+              nfc->regs + NFC_REG_CTL);
+       writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM);
+       writel(chunksize, nfc->regs + NFC_REG_CNT);
+       dmat = dmaengine_submit(dmad);
+
+       ret = dma_submit_error(dmat);
+       if (ret)
+               goto err_clr_dma_flag;
+
+       return 0;
+
+err_clr_dma_flag:
+       writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
+              nfc->regs + NFC_REG_CTL);
+
+err_unmap_buf:
+       dma_unmap_sg(nfc->dev, sg, 1, ddir);
+       return ret;
+}
+
+/*
+ * Undo sunxi_nfc_dma_op_prepare(): unmap the scatterlist and take the
+ * controller back out of DMA (RAM-method) mode.
+ */
+static void sunxi_nfc_dma_op_cleanup(struct mtd_info *mtd,
+                                    enum dma_data_direction ddir,
+                                    struct scatterlist *sg)
+{
+       struct nand_chip *nand = mtd_to_nand(mtd);
+       struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+       dma_unmap_sg(nfc->dev, sg, 1, ddir);
+       writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
+              nfc->regs + NFC_REG_CTL);
+}
+
 static int sunxi_nfc_dev_ready(struct mtd_info *mtd)
 {
        struct nand_chip *nand = mtd_to_nand(mtd);
@@ -822,17 +887,15 @@ static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd,
 }
 
 static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob,
-                                   int step, bool *erased)
+                                   int step, u32 status, bool *erased)
 {
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
        struct nand_ecc_ctrl *ecc = &nand->ecc;
-       u32 status, tmp;
+       u32 tmp;
 
        *erased = false;
 
-       status = readl(nfc->regs + NFC_REG_ECC_ST);
-
        if (status & NFC_ECC_ERR(step))
                return -EBADMSG;
 
@@ -898,6 +961,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
        *cur_off = oob_off + ecc->bytes + 4;
 
        ret = sunxi_nfc_hw_ecc_correct(mtd, data, oob_required ? oob : NULL, 0,
+                                      readl(nfc->regs + NFC_REG_ECC_ST),
                                       &erased);
        if (erased)
                return 1;
@@ -967,6 +1031,130 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
                *cur_off = mtd->oobsize + mtd->writesize;
 }
 
+static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
+                                           int oob_required, int page,
+                                           int nchunks)
+{
+       struct nand_chip *nand = mtd_to_nand(mtd);
+       bool randomized = nand->options & NAND_NEED_SCRAMBLING;
+       struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+       struct nand_ecc_ctrl *ecc = &nand->ecc;
+       unsigned int max_bitflips = 0;
+       int ret, i, raw_mode = 0;
+       struct scatterlist sg;
+       u32 status;
+
+       ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+       if (ret)
+               return ret;
+
+       ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, nchunks,
+                                      DMA_FROM_DEVICE, &sg);
+       if (ret)
+               return ret;
+
+       sunxi_nfc_hw_ecc_enable(mtd);
+       sunxi_nfc_randomizer_config(mtd, page, false);
+       sunxi_nfc_randomizer_enable(mtd);
+
+       writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) |
+              NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET);
+
+       dma_async_issue_pending(nfc->dmac);
+
+       writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS,
+              nfc->regs + NFC_REG_CMD);
+
+       ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+       if (ret)
+               dmaengine_terminate_all(nfc->dmac);
+
+       sunxi_nfc_randomizer_disable(mtd);
+       sunxi_nfc_hw_ecc_disable(mtd);
+
+       sunxi_nfc_dma_op_cleanup(mtd, DMA_FROM_DEVICE, &sg);
+
+       if (ret)
+               return ret;
+
+       status = readl(nfc->regs + NFC_REG_ECC_ST);
+
+       for (i = 0; i < nchunks; i++) {
+               int data_off = i * ecc->size;
+               int oob_off = i * (ecc->bytes + 4);
+               u8 *data = buf + data_off;
+               u8 *oob = nand->oob_poi + oob_off;
+               bool erased;
+
+               ret = sunxi_nfc_hw_ecc_correct(mtd, randomized ? data : NULL,
+                                              oob_required ? oob : NULL,
+                                              i, status, &erased);
+
+               /* ECC errors are handled in the second loop. */
+               if (ret < 0)
+                       continue;
+
+               if (oob_required && !erased) {
+                       /* TODO: use DMA to retrieve OOB */
+                       nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
+                                     mtd->writesize + oob_off, -1);
+                       nand->read_buf(mtd, oob, ecc->bytes + 4);
+
+                       sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i,
+                                                           !i, page);
+               }
+
+               if (erased)
+                       raw_mode = 1;
+
+               sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret);
+       }
+
+       if (status & NFC_ECC_ERR_MSK) {
+               for (i = 0; i < nchunks; i++) {
+                       int data_off = i * ecc->size;
+                       int oob_off = i * (ecc->bytes + 4);
+                       u8 *data = buf + data_off;
+                       u8 *oob = nand->oob_poi + oob_off;
+
+                       if (!(status & NFC_ECC_ERR(i)))
+                               continue;
+
+                       /*
+                        * Re-read the data with the randomizer disabled to
+                        * identify bitflips in erased pages.
+                        */
+                       if (randomized) {
+                               /* TODO: use DMA to read page in raw mode */
+                               nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
+                                             data_off, -1);
+                               nand->read_buf(mtd, data, ecc->size);
+                       }
+
+                       /* TODO: use DMA to retrieve OOB */
+                       nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
+                                     mtd->writesize + oob_off, -1);
+                       nand->read_buf(mtd, oob, ecc->bytes + 4);
+
+                       ret = nand_check_erased_ecc_chunk(data, ecc->size,
+                                                         oob, ecc->bytes + 4,
+                                                         NULL, 0,
+                                                         ecc->strength);
+                       if (ret >= 0)
+                               raw_mode = 1;
+
+                       sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret);
+               }
+       }
+
+       if (oob_required)
+               sunxi_nfc_hw_ecc_read_extra_oob(mtd, nand->oob_poi,
+                                               NULL, !raw_mode,
+                                               page);
+
+       return max_bitflips;
+}
+
 static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
                                        const u8 *data, int data_off,
                                        const u8 *oob, int oob_off,
@@ -1065,6 +1253,23 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
        return max_bitflips;
 }
 
+static int sunxi_nfc_hw_ecc_read_page_dma(struct mtd_info *mtd,
+                                         struct nand_chip *chip, u8 *buf,
+                                         int oob_required, int page)
+{
+       int ret;
+
+       ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page,
+                                              chip->ecc.steps);
+       if (ret >= 0)
+               return ret;
+
+       /* Fallback to PIO mode */
+       chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
+
+       return sunxi_nfc_hw_ecc_read_page(mtd, chip, buf, oob_required, page);
+}
+
 static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
                                         struct nand_chip *chip,
                                         u32 data_offs, u32 readlen,
@@ -1098,6 +1303,25 @@ static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
        return max_bitflips;
 }
 
+static int sunxi_nfc_hw_ecc_read_subpage_dma(struct mtd_info *mtd,
+                                            struct nand_chip *chip,
+                                            u32 data_offs, u32 readlen,
+                                            u8 *buf, int page)
+{
+       int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
+       int ret;
+
+       ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, false, page, nchunks);
+       if (ret >= 0)
+               return ret;
+
+       /* Fallback to PIO mode */
+       chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
+
+       return sunxi_nfc_hw_ecc_read_subpage(mtd, chip, data_offs, readlen,
+                                            buf, page);
+}
+
 static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
                                       struct nand_chip *chip,
                                       const uint8_t *buf, int oob_required,
@@ -1130,6 +1354,99 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
        return 0;
 }
 
+static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
+                                         struct nand_chip *chip,
+                                         u32 data_offs, u32 data_len,
+                                         const u8 *buf, int oob_required,
+                                         int page)
+{
+       struct nand_ecc_ctrl *ecc = &chip->ecc;
+       int ret, i, cur_off = 0;
+
+       sunxi_nfc_hw_ecc_enable(mtd);
+
+       for (i = data_offs / ecc->size;
+            i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) {
+               int data_off = i * ecc->size;
+               int oob_off = i * (ecc->bytes + 4);
+               const u8 *data = buf + data_off;
+               const u8 *oob = chip->oob_poi + oob_off;
+
+               ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
+                                                  oob_off + mtd->writesize,
+                                                  &cur_off, !i, page);
+               if (ret)
+                       return ret;
+       }
+
+       sunxi_nfc_hw_ecc_disable(mtd);
+
+       return 0;
+}
+
+static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
+                                          struct nand_chip *chip,
+                                          const u8 *buf,
+                                          int oob_required,
+                                          int page)
+{
+       struct nand_chip *nand = mtd_to_nand(mtd);
+       struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+       struct nand_ecc_ctrl *ecc = &nand->ecc;
+       struct scatterlist sg;
+       int ret, i;
+
+       ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+       if (ret)
+               return ret;
+
+       ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, ecc->steps,
+                                      DMA_TO_DEVICE, &sg);
+       if (ret)
+               goto pio_fallback;
+
+       for (i = 0; i < ecc->steps; i++) {
+               const u8 *oob = nand->oob_poi + (i * (ecc->bytes + 4));
+
+               sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page);
+       }
+
+       sunxi_nfc_hw_ecc_enable(mtd);
+       sunxi_nfc_randomizer_config(mtd, page, false);
+       sunxi_nfc_randomizer_enable(mtd);
+
+       writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG,
+              nfc->regs + NFC_REG_RCMD_SET);
+
+       dma_async_issue_pending(nfc->dmac);
+
+       writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD |
+              NFC_DATA_TRANS | NFC_ACCESS_DIR,
+              nfc->regs + NFC_REG_CMD);
+
+       ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+       if (ret)
+               dmaengine_terminate_all(nfc->dmac);
+
+       sunxi_nfc_randomizer_disable(mtd);
+       sunxi_nfc_hw_ecc_disable(mtd);
+
+       sunxi_nfc_dma_op_cleanup(mtd, DMA_TO_DEVICE, &sg);
+
+       if (ret)
+               return ret;
+
+       if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
+               /* TODO: use DMA to transfer extra OOB bytes ? */
+               sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
+                                                NULL, page);
+
+       return 0;
+
+pio_fallback:
+       return sunxi_nfc_hw_ecc_write_page(mtd, chip, buf, oob_required, page);
+}
+
 static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
                                               struct nand_chip *chip,
                                               uint8_t *buf, int oob_required,
@@ -1497,10 +1814,19 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
        int ret;
        int i;
 
+       if (ecc->size != 512 && ecc->size != 1024)
+               return -EINVAL;
+
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
+       /* Prefer 1k ECC chunk over 512 ones */
+       if (ecc->size == 512 && mtd->writesize > 512) {
+               ecc->size = 1024;
+               ecc->strength *= 2;
+       }
+
        /* Add ECC info retrieval from DT */
        for (i = 0; i < ARRAY_SIZE(strengths); i++) {
                if (ecc->strength <= strengths[i])
@@ -1550,14 +1876,28 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
                                       struct nand_ecc_ctrl *ecc,
                                       struct device_node *np)
 {
+       struct nand_chip *nand = mtd_to_nand(mtd);
+       struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+       struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
        int ret;
 
        ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
        if (ret)
                return ret;
 
-       ecc->read_page = sunxi_nfc_hw_ecc_read_page;
-       ecc->write_page = sunxi_nfc_hw_ecc_write_page;
+       if (nfc->dmac) {
+               ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma;
+               ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma;
+               ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma;
+               nand->options |= NAND_USE_BOUNCE_BUFFER;
+       } else {
+               ecc->read_page = sunxi_nfc_hw_ecc_read_page;
+               ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
+               ecc->write_page = sunxi_nfc_hw_ecc_write_page;
+       }
+
+       /* TODO: support DMA for raw accesses and subpage write */
+       ecc->write_subpage = sunxi_nfc_hw_ecc_write_subpage;
        ecc->read_oob_raw = nand_read_oob_std;
        ecc->write_oob_raw = nand_write_oob_std;
        ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
@@ -1871,26 +2211,59 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
        if (ret)
                goto out_ahb_clk_unprepare;
 
+       nfc->reset = devm_reset_control_get_optional(dev, "ahb");
+       if (!IS_ERR(nfc->reset)) {
+               ret = reset_control_deassert(nfc->reset);
+               if (ret) {
+                       dev_err(dev, "reset err %d\n", ret);
+                       goto out_mod_clk_unprepare;
+               }
+       } else if (PTR_ERR(nfc->reset) != -ENOENT) {
+               ret = PTR_ERR(nfc->reset);
+               goto out_mod_clk_unprepare;
+       }
+
        ret = sunxi_nfc_rst(nfc);
        if (ret)
-               goto out_mod_clk_unprepare;
+               goto out_ahb_reset_reassert;
 
        writel(0, nfc->regs + NFC_REG_INT);
        ret = devm_request_irq(dev, irq, sunxi_nfc_interrupt,
                               0, "sunxi-nand", nfc);
        if (ret)
-               goto out_mod_clk_unprepare;
+               goto out_ahb_reset_reassert;
+
+       nfc->dmac = dma_request_slave_channel(dev, "rxtx");
+       if (nfc->dmac) {
+               struct dma_slave_config dmac_cfg = { };
+
+               dmac_cfg.src_addr = r->start + NFC_REG_IO_DATA;
+               dmac_cfg.dst_addr = dmac_cfg.src_addr;
+               dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+               dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width;
+               dmac_cfg.src_maxburst = 4;
+               dmac_cfg.dst_maxburst = 4;
+               dmaengine_slave_config(nfc->dmac, &dmac_cfg);
+       } else {
+               dev_warn(dev, "failed to request rxtx DMA channel\n");
+       }
 
        platform_set_drvdata(pdev, nfc);
 
        ret = sunxi_nand_chips_init(dev, nfc);
        if (ret) {
                dev_err(dev, "failed to init nand chips\n");
-               goto out_mod_clk_unprepare;
+               goto out_release_dmac;
        }
 
        return 0;
 
+out_release_dmac:
+       if (nfc->dmac)
+               dma_release_channel(nfc->dmac);
+out_ahb_reset_reassert:
+       if (!IS_ERR(nfc->reset))
+               reset_control_assert(nfc->reset);
 out_mod_clk_unprepare:
        clk_disable_unprepare(nfc->mod_clk);
 out_ahb_clk_unprepare:
@@ -1904,6 +2277,12 @@ static int sunxi_nfc_remove(struct platform_device *pdev)
        struct sunxi_nfc *nfc = platform_get_drvdata(pdev);
 
        sunxi_nand_chips_cleanup(nfc);
+
+       if (!IS_ERR(nfc->reset))
+               reset_control_assert(nfc->reset);
+
+       if (nfc->dmac)
+               dma_release_channel(nfc->dmac);
        clk_disable_unprepare(nfc->mod_clk);
        clk_disable_unprepare(nfc->ahb_clk);
 
index 0cf0ac07a8c25b3664234c9c350ed6daabeca919..1f2948c0c458d60ec60e898bb1b4893ddba33ecf 100644 (file)
@@ -4,6 +4,7 @@
  *  by the Free Software Foundation.
  *
  *  Copyright © 2012 John Crispin <blogic@openwrt.org>
+ *  Copyright © 2016 Hauke Mehrtens <hauke@hauke-m.de>
  */
 
 #include <linux/mtd/nand.h>
 #define EBU_ADDSEL1            0x24
 #define EBU_NAND_CON           0xB0
 #define EBU_NAND_WAIT          0xB4
+#define  NAND_WAIT_RD          BIT(0) /* NAND flash status output */
+#define  NAND_WAIT_WR_C                BIT(3) /* NAND Write/Read complete */
 #define EBU_NAND_ECC0          0xB8
 #define EBU_NAND_ECC_AC                0xBC
 
-/* nand commands */
-#define NAND_CMD_ALE           (1 << 2)
-#define NAND_CMD_CLE           (1 << 3)
-#define NAND_CMD_CS            (1 << 4)
-#define NAND_WRITE_CMD_RESET   0xff
+/*
+ * nand commands
+ * The pins of the NAND chip are selected based on the address bits of the
+ * "register" read and write. There are no special registers, but an
+ * address range and the lower address bits are used to activate the
+ * correct line. For example when the bit (1 << 2) is set in the address
+ * the ALE pin will be activated.
+ */
+#define NAND_CMD_ALE           BIT(2) /* address latch enable */
+#define NAND_CMD_CLE           BIT(3) /* command latch enable */
+#define NAND_CMD_CS            BIT(4) /* chip select */
+#define NAND_CMD_SE            BIT(5) /* spare area access latch */
+#define NAND_CMD_WP            BIT(6) /* write protect */
 #define NAND_WRITE_CMD         (NAND_CMD_CS | NAND_CMD_CLE)
 #define NAND_WRITE_ADDR                (NAND_CMD_CS | NAND_CMD_ALE)
 #define NAND_WRITE_DATA                (NAND_CMD_CS)
 #define NAND_READ_DATA         (NAND_CMD_CS)
-#define NAND_WAIT_WR_C         (1 << 3)
-#define NAND_WAIT_RD           (0x1)
 
 /* we need to tel the ebu which addr we mapped the nand to */
 #define ADDSEL1_MASK(x)                (x << 4)
 #define NAND_CON_CSMUX         (1 << 1)
 #define NAND_CON_NANDM         1
 
-static void xway_reset_chip(struct nand_chip *chip)
+struct xway_nand_data {
+       struct nand_chip        chip;
+       unsigned long           csflags;
+       void __iomem            *nandaddr;
+};
+
+static u8 xway_readb(struct mtd_info *mtd, int op)
 {
-       unsigned long nandaddr = (unsigned long) chip->IO_ADDR_W;
-       unsigned long flags;
+       struct nand_chip *chip = mtd_to_nand(mtd);
+       struct xway_nand_data *data = nand_get_controller_data(chip);
 
-       nandaddr &= ~NAND_WRITE_ADDR;
-       nandaddr |= NAND_WRITE_CMD;
+       return readb(data->nandaddr + op);
+}
 
-       /* finish with a reset */
-       spin_lock_irqsave(&ebu_lock, flags);
-       writeb(NAND_WRITE_CMD_RESET, (void __iomem *) nandaddr);
-       while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
-               ;
-       spin_unlock_irqrestore(&ebu_lock, flags);
+static void xway_writeb(struct mtd_info *mtd, int op, u8 value)
+{
+       struct nand_chip *chip = mtd_to_nand(mtd);
+       struct xway_nand_data *data = nand_get_controller_data(chip);
+
+       writeb(value, data->nandaddr + op);
 }
 
-static void xway_select_chip(struct mtd_info *mtd, int chip)
+static void xway_select_chip(struct mtd_info *mtd, int select)
 {
+       struct nand_chip *chip = mtd_to_nand(mtd);
+       struct xway_nand_data *data = nand_get_controller_data(chip);
 
-       switch (chip) {
+       switch (select) {
        case -1:
                ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON);
                ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON);
+               spin_unlock_irqrestore(&ebu_lock, data->csflags);
                break;
        case 0:
+               spin_lock_irqsave(&ebu_lock, data->csflags);
                ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON);
                ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON);
                break;
@@ -89,26 +108,16 @@ static void xway_select_chip(struct mtd_info *mtd, int chip)
 
 static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
 {
-       struct nand_chip *this = mtd_to_nand(mtd);
-       unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
-       unsigned long flags;
-
-       if (ctrl & NAND_CTRL_CHANGE) {
-               nandaddr &= ~(NAND_WRITE_CMD | NAND_WRITE_ADDR);
-               if (ctrl & NAND_CLE)
-                       nandaddr |= NAND_WRITE_CMD;
-               else
-                       nandaddr |= NAND_WRITE_ADDR;
-               this->IO_ADDR_W = (void __iomem *) nandaddr;
-       }
+       if (cmd == NAND_CMD_NONE)
+               return;
 
-       if (cmd != NAND_CMD_NONE) {
-               spin_lock_irqsave(&ebu_lock, flags);
-               writeb(cmd, this->IO_ADDR_W);
-               while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
-                       ;
-               spin_unlock_irqrestore(&ebu_lock, flags);
-       }
+       if (ctrl & NAND_CLE)
+               xway_writeb(mtd, NAND_WRITE_CMD, cmd);
+       else if (ctrl & NAND_ALE)
+               xway_writeb(mtd, NAND_WRITE_ADDR, cmd);
+
+       while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
+               ;
 }
 
 static int xway_dev_ready(struct mtd_info *mtd)
@@ -118,80 +127,122 @@ static int xway_dev_ready(struct mtd_info *mtd)
 
 static unsigned char xway_read_byte(struct mtd_info *mtd)
 {
-       struct nand_chip *this = mtd_to_nand(mtd);
-       unsigned long nandaddr = (unsigned long) this->IO_ADDR_R;
-       unsigned long flags;
-       int ret;
+       return xway_readb(mtd, NAND_READ_DATA);
+}
+
+static void xway_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+       int i;
 
-       spin_lock_irqsave(&ebu_lock, flags);
-       ret = ltq_r8((void __iomem *)(nandaddr + NAND_READ_DATA));
-       spin_unlock_irqrestore(&ebu_lock, flags);
+       for (i = 0; i < len; i++)
+               buf[i] = xway_readb(mtd, NAND_WRITE_DATA);
+}
 
-       return ret;
+static void xway_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+       int i;
+
+       for (i = 0; i < len; i++)
+               xway_writeb(mtd, NAND_WRITE_DATA, buf[i]);
 }
 
+/*
+ * Probe for the NAND device.
+ */
 static int xway_nand_probe(struct platform_device *pdev)
 {
-       struct nand_chip *this = platform_get_drvdata(pdev);
-       unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
-       const __be32 *cs = of_get_property(pdev->dev.of_node,
-                                       "lantiq,cs", NULL);
+       struct xway_nand_data *data;
+       struct mtd_info *mtd;
+       struct resource *res;
+       int err;
+       u32 cs;
        u32 cs_flag = 0;
 
+       /* Allocate memory for the device structure (and zero it) */
+       data = devm_kzalloc(&pdev->dev, sizeof(struct xway_nand_data),
+                           GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       data->nandaddr = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(data->nandaddr))
+               return PTR_ERR(data->nandaddr);
+
+       nand_set_flash_node(&data->chip, pdev->dev.of_node);
+       mtd = nand_to_mtd(&data->chip);
+       mtd->dev.parent = &pdev->dev;
+
+       data->chip.cmd_ctrl = xway_cmd_ctrl;
+       data->chip.dev_ready = xway_dev_ready;
+       data->chip.select_chip = xway_select_chip;
+       data->chip.write_buf = xway_write_buf;
+       data->chip.read_buf = xway_read_buf;
+       data->chip.read_byte = xway_read_byte;
+       data->chip.chip_delay = 30;
+
+       data->chip.ecc.mode = NAND_ECC_SOFT;
+       data->chip.ecc.algo = NAND_ECC_HAMMING;
+
+       platform_set_drvdata(pdev, data);
+       nand_set_controller_data(&data->chip, data);
+
        /* load our CS from the DT. Either we find a valid 1 or default to 0 */
-       if (cs && (*cs == 1))
+       err = of_property_read_u32(pdev->dev.of_node, "lantiq,cs", &cs);
+       if (!err && cs == 1)
                cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;
 
        /* setup the EBU to run in NAND mode on our base addr */
-       ltq_ebu_w32(CPHYSADDR(nandaddr)
-               | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);
+       ltq_ebu_w32(CPHYSADDR(data->nandaddr)
+                   | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);
 
        ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2
-               | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1
-               | BUSCON1_CMULT4, LTQ_EBU_BUSCON1);
+                   | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1
+                   | BUSCON1_CMULT4, LTQ_EBU_BUSCON1);
 
        ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P
-               | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
-               | cs_flag, EBU_NAND_CON);
+                   | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
+                   | cs_flag, EBU_NAND_CON);
 
-       /* finish with a reset */
-       xway_reset_chip(this);
+       /* Scan to find existence of the device */
+       err = nand_scan(mtd, 1);
+       if (err)
+               return err;
 
-       return 0;
-}
+       err = mtd_device_register(mtd, NULL, 0);
+       if (err)
+               nand_release(mtd);
 
-static struct platform_nand_data xway_nand_data = {
-       .chip = {
-               .nr_chips               = 1,
-               .chip_delay             = 30,
-       },
-       .ctrl = {
-               .probe          = xway_nand_probe,
-               .cmd_ctrl       = xway_cmd_ctrl,
-               .dev_ready      = xway_dev_ready,
-               .select_chip    = xway_select_chip,
-               .read_byte      = xway_read_byte,
-       }
-};
+       return err;
+}
 
 /*
- * Try to find the node inside the DT. If it is available attach out
- * platform_nand_data
+ * Remove a NAND device.
  */
-static int __init xway_register_nand(void)
+static int xway_nand_remove(struct platform_device *pdev)
 {
-       struct device_node *node;
-       struct platform_device *pdev;
-
-       node = of_find_compatible_node(NULL, NULL, "lantiq,nand-xway");
-       if (!node)
-               return -ENOENT;
-       pdev = of_find_device_by_node(node);
-       if (!pdev)
-               return -EINVAL;
-       pdev->dev.platform_data = &xway_nand_data;
-       of_node_put(node);
+       struct xway_nand_data *data = platform_get_drvdata(pdev);
+
+       nand_release(nand_to_mtd(&data->chip));
+
        return 0;
 }
 
-subsys_initcall(xway_register_nand);
+static const struct of_device_id xway_nand_match[] = {
+       { .compatible = "lantiq,nand-xway" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, xway_nand_match);
+
+static struct platform_driver xway_nand_driver = {
+       .probe  = xway_nand_probe,
+       .remove = xway_nand_remove,
+       .driver = {
+               .name           = "lantiq,nand-xway",
+               .of_match_table = xway_nand_match,
+       },
+};
+
+module_platform_driver(xway_nand_driver);
+
+MODULE_LICENSE("GPL");
index a4b029a417f04edf9740e29d286659b9e19712aa..1a6d0e367b89c193d525e616f6c64c406fa0fe71 100644 (file)
@@ -3188,13 +3188,13 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
                        size_t tmp_retlen;
 
                        ret = action(mtd, from, len, &tmp_retlen, buf);
+                       if (ret)
+                               break;
 
                        buf += tmp_retlen;
                        len -= tmp_retlen;
                        *retlen += tmp_retlen;
 
-                       if (ret)
-                               break;
                }
                otp_pages--;
        }
index d42c98e1f581a0ae076e43f2ac036b7cacebf4dc..4a682ee0f6325cc19d7a81a2dc857985c686b6ee 100644 (file)
@@ -29,6 +29,26 @@ config MTD_SPI_NOR_USE_4K_SECTORS
          Please note that some tools/drivers/filesystems may not work with
          4096 B erase size (e.g. UBIFS requires 15 KiB as a minimum).
 
+config SPI_ATMEL_QUADSPI
+       tristate "Atmel Quad SPI Controller"
+       depends on ARCH_AT91 || (ARM && COMPILE_TEST)
+       depends on OF && HAS_IOMEM
+       help
+         This enables support for the Quad SPI controller in master mode.
+         This driver does not support generic SPI. The implementation only
+         supports SPI NOR.
+
+config SPI_CADENCE_QUADSPI
+       tristate "Cadence Quad SPI controller"
+       depends on OF && ARM
+       help
+         Enable support for the Cadence Quad SPI Flash controller.
+
+         Cadence QSPI is a specialized controller for connecting an SPI
+         Flash over 1/2/4-bit wide bus. Enable this option if you have a
+         device with a Cadence QSPI controller and want to access the
+         Flash as an MTD device.
+
 config SPI_FSL_QUADSPI
        tristate "Freescale Quad SPI controller"
        depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
@@ -38,6 +58,13 @@ config SPI_FSL_QUADSPI
          This controller does not support generic SPI. It only supports
          SPI NOR.
 
+config SPI_HISI_SFC
+       tristate "Hisilicon SPI-NOR Flash Controller(SFC)"
+       depends on ARCH_HISI || COMPILE_TEST
+       depends on HAS_IOMEM && HAS_DMA
+       help
+         This enables support for hisilicon SPI-NOR flash controller.
+
 config SPI_NXP_SPIFI
        tristate "NXP SPI Flash Interface (SPIFI)"
        depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
index 0bf3a7f81675506c2e071fdd563b8f8e0d72e12b..121695e83542ab80593042254aa73e81d5456d45 100644 (file)
@@ -1,4 +1,7 @@
 obj-$(CONFIG_MTD_SPI_NOR)      += spi-nor.o
+obj-$(CONFIG_SPI_ATMEL_QUADSPI)        += atmel-quadspi.o
+obj-$(CONFIG_SPI_CADENCE_QUADSPI)      += cadence-quadspi.o
 obj-$(CONFIG_SPI_FSL_QUADSPI)  += fsl-quadspi.o
+obj-$(CONFIG_SPI_HISI_SFC)     += hisi-sfc.o
 obj-$(CONFIG_MTD_MT81xx_NOR)    += mtk-quadspi.o
 obj-$(CONFIG_SPI_NXP_SPIFI)    += nxp-spifi.o
diff --git a/drivers/mtd/spi-nor/atmel-quadspi.c b/drivers/mtd/spi-nor/atmel-quadspi.c
new file mode 100644 (file)
index 0000000..47937d9
--- /dev/null
@@ -0,0 +1,732 @@
+/*
+ * Driver for Atmel QSPI Controller
+ *
+ * Copyright (C) 2015 Atmel Corporation
+ *
+ * Author: Cyrille Pitchen <cyrille.pitchen@atmel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/platform_data/atmel.h>
+#include <linux/of.h>
+
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/pinctrl/consumer.h>
+
+/* QSPI register offsets */
+#define QSPI_CR      0x0000  /* Control Register */
+#define QSPI_MR      0x0004  /* Mode Register */
+#define QSPI_RD      0x0008  /* Receive Data Register */
+#define QSPI_TD      0x000c  /* Transmit Data Register */
+#define QSPI_SR      0x0010  /* Status Register */
+#define QSPI_IER     0x0014  /* Interrupt Enable Register */
+#define QSPI_IDR     0x0018  /* Interrupt Disable Register */
+#define QSPI_IMR     0x001c  /* Interrupt Mask Register */
+#define QSPI_SCR     0x0020  /* Serial Clock Register */
+
+#define QSPI_IAR     0x0030  /* Instruction Address Register */
+#define QSPI_ICR     0x0034  /* Instruction Code Register */
+#define QSPI_IFR     0x0038  /* Instruction Frame Register */
+
+#define QSPI_SMR     0x0040  /* Scrambling Mode Register */
+#define QSPI_SKR     0x0044  /* Scrambling Key Register */
+
+#define QSPI_WPMR    0x00E4  /* Write Protection Mode Register */
+#define QSPI_WPSR    0x00E8  /* Write Protection Status Register */
+
+#define QSPI_VERSION 0x00FC  /* Version Register */
+
+
+/* Bitfields in QSPI_CR (Control Register) */
+#define QSPI_CR_QSPIEN                  BIT(0)
+#define QSPI_CR_QSPIDIS                 BIT(1)
+#define QSPI_CR_SWRST                   BIT(7)
+#define QSPI_CR_LASTXFER                BIT(24)
+
+/* Bitfields in QSPI_MR (Mode Register) */
+#define QSPI_MR_SSM                     BIT(0)
+#define QSPI_MR_LLB                     BIT(1)
+#define QSPI_MR_WDRBT                   BIT(2)
+#define QSPI_MR_SMRM                    BIT(3)
+#define QSPI_MR_CSMODE_MASK             GENMASK(5, 4)
+#define QSPI_MR_CSMODE_NOT_RELOADED     (0 << 4)
+#define QSPI_MR_CSMODE_LASTXFER         (1 << 4)
+#define QSPI_MR_CSMODE_SYSTEMATICALLY   (2 << 4)
+#define QSPI_MR_NBBITS_MASK             GENMASK(11, 8)
+#define QSPI_MR_NBBITS(n)               ((((n) - 8) << 8) & QSPI_MR_NBBITS_MASK)
+#define QSPI_MR_DLYBCT_MASK             GENMASK(23, 16)
+#define QSPI_MR_DLYBCT(n)               (((n) << 16) & QSPI_MR_DLYBCT_MASK)
+#define QSPI_MR_DLYCS_MASK              GENMASK(31, 24)
+#define QSPI_MR_DLYCS(n)                (((n) << 24) & QSPI_MR_DLYCS_MASK)
+
+/* Bitfields in QSPI_SR/QSPI_IER/QSPI_IDR/QSPI_IMR  */
+#define QSPI_SR_RDRF                    BIT(0)
+#define QSPI_SR_TDRE                    BIT(1)
+#define QSPI_SR_TXEMPTY                 BIT(2)
+#define QSPI_SR_OVRES                   BIT(3)
+#define QSPI_SR_CSR                     BIT(8)
+#define QSPI_SR_CSS                     BIT(9)
+#define QSPI_SR_INSTRE                  BIT(10)
+#define QSPI_SR_QSPIENS                 BIT(24)
+
+#define QSPI_SR_CMD_COMPLETED  (QSPI_SR_INSTRE | QSPI_SR_CSR)
+
+/* Bitfields in QSPI_SCR (Serial Clock Register) */
+#define QSPI_SCR_CPOL                   BIT(0)
+#define QSPI_SCR_CPHA                   BIT(1)
+#define QSPI_SCR_SCBR_MASK              GENMASK(15, 8)
+#define QSPI_SCR_SCBR(n)                (((n) << 8) & QSPI_SCR_SCBR_MASK)
+#define QSPI_SCR_DLYBS_MASK             GENMASK(23, 16)
+#define QSPI_SCR_DLYBS(n)               (((n) << 16) & QSPI_SCR_DLYBS_MASK)
+
+/* Bitfields in QSPI_ICR (Instruction Code Register) */
+#define QSPI_ICR_INST_MASK              GENMASK(7, 0)
+#define QSPI_ICR_INST(inst)             (((inst) << 0) & QSPI_ICR_INST_MASK)
+#define QSPI_ICR_OPT_MASK               GENMASK(23, 16)
+#define QSPI_ICR_OPT(opt)               (((opt) << 16) & QSPI_ICR_OPT_MASK)
+
+/* Bitfields in QSPI_IFR (Instruction Frame Register) */
+#define QSPI_IFR_WIDTH_MASK             GENMASK(2, 0)
+#define QSPI_IFR_WIDTH_SINGLE_BIT_SPI   (0 << 0)
+#define QSPI_IFR_WIDTH_DUAL_OUTPUT      (1 << 0)
+#define QSPI_IFR_WIDTH_QUAD_OUTPUT      (2 << 0)
+#define QSPI_IFR_WIDTH_DUAL_IO          (3 << 0)
+#define QSPI_IFR_WIDTH_QUAD_IO          (4 << 0)
+#define QSPI_IFR_WIDTH_DUAL_CMD         (5 << 0)
+#define QSPI_IFR_WIDTH_QUAD_CMD         (6 << 0)
+#define QSPI_IFR_INSTEN                 BIT(4)
+#define QSPI_IFR_ADDREN                 BIT(5)
+#define QSPI_IFR_OPTEN                  BIT(6)
+#define QSPI_IFR_DATAEN                 BIT(7)
+#define QSPI_IFR_OPTL_MASK              GENMASK(9, 8)
+#define QSPI_IFR_OPTL_1BIT              (0 << 8)
+#define QSPI_IFR_OPTL_2BIT              (1 << 8)
+#define QSPI_IFR_OPTL_4BIT              (2 << 8)
+#define QSPI_IFR_OPTL_8BIT              (3 << 8)
+#define QSPI_IFR_ADDRL                  BIT(10)
+#define QSPI_IFR_TFRTYP_MASK            GENMASK(13, 12)
+#define QSPI_IFR_TFRTYP_TRSFR_READ      (0 << 12)
+#define QSPI_IFR_TFRTYP_TRSFR_READ_MEM  (1 << 12)
+#define QSPI_IFR_TFRTYP_TRSFR_WRITE     (2 << 12)
+/* Fixed: value 3 must be shifted by 12 like its siblings; (3 << 13) falls
+ * outside QSPI_IFR_TFRTYP_MASK (GENMASK(13, 12)) and would corrupt bit 14. */
+#define QSPI_IFR_TFRTYP_TRSFR_WRITE_MEM (3 << 12)
+#define QSPI_IFR_CRM                    BIT(14)
+#define QSPI_IFR_NBDUM_MASK             GENMASK(20, 16)
+#define QSPI_IFR_NBDUM(n)               (((n) << 16) & QSPI_IFR_NBDUM_MASK)
+
+/* Bitfields in QSPI_SMR (Scrambling Mode Register) */
+#define QSPI_SMR_SCREN                  BIT(0)
+#define QSPI_SMR_RVDIS                  BIT(1)
+
+/* Bitfields in QSPI_WPMR (Write Protection Mode Register) */
+#define QSPI_WPMR_WPEN                  BIT(0)
+#define QSPI_WPMR_WPKEY_MASK            GENMASK(31, 8)
+#define QSPI_WPMR_WPKEY(wpkey)          (((wpkey) << 8) & QSPI_WPMR_WPKEY_MASK)
+
+/* Bitfields in QSPI_WPSR (Write Protection Status Register) */
+#define QSPI_WPSR_WPVS                  BIT(0)
+#define QSPI_WPSR_WPVSRC_MASK           GENMASK(15, 8)
+/* Fixed: mask with QSPI_WPSR_WPVSRC_MASK; the original masked with the
+ * undefined self-referencing name QSPI_WPSR_WPVSRC (compile error if used). */
+#define QSPI_WPSR_WPVSRC(src)           (((src) << 8) & QSPI_WPSR_WPVSRC_MASK)
+
+
+/* Per-controller driver state. */
+struct atmel_qspi {
+       void __iomem            *regs;  /* "qspi_base" register window */
+       void __iomem            *mem;   /* "qspi_mmap" AHB memory window */
+       struct clk              *clk;   /* peripheral clock */
+       struct platform_device  *pdev;
+       u32                     pending;        /* SR bits seen so far by the IRQ handler */
+
+       struct spi_nor          nor;
+       u32                     clk_rate;       /* "spi-max-frequency" from the flash DT node */
+       struct completion       cmd_completion; /* signalled when INSTRE and CSR are both set */
+};
+
+/*
+ * In-memory description of one QSPI instruction frame: which phases are
+ * enabled (enable.bits) plus the payload for each enabled phase.
+ */
+struct atmel_qspi_command {
+       union {
+               struct {
+                       u32     instruction:1;  /* send the instruction byte */
+                       u32     address:3;      /* number of address bytes (0, 3 or 4) */
+                       u32     mode:1;         /* send the mode/option byte */
+                       u32     dummy:1;        /* insert dummy cycles */
+                       u32     data:1;         /* data phase present */
+                       u32     reserved:25;
+               }               bits;
+               u32     word;   /* all-zero check/reset via the union */
+       }       enable;
+       u8      instruction;
+       u8      mode;
+       u8      num_mode_cycles;
+       u8      num_dummy_cycles;
+       u32     address;
+
+       size_t          buf_len;
+       const void      *tx_buf;        /* non-NULL for writes */
+       void            *rx_buf;        /* non-NULL for reads */
+};
+
+/* Register access functions (relaxed MMIO accessors on the QSPI window) */
+static inline u32 qspi_readl(struct atmel_qspi *aq, u32 reg)
+{
+       return readl_relaxed(aq->regs + reg);
+}
+
+static inline void qspi_writel(struct atmel_qspi *aq, u32 reg, u32 value)
+{
+       writel_relaxed(value, aq->regs + reg);
+}
+
+/*
+ * Move the data phase of a command by PIO through the AHB memory window.
+ * The flash address, when present, is applied as an offset into that window.
+ * NOTE: a plain memcpy() DOES NOT work on this mapping, hence the explicit
+ * _memcpy_toio()/_memcpy_fromio() accessors.
+ */
+static int atmel_qspi_run_transfer(struct atmel_qspi *aq,
+                                  const struct atmel_qspi_command *cmd)
+{
+       void __iomem *ahb_mem;
+
+       /* PIO transfer over the AHB window (memcpy() DOES NOT work!) */
+       ahb_mem = aq->mem;
+       if (cmd->enable.bits.address)
+               ahb_mem += cmd->address;
+       if (cmd->tx_buf)
+               _memcpy_toio(ahb_mem, cmd->tx_buf, cmd->buf_len);
+       else
+               _memcpy_fromio(cmd->rx_buf, ahb_mem, cmd->buf_len);
+
+       return 0;
+}
+
+#ifdef DEBUG
+/*
+ * Reconstruct the raw byte sequence of a command (instruction, address,
+ * mode and dummy bytes) and hex-dump it for debugging.  Compiled out
+ * unless DEBUG is defined.
+ */
+static void atmel_qspi_debug_command(struct atmel_qspi *aq,
+                                    const struct atmel_qspi_command *cmd,
+                                    u32 ifr)
+{
+       u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
+       size_t len = 0;
+       int i;
+
+       if (cmd->enable.bits.instruction)
+               cmd_buf[len++] = cmd->instruction;
+
+       /* Address bytes are emitted most-significant first */
+       for (i = cmd->enable.bits.address-1; i >= 0; --i)
+               cmd_buf[len++] = (cmd->address >> (i << 3)) & 0xff;
+
+       if (cmd->enable.bits.mode)
+               cmd_buf[len++] = cmd->mode;
+
+       if (cmd->enable.bits.dummy) {
+               int num = cmd->num_dummy_cycles;
+
+               /*
+                * Convert dummy cycles into an equivalent byte count for
+                * the dump: one byte takes 8, 4 or 2 clock cycles at 1-,
+                * 2- or 4-bit bus width respectively.
+                */
+               switch (ifr & QSPI_IFR_WIDTH_MASK) {
+               case QSPI_IFR_WIDTH_SINGLE_BIT_SPI:
+               case QSPI_IFR_WIDTH_DUAL_OUTPUT:
+               case QSPI_IFR_WIDTH_QUAD_OUTPUT:
+                       num >>= 3;
+                       break;
+               case QSPI_IFR_WIDTH_DUAL_IO:
+               case QSPI_IFR_WIDTH_DUAL_CMD:
+                       num >>= 2;
+                       break;
+               case QSPI_IFR_WIDTH_QUAD_IO:
+               case QSPI_IFR_WIDTH_QUAD_CMD:
+                       num >>= 1;
+                       break;
+               default:
+                       return;
+               }
+
+               for (i = 0; i < num; ++i)
+                       cmd_buf[len++] = 0;
+       }
+
+       /* Dump the SPI command */
+       print_hex_dump(KERN_DEBUG, "qspi cmd: ", DUMP_PREFIX_NONE,
+                      32, 1, cmd_buf, len, false);
+
+#ifdef VERBOSE_DEBUG
+       /* If verbose debug is enabled, also dump the TX data */
+       if (cmd->enable.bits.data && cmd->tx_buf)
+               print_hex_dump(KERN_DEBUG, "qspi tx : ", DUMP_PREFIX_NONE,
+                              32, 1, cmd->tx_buf, cmd->buf_len, false);
+#endif
+}
+#else
+#define atmel_qspi_debug_command(aq, cmd, ifr)
+#endif
+
+/*
+ * Build and run one instruction frame: program QSPI_IAR/ICR/IFR from @cmd,
+ * perform the optional data transfer through the AHB window, then wait for
+ * command completion (both INSTRE and CSR status bits), first by polling
+ * and then, if needed, by sleeping on the completion woken from the IRQ
+ * handler.
+ *
+ * Returns 0 on success, -EINVAL for an unsupported address/mode layout or
+ * -ETIMEDOUT if the controller never signals completion.
+ */
+static int atmel_qspi_run_command(struct atmel_qspi *aq,
+                                 const struct atmel_qspi_command *cmd,
+                                 u32 ifr_tfrtyp, u32 ifr_width)
+{
+       u32 iar, icr, ifr, sr;
+       int err = 0;
+
+       iar = 0;
+       icr = 0;
+       ifr = ifr_tfrtyp | ifr_width;
+
+       /* Compute instruction parameters */
+       if (cmd->enable.bits.instruction) {
+               icr |= QSPI_ICR_INST(cmd->instruction);
+               ifr |= QSPI_IFR_INSTEN;
+       }
+
+       /* Compute address parameters (only 0-, 3- or 4-byte addresses) */
+       switch (cmd->enable.bits.address) {
+       case 4:
+               ifr |= QSPI_IFR_ADDRL;
+               /* fall through to the 24bit (3 byte) address case. */
+       case 3:
+               /*
+                * With a data phase the address is conveyed by the offset
+                * into the AHB window (see atmel_qspi_run_transfer()), so
+                * QSPI_IAR is only written for data-less commands.
+                */
+               iar = (cmd->enable.bits.data) ? 0 : cmd->address;
+               ifr |= QSPI_IFR_ADDREN;
+               break;
+       case 0:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* Compute option parameters */
+       if (cmd->enable.bits.mode && cmd->num_mode_cycles) {
+               u32 mode_cycle_bits, mode_bits;
+
+               icr |= QSPI_ICR_OPT(cmd->mode);
+               ifr |= QSPI_IFR_OPTEN;
+
+               /* Bits shifted per mode cycle depend on the bus width */
+               switch (ifr & QSPI_IFR_WIDTH_MASK) {
+               case QSPI_IFR_WIDTH_SINGLE_BIT_SPI:
+               case QSPI_IFR_WIDTH_DUAL_OUTPUT:
+               case QSPI_IFR_WIDTH_QUAD_OUTPUT:
+                       mode_cycle_bits = 1;
+                       break;
+               case QSPI_IFR_WIDTH_DUAL_IO:
+               case QSPI_IFR_WIDTH_DUAL_CMD:
+                       mode_cycle_bits = 2;
+                       break;
+               case QSPI_IFR_WIDTH_QUAD_IO:
+               case QSPI_IFR_WIDTH_QUAD_CMD:
+                       mode_cycle_bits = 4;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+
+               /* The OPTL field only encodes 1, 2, 4 or 8 mode bits */
+               mode_bits = cmd->num_mode_cycles * mode_cycle_bits;
+               switch (mode_bits) {
+               case 1:
+                       ifr |= QSPI_IFR_OPTL_1BIT;
+                       break;
+
+               case 2:
+                       ifr |= QSPI_IFR_OPTL_2BIT;
+                       break;
+
+               case 4:
+                       ifr |= QSPI_IFR_OPTL_4BIT;
+                       break;
+
+               case 8:
+                       ifr |= QSPI_IFR_OPTL_8BIT;
+                       break;
+
+               default:
+                       return -EINVAL;
+               }
+       }
+
+       /* Set number of dummy cycles */
+       if (cmd->enable.bits.dummy)
+               ifr |= QSPI_IFR_NBDUM(cmd->num_dummy_cycles);
+
+       /* Set data enable */
+       if (cmd->enable.bits.data) {
+               ifr |= QSPI_IFR_DATAEN;
+
+               /* Special case for Continuous Read Mode */
+               if (!cmd->tx_buf && !cmd->rx_buf)
+                       ifr |= QSPI_IFR_CRM;
+       }
+
+       /* Clear pending interrupts */
+       (void)qspi_readl(aq, QSPI_SR);
+
+       /* Set QSPI Instruction Frame registers */
+       atmel_qspi_debug_command(aq, cmd, ifr);
+       qspi_writel(aq, QSPI_IAR, iar);
+       qspi_writel(aq, QSPI_ICR, icr);
+       qspi_writel(aq, QSPI_IFR, ifr);
+
+       /* Skip to the final steps if there is no data */
+       if (!cmd->enable.bits.data)
+               goto no_data;
+
+       /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
+       (void)qspi_readl(aq, QSPI_IFR);
+
+       /* Stop here for continuous read */
+       if (!cmd->tx_buf && !cmd->rx_buf)
+               return 0;
+       /* Send/Receive data */
+       err = atmel_qspi_run_transfer(aq, cmd);
+
+       /* Release the chip-select */
+       qspi_writel(aq, QSPI_CR, QSPI_CR_LASTXFER);
+
+       if (err)
+               return err;
+
+#if defined(DEBUG) && defined(VERBOSE_DEBUG)
+       /*
+        * If verbose debug is enabled, also dump the RX data in addition to
+        * the SPI command previously dumped by atmel_qspi_debug_command()
+        */
+       if (cmd->rx_buf)
+               print_hex_dump(KERN_DEBUG, "qspi rx : ", DUMP_PREFIX_NONE,
+                              32, 1, cmd->rx_buf, cmd->buf_len, false);
+#endif
+no_data:
+       /* Poll INSTRuction End status */
+       sr = qspi_readl(aq, QSPI_SR);
+       if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
+               return err;
+
+       /* Wait for INSTRuction End interrupt */
+       reinit_completion(&aq->cmd_completion);
+       aq->pending = sr & QSPI_SR_CMD_COMPLETED;
+       qspi_writel(aq, QSPI_IER, QSPI_SR_CMD_COMPLETED);
+       if (!wait_for_completion_timeout(&aq->cmd_completion,
+                                        msecs_to_jiffies(1000)))
+               err = -ETIMEDOUT;
+       qspi_writel(aq, QSPI_IDR, QSPI_SR_CMD_COMPLETED);
+
+       return err;
+}
+
+/* spi_nor read_reg hook: read @len bytes of a flash register via @opcode. */
+static int atmel_qspi_read_reg(struct spi_nor *nor, u8 opcode,
+                              u8 *buf, int len)
+{
+       struct atmel_qspi *aq = nor->priv;
+       struct atmel_qspi_command cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.enable.bits.instruction = 1;
+       cmd.enable.bits.data = 1;
+       cmd.instruction = opcode;
+       cmd.rx_buf = buf;
+       cmd.buf_len = len;
+       return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_READ,
+                                     QSPI_IFR_WIDTH_SINGLE_BIT_SPI);
+}
+
+/*
+ * spi_nor write_reg hook: send @opcode, optionally followed by @len bytes
+ * of data.  The data phase is skipped when @buf is NULL or @len is 0.
+ */
+static int atmel_qspi_write_reg(struct spi_nor *nor, u8 opcode,
+                               u8 *buf, int len)
+{
+       struct atmel_qspi *aq = nor->priv;
+       struct atmel_qspi_command cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.enable.bits.instruction = 1;
+       cmd.enable.bits.data = (buf != NULL && len > 0);
+       cmd.instruction = opcode;
+       cmd.tx_buf = buf;
+       cmd.buf_len = len;
+       return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE,
+                                     QSPI_IFR_WIDTH_SINGLE_BIT_SPI);
+}
+
+/*
+ * spi_nor write hook: program @len bytes at flash offset @to.
+ * Returns the number of bytes written (always @len here, as the PIO
+ * transfer copies everything) or a negative errno.
+ */
+static ssize_t atmel_qspi_write(struct spi_nor *nor, loff_t to, size_t len,
+                               const u_char *write_buf)
+{
+       struct atmel_qspi *aq = nor->priv;
+       struct atmel_qspi_command cmd;
+       ssize_t ret;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.enable.bits.instruction = 1;
+       cmd.enable.bits.address = nor->addr_width;
+       cmd.enable.bits.data = 1;
+       cmd.instruction = nor->program_opcode;
+       cmd.address = (u32)to;
+       cmd.tx_buf = write_buf;
+       cmd.buf_len = len;
+       ret = atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE_MEM,
+                                    QSPI_IFR_WIDTH_SINGLE_BIT_SPI);
+       return (ret < 0) ? ret : len;
+}
+
+/* spi_nor erase hook: issue the erase opcode for the sector at @offs. */
+static int atmel_qspi_erase(struct spi_nor *nor, loff_t offs)
+{
+       struct atmel_qspi *aq = nor->priv;
+       struct atmel_qspi_command cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.enable.bits.instruction = 1;
+       cmd.enable.bits.address = nor->addr_width;
+       cmd.instruction = nor->erase_opcode;
+       cmd.address = (u32)offs;
+       return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE,
+                                     QSPI_IFR_WIDTH_SINGLE_BIT_SPI);
+}
+
+/*
+ * spi_nor read hook: read @len bytes from flash offset @from, selecting
+ * the bus width from nor->flash_read.  Returns the number of bytes read
+ * (always @len on success) or a negative errno.
+ */
+static ssize_t atmel_qspi_read(struct spi_nor *nor, loff_t from, size_t len,
+                              u_char *read_buf)
+{
+       struct atmel_qspi *aq = nor->priv;
+       struct atmel_qspi_command cmd;
+       u8 num_mode_cycles, num_dummy_cycles;
+       u32 ifr_width;
+       ssize_t ret;
+
+       switch (nor->flash_read) {
+       case SPI_NOR_NORMAL:
+       case SPI_NOR_FAST:
+               ifr_width = QSPI_IFR_WIDTH_SINGLE_BIT_SPI;
+               break;
+
+       case SPI_NOR_DUAL:
+               ifr_width = QSPI_IFR_WIDTH_DUAL_OUTPUT;
+               break;
+
+       case SPI_NOR_QUAD:
+               ifr_width = QSPI_IFR_WIDTH_QUAD_OUTPUT;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       /*
+        * Split nor->read_dummy into up to 2 mode cycles (carrying the mode
+        * byte below) plus plain dummy cycles for the remainder.
+        */
+       if (nor->read_dummy >= 2) {
+               num_mode_cycles = 2;
+               num_dummy_cycles = nor->read_dummy - 2;
+       } else {
+               num_mode_cycles = nor->read_dummy;
+               num_dummy_cycles = 0;
+       }
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.enable.bits.instruction = 1;
+       cmd.enable.bits.address = nor->addr_width;
+       cmd.enable.bits.mode = (num_mode_cycles > 0);
+       cmd.enable.bits.dummy = (num_dummy_cycles > 0);
+       cmd.enable.bits.data = 1;
+       cmd.instruction = nor->read_opcode;
+       cmd.address = (u32)from;
+       cmd.mode = 0xff; /* This value prevents from entering the 0-4-4 mode */
+       cmd.num_mode_cycles = num_mode_cycles;
+       cmd.num_dummy_cycles = num_dummy_cycles;
+       cmd.rx_buf = read_buf;
+       cmd.buf_len = len;
+       ret = atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_READ_MEM,
+                                    ifr_width);
+       return (ret < 0) ? ret : len;
+}
+
+/*
+ * Hardware init: soft-reset the controller, put it in Serial Memory Mode
+ * with 8-bit transfers, program the baudrate divisor and enable it.
+ * Returns -EINVAL if the peripheral clock rate cannot be read.
+ */
+static int atmel_qspi_init(struct atmel_qspi *aq)
+{
+       unsigned long src_rate;
+       u32 mr, scr, scbr;
+
+       /* Reset the QSPI controller */
+       qspi_writel(aq, QSPI_CR, QSPI_CR_SWRST);
+
+       /* Set the QSPI controller in Serial Memory Mode */
+       mr = QSPI_MR_NBBITS(8) | QSPI_MR_SSM;
+       qspi_writel(aq, QSPI_MR, mr);
+
+       src_rate = clk_get_rate(aq->clk);
+       if (!src_rate)
+               return -EINVAL;
+
+       /*
+        * Compute the QSPI baudrate: round up so the serial clock never
+        * exceeds the requested clk_rate.  The decrement suggests the SCBR
+        * field encodes divisor-1 — NOTE(review): confirm against the
+        * SAMA5D2 datasheet.
+        */
+       scbr = DIV_ROUND_UP(src_rate, aq->clk_rate);
+       if (scbr > 0)
+               scbr--;
+       scr = QSPI_SCR_SCBR(scbr);
+       qspi_writel(aq, QSPI_SCR, scr);
+
+       /* Enable the QSPI controller */
+       qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIEN);
+
+       return 0;
+}
+
+/*
+ * IRQ handler: accumulate enabled status bits into aq->pending and wake
+ * the waiter once both command-completion bits (INSTRE | CSR) were seen,
+ * possibly across several interrupts.
+ */
+static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
+{
+       struct atmel_qspi *aq = (struct atmel_qspi *)dev_id;
+       u32 status, mask, pending;
+
+       status = qspi_readl(aq, QSPI_SR);
+       mask = qspi_readl(aq, QSPI_IMR);
+       pending = status & mask;
+
+       if (!pending)
+               return IRQ_NONE;
+
+       aq->pending |= pending;
+       if ((aq->pending & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
+               complete(&aq->cmd_completion);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Probe: map the "qspi_base" register and "qspi_mmap" AHB resources,
+ * enable the peripheral clock, install the IRQ handler, initialize the
+ * controller and register the flash with the spi-nor/MTD frameworks.
+ * Exactly one flash child node is supported in the DT.
+ */
+static int atmel_qspi_probe(struct platform_device *pdev)
+{
+       struct device_node *child, *np = pdev->dev.of_node;
+       struct atmel_qspi *aq;
+       struct resource *res;
+       struct spi_nor *nor;
+       struct mtd_info *mtd;
+       int irq, err = 0;
+
+       /* The bindings allow a single flash child only */
+       if (of_get_child_count(np) != 1)
+               return -ENODEV;
+       child = of_get_next_child(np, NULL);
+
+       aq = devm_kzalloc(&pdev->dev, sizeof(*aq), GFP_KERNEL);
+       if (!aq) {
+               err = -ENOMEM;
+               goto exit;
+       }
+
+       platform_set_drvdata(pdev, aq);
+       init_completion(&aq->cmd_completion);
+       aq->pdev = pdev;
+
+       /* Map the registers */
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base");
+       aq->regs = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(aq->regs)) {
+               dev_err(&pdev->dev, "missing registers\n");
+               err = PTR_ERR(aq->regs);
+               goto exit;
+       }
+
+       /* Map the AHB memory */
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mmap");
+       aq->mem = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(aq->mem)) {
+               dev_err(&pdev->dev, "missing AHB memory\n");
+               err = PTR_ERR(aq->mem);
+               goto exit;
+       }
+
+       /* Get the peripheral clock */
+       aq->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(aq->clk)) {
+               dev_err(&pdev->dev, "missing peripheral clock\n");
+               err = PTR_ERR(aq->clk);
+               goto exit;
+       }
+
+       /* Enable the peripheral clock */
+       err = clk_prepare_enable(aq->clk);
+       if (err) {
+               dev_err(&pdev->dev, "failed to enable the peripheral clock\n");
+               goto exit;
+       }
+
+       /* Request the IRQ */
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(&pdev->dev, "missing IRQ\n");
+               err = irq;
+               goto disable_clk;
+       }
+       err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt,
+                              0, dev_name(&pdev->dev), aq);
+       if (err)
+               goto disable_clk;
+
+       /* Setup the spi-nor */
+       nor = &aq->nor;
+       mtd = &nor->mtd;
+
+       nor->dev = &pdev->dev;
+       spi_nor_set_flash_node(nor, child);
+       nor->priv = aq;
+       mtd->priv = nor;
+
+       nor->read_reg = atmel_qspi_read_reg;
+       nor->write_reg = atmel_qspi_write_reg;
+       nor->read = atmel_qspi_read;
+       nor->write = atmel_qspi_write;
+       nor->erase = atmel_qspi_erase;
+
+       /* Target serial clock rate comes from the flash node */
+       err = of_property_read_u32(child, "spi-max-frequency", &aq->clk_rate);
+       if (err < 0)
+               goto disable_clk;
+
+       err = atmel_qspi_init(aq);
+       if (err)
+               goto disable_clk;
+
+       err = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
+       if (err)
+               goto disable_clk;
+
+       err = mtd_device_register(mtd, NULL, 0);
+       if (err)
+               goto disable_clk;
+
+       of_node_put(child);
+
+       return 0;
+
+disable_clk:
+       clk_disable_unprepare(aq->clk);
+exit:
+       /* Drop the child-node reference on every error path */
+       of_node_put(child);
+
+       return err;
+}
+
+/* Remove: unregister the MTD, disable the controller and its clock. */
+static int atmel_qspi_remove(struct platform_device *pdev)
+{
+       struct atmel_qspi *aq = platform_get_drvdata(pdev);
+
+       mtd_device_unregister(&aq->nor.mtd);
+       qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIDIS);
+       clk_disable_unprepare(aq->clk);
+       return 0;
+}
+
+
+/* Device-tree match table: SAMA5D2 QSPI controller. */
+static const struct of_device_id atmel_qspi_dt_ids[] = {
+       { .compatible = "atmel,sama5d2-qspi" },
+       { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_qspi_dt_ids);
+
+static struct platform_driver atmel_qspi_driver = {
+       .driver = {
+               .name   = "atmel_qspi",
+               .of_match_table = atmel_qspi_dt_ids,
+       },
+       .probe          = atmel_qspi_probe,
+       .remove         = atmel_qspi_remove,
+};
+module_platform_driver(atmel_qspi_driver);
+
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@atmel.com>");
+MODULE_DESCRIPTION("Atmel QSPI Controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
new file mode 100644 (file)
index 0000000..d403ba7
--- /dev/null
@@ -0,0 +1,1299 @@
+/*
+ * Driver for Cadence QSPI Controller
+ *
+ * Copyright Altera Corporation (C) 2012-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spi/spi.h>
+#include <linux/timer.h>
+
+#define CQSPI_NAME                     "cadence-qspi"
+#define CQSPI_MAX_CHIPSELECT           16
+
+struct cqspi_st;
+
+/*
+ * Per-chip-select flash description.  One instance exists for each flash
+ * device attached to the controller; @nor is the spi-nor framework object
+ * whose ->priv points back at this structure.
+ */
+struct cqspi_flash_pdata {
+       struct spi_nor  nor;
+       struct cqspi_st *cqspi;         /* owning controller */
+       u32             clk_rate;       /* requested SCLK, Hz (from DT) */
+       u32             read_delay;     /* read-capture delay, ref-clk cycles */
+       /* Device timing parameters from DT, in nanoseconds. */
+       u32             tshsl_ns;
+       u32             tsd2d_ns;
+       u32             tchsh_ns;
+       u32             tslch_ns;
+       /* Lane widths (CQSPI_INST_TYPE_*) for instruction/address/data. */
+       u8              inst_width;
+       u8              addr_width;
+       u8              data_width;
+       u8              cs;             /* chip-select index */
+       bool            registered;     /* mtd_device_register() succeeded */
+};
+
+/*
+ * Controller state.  The current_* fields cache what is programmed into
+ * the hardware so reconfiguration can be skipped when switching between
+ * flashes with identical settings.
+ */
+struct cqspi_st {
+       struct platform_device  *pdev;
+
+       struct clk              *clk;
+       unsigned int            sclk;   /* currently programmed SCLK, Hz */
+
+       void __iomem            *iobase;        /* register window */
+       void __iomem            *ahb_base;      /* data (AHB) window */
+       struct completion       transfer_complete;
+       struct mutex            bus_mutex;      /* serializes bus access */
+
+       int                     current_cs;
+       int                     current_page_size;
+       int                     current_erase_size;
+       int                     current_addr_width;
+       unsigned long           master_ref_clk_hz;
+       bool                    is_decoded_cs;  /* external CS decoder in use */
+       u32                     fifo_depth;     /* SRAM FIFO depth, words */
+       u32                     fifo_width;     /* FIFO word width, bytes */
+       u32                     trigger_address;
+       struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
+};
+
+/* Operation timeout value */
+#define CQSPI_TIMEOUT_MS                       500
+#define CQSPI_READ_TIMEOUT_MS                  10
+
+/* Instruction type */
+#define CQSPI_INST_TYPE_SINGLE                 0
+#define CQSPI_INST_TYPE_DUAL                   1
+#define CQSPI_INST_TYPE_QUAD                   2
+
+#define CQSPI_DUMMY_CLKS_PER_BYTE              8
+#define CQSPI_DUMMY_BYTES_MAX                  4
+#define CQSPI_DUMMY_CLKS_MAX                   31
+
+#define CQSPI_STIG_DATA_LEN_MAX                        8
+
+/* Register map */
+#define CQSPI_REG_CONFIG                       0x00
+#define CQSPI_REG_CONFIG_ENABLE_MASK           BIT(0)
+#define CQSPI_REG_CONFIG_DECODE_MASK           BIT(9)
+#define CQSPI_REG_CONFIG_CHIPSELECT_LSB                10
+#define CQSPI_REG_CONFIG_DMA_MASK              BIT(15)
+#define CQSPI_REG_CONFIG_BAUD_LSB              19
+#define CQSPI_REG_CONFIG_IDLE_LSB              31
+#define CQSPI_REG_CONFIG_CHIPSELECT_MASK       0xF
+#define CQSPI_REG_CONFIG_BAUD_MASK             0xF
+
+#define CQSPI_REG_RD_INSTR                     0x04
+#define CQSPI_REG_RD_INSTR_OPCODE_LSB          0
+#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB      8
+#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB       12
+#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB       16
+#define CQSPI_REG_RD_INSTR_MODE_EN_LSB         20
+#define CQSPI_REG_RD_INSTR_DUMMY_LSB           24
+#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK     0x3
+#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK      0x3
+#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK      0x3
+#define CQSPI_REG_RD_INSTR_DUMMY_MASK          0x1F
+
+#define CQSPI_REG_WR_INSTR                     0x08
+#define CQSPI_REG_WR_INSTR_OPCODE_LSB          0
+#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB       12
+#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB       16
+
+#define CQSPI_REG_DELAY                                0x0C
+#define CQSPI_REG_DELAY_TSLCH_LSB              0
+#define CQSPI_REG_DELAY_TCHSH_LSB              8
+#define CQSPI_REG_DELAY_TSD2D_LSB              16
+#define CQSPI_REG_DELAY_TSHSL_LSB              24
+#define CQSPI_REG_DELAY_TSLCH_MASK             0xFF
+#define CQSPI_REG_DELAY_TCHSH_MASK             0xFF
+#define CQSPI_REG_DELAY_TSD2D_MASK             0xFF
+#define CQSPI_REG_DELAY_TSHSL_MASK             0xFF
+
+#define CQSPI_REG_READCAPTURE                  0x10
+#define CQSPI_REG_READCAPTURE_BYPASS_LSB       0
+#define CQSPI_REG_READCAPTURE_DELAY_LSB                1
+#define CQSPI_REG_READCAPTURE_DELAY_MASK       0xF
+
+#define CQSPI_REG_SIZE                         0x14
+#define CQSPI_REG_SIZE_ADDRESS_LSB             0
+#define CQSPI_REG_SIZE_PAGE_LSB                        4
+#define CQSPI_REG_SIZE_BLOCK_LSB               16
+#define CQSPI_REG_SIZE_ADDRESS_MASK            0xF
+#define CQSPI_REG_SIZE_PAGE_MASK               0xFFF
+#define CQSPI_REG_SIZE_BLOCK_MASK              0x3F
+
+#define CQSPI_REG_SRAMPARTITION                        0x18
+#define CQSPI_REG_INDIRECTTRIGGER              0x1C
+
+#define CQSPI_REG_DMA                          0x20
+#define CQSPI_REG_DMA_SINGLE_LSB               0
+#define CQSPI_REG_DMA_BURST_LSB                        8
+#define CQSPI_REG_DMA_SINGLE_MASK              0xFF
+#define CQSPI_REG_DMA_BURST_MASK               0xFF
+
+#define CQSPI_REG_REMAP                                0x24
+#define CQSPI_REG_MODE_BIT                     0x28
+
+#define CQSPI_REG_SDRAMLEVEL                   0x2C
+#define CQSPI_REG_SDRAMLEVEL_RD_LSB            0
+#define CQSPI_REG_SDRAMLEVEL_WR_LSB            16
+#define CQSPI_REG_SDRAMLEVEL_RD_MASK           0xFFFF
+#define CQSPI_REG_SDRAMLEVEL_WR_MASK           0xFFFF
+
+#define CQSPI_REG_IRQSTATUS                    0x40
+#define CQSPI_REG_IRQMASK                      0x44
+
+#define CQSPI_REG_INDIRECTRD                   0x60
+#define CQSPI_REG_INDIRECTRD_START_MASK                BIT(0)
+#define CQSPI_REG_INDIRECTRD_CANCEL_MASK       BIT(1)
+#define CQSPI_REG_INDIRECTRD_DONE_MASK         BIT(5)
+
+#define CQSPI_REG_INDIRECTRDWATERMARK          0x64
+#define CQSPI_REG_INDIRECTRDSTARTADDR          0x68
+#define CQSPI_REG_INDIRECTRDBYTES              0x6C
+
+#define CQSPI_REG_CMDCTRL                      0x90
+#define CQSPI_REG_CMDCTRL_EXECUTE_MASK         BIT(0)
+#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK      BIT(1)
+#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB         12
+#define CQSPI_REG_CMDCTRL_WR_EN_LSB            15
+#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB                16
+#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB          19
+#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB         20
+#define CQSPI_REG_CMDCTRL_RD_EN_LSB            23
+#define CQSPI_REG_CMDCTRL_OPCODE_LSB           24
+#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK                0x7
+#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK       0x3
+#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK                0x7
+
+#define CQSPI_REG_INDIRECTWR                   0x70
+#define CQSPI_REG_INDIRECTWR_START_MASK                BIT(0)
+#define CQSPI_REG_INDIRECTWR_CANCEL_MASK       BIT(1)
+#define CQSPI_REG_INDIRECTWR_DONE_MASK         BIT(5)
+
+#define CQSPI_REG_INDIRECTWRWATERMARK          0x74
+#define CQSPI_REG_INDIRECTWRSTARTADDR          0x78
+#define CQSPI_REG_INDIRECTWRBYTES              0x7C
+
+#define CQSPI_REG_CMDADDRESS                   0x94
+#define CQSPI_REG_CMDREADDATALOWER             0xA0
+#define CQSPI_REG_CMDREADDATAUPPER             0xA4
+#define CQSPI_REG_CMDWRITEDATALOWER            0xA8
+#define CQSPI_REG_CMDWRITEDATAUPPER            0xAC
+
+/* Interrupt status bits */
+#define CQSPI_REG_IRQ_MODE_ERR                 BIT(0)
+#define CQSPI_REG_IRQ_UNDERFLOW                        BIT(1)
+#define CQSPI_REG_IRQ_IND_COMP                 BIT(2)
+#define CQSPI_REG_IRQ_IND_RD_REJECT            BIT(3)
+#define CQSPI_REG_IRQ_WR_PROTECTED_ERR         BIT(4)
+#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR          BIT(5)
+#define CQSPI_REG_IRQ_WATERMARK                        BIT(6)
+#define CQSPI_REG_IRQ_IND_SRAM_FULL            BIT(12)
+
+#define CQSPI_IRQ_MASK_RD              (CQSPI_REG_IRQ_WATERMARK        | \
+                                        CQSPI_REG_IRQ_IND_SRAM_FULL    | \
+                                        CQSPI_REG_IRQ_IND_COMP)
+
+#define CQSPI_IRQ_MASK_WR              (CQSPI_REG_IRQ_IND_COMP         | \
+                                        CQSPI_REG_IRQ_WATERMARK        | \
+                                        CQSPI_REG_IRQ_UNDERFLOW)
+
+#define CQSPI_IRQ_STATUS_MASK          0x1FFFF
+
+/*
+ * Poll @reg until all bits in @mask are set (or, when @clear is true,
+ * until they are all cleared), bounded by CQSPI_TIMEOUT_MS.
+ *
+ * Returns 0 on success, -ETIMEDOUT if the condition was not met in time.
+ */
+static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clear)
+{
+       unsigned long end = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
+       u32 val;
+
+       while (1) {
+               val = readl(reg);
+               if (clear)
+                       val = ~val;
+               val &= mask;
+
+               if (val == mask)
+                       return 0;
+
+               if (time_after(jiffies, end))
+                       return -ETIMEDOUT;
+
+               /*
+                * Don't hammer the MMIO register back to back; matches
+                * the polling convention used by cqspi_wait_idle().
+                */
+               cpu_relax();
+       }
+}
+
+/* True when the controller reports idle via the CONFIG register. */
+static bool cqspi_is_idle(struct cqspi_st *cqspi)
+{
+       const u32 cfg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+
+       return (cfg >> CQSPI_REG_CONFIG_IDLE_LSB) & 1;
+}
+
+/* Read-FIFO fill level (in FIFO words) from the SDRAMLEVEL register. */
+static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
+{
+       const u32 level = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL);
+
+       return (level >> CQSPI_REG_SDRAMLEVEL_RD_LSB) &
+              CQSPI_REG_SDRAMLEVEL_RD_MASK;
+}
+
+/*
+ * Interrupt handler: acknowledge every pending status bit, then wake the
+ * waiter in the indirect read/write paths if one of the events they poll
+ * for (watermark, SRAM full, indirect complete, underflow) fired.
+ */
+static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
+{
+       struct cqspi_st *cqspi = dev;
+       unsigned int irq_status;
+
+       /* Read interrupt status */
+       irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);
+
+       /* Clear interrupt */
+       writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);
+
+       /* Only the indirect-transfer events are of interest here. */
+       irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
+
+       if (irq_status)
+               complete(&cqspi->transfer_complete);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Build the lane-width (instruction/address/data) fields of the
+ * read-instruction register from the per-flash configuration.
+ * @opcode is currently unused; kept for interface symmetry.
+ */
+static unsigned int cqspi_calc_rdreg(struct spi_nor *nor, const u8 opcode)
+{
+       const struct cqspi_flash_pdata *f_pdata = nor->priv;
+
+       return (f_pdata->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB) |
+              (f_pdata->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB) |
+              (f_pdata->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB);
+}
+
+/*
+ * Wait until the controller is stably idle: the idle bit must be observed
+ * set on several consecutive reads, since it can glitch low again right
+ * after going high.  Bounded by CQSPI_TIMEOUT_MS.
+ *
+ * Returns 0 on success, -ETIMEDOUT if the controller stayed busy.
+ */
+static int cqspi_wait_idle(struct cqspi_st *cqspi)
+{
+       const unsigned int poll_idle_retry = 3;
+       unsigned int count = 0;
+       unsigned long timeout;
+
+       timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
+       while (1) {
+               /*
+                * Read few times in succession to ensure the controller
+                * is indeed idle, that is, the bit does not transition
+                * low again.
+                */
+               if (cqspi_is_idle(cqspi))
+                       count++;
+               else
+                       count = 0;
+
+               if (count >= poll_idle_retry)
+                       return 0;
+
+               if (time_after(jiffies, timeout)) {
+                       /* Timeout, in busy mode. */
+                       dev_err(&cqspi->pdev->dev,
+                               "QSPI is still busy after %dms timeout.\n",
+                               CQSPI_TIMEOUT_MS);
+                       return -ETIMEDOUT;
+               }
+
+               cpu_relax();
+       }
+}
+
+/*
+ * Execute a STIG (software-triggered instruction generator) command
+ * already encoded in @reg: program CMDCTRL, set the execute bit, then
+ * poll until the in-progress bit clears and the controller goes idle.
+ *
+ * Returns 0 on success or a negative errno on timeout.
+ */
+static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
+{
+       void __iomem *reg_base = cqspi->iobase;
+       int ret;
+
+       /* Write the CMDCTRL without start execution. */
+       writel(reg, reg_base + CQSPI_REG_CMDCTRL);
+       /* Start execute */
+       reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
+       writel(reg, reg_base + CQSPI_REG_CMDCTRL);
+
+       /* Polling for completion. */
+       ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_CMDCTRL,
+                                CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1);
+       if (ret) {
+               dev_err(&cqspi->pdev->dev,
+                       "Flash command execution timed out.\n");
+               return ret;
+       }
+
+       /* Polling QSPI idle status. */
+       return cqspi_wait_idle(cqspi);
+}
+
+static int cqspi_command_read(struct spi_nor *nor,
+                             const u8 *txbuf, const unsigned n_tx,
+                             u8 *rxbuf, const unsigned n_rx)
+{
+       struct cqspi_flash_pdata *f_pdata = nor->priv;
+       struct cqspi_st *cqspi = f_pdata->cqspi;
+       void __iomem *reg_base = cqspi->iobase;
+       unsigned int rdreg;
+       unsigned int reg;
+       unsigned int read_len;
+       int status;
+
+       if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
+               dev_err(nor->dev, "Invalid input argument, len %d rxbuf 0x%p\n",
+                       n_rx, rxbuf);
+               return -EINVAL;
+       }
+
+       reg = txbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+
+       rdreg = cqspi_calc_rdreg(nor, txbuf[0]);
+       writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);
+
+       reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
+
+       /* 0 means 1 byte. */
+       reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
+               << CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
+       status = cqspi_exec_flash_cmd(cqspi, reg);
+       if (status)
+               return status;
+
+       reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);
+
+       /* Put the read value into rx_buf */
+       read_len = (n_rx > 4) ? 4 : n_rx;
+       memcpy(rxbuf, &reg, read_len);
+       rxbuf += read_len;
+
+       if (n_rx > 4) {
+               reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);
+
+               read_len = n_rx - read_len;
+               memcpy(rxbuf, &reg, read_len);
+       }
+
+       return 0;
+}
+
+/*
+ * Write up to 4 bytes of payload via a STIG command.  @n_tx may be 0 for
+ * a bare opcode (e.g. WREN).
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int cqspi_command_write(struct spi_nor *nor, const u8 opcode,
+                              const u8 *txbuf, const unsigned n_tx)
+{
+       struct cqspi_flash_pdata *f_pdata = nor->priv;
+       struct cqspi_st *cqspi = f_pdata->cqspi;
+       void __iomem *reg_base = cqspi->iobase;
+       unsigned int reg;
+       unsigned int data;
+       int ret;
+
+       if (n_tx > 4 || (n_tx && !txbuf)) {
+               /* %u: n_tx is unsigned (was %d, a -Wformat mismatch). */
+               dev_err(nor->dev,
+                       "Invalid input argument, cmdlen %u txbuf 0x%p\n",
+                       n_tx, txbuf);
+               return -EINVAL;
+       }
+
+       reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+       if (n_tx) {
+               reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
+               /* 0 means 1 byte. */
+               reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
+                       << CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
+               data = 0;
+               memcpy(&data, txbuf, n_tx);
+               writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);
+       }
+
+       ret = cqspi_exec_flash_cmd(cqspi, reg);
+       return ret;
+}
+
+/*
+ * Issue a STIG command that carries a flash address (e.g. sector erase):
+ * opcode plus nor->addr_width address bytes, no data phase.
+ */
+static int cqspi_command_write_addr(struct spi_nor *nor,
+                                   const u8 opcode, const unsigned int addr)
+{
+       struct cqspi_flash_pdata *f_pdata = nor->priv;
+       struct cqspi_st *cqspi = f_pdata->cqspi;
+       void __iomem *reg_base = cqspi->iobase;
+       unsigned int cmd;
+
+       /* Latch the address the command will operate on. */
+       writel(addr, reg_base + CQSPI_REG_CMDADDRESS);
+
+       /* Opcode, address-enable, and address byte count (0 == 1 byte). */
+       cmd = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+       cmd |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
+       cmd |= ((nor->addr_width - 1) & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
+               << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
+
+       return cqspi_exec_flash_cmd(cqspi, cmd);
+}
+
+/*
+ * Program the controller for an indirect read from @from_addr: start
+ * address, read opcode and lane widths, dummy-clock count, mode bits,
+ * and the flash address width.  Always returns 0.
+ */
+static int cqspi_indirect_read_setup(struct spi_nor *nor,
+                                    const unsigned int from_addr)
+{
+       struct cqspi_flash_pdata *f_pdata = nor->priv;
+       struct cqspi_st *cqspi = f_pdata->cqspi;
+       void __iomem *reg_base = cqspi->iobase;
+       unsigned int dummy_clk = 0;
+       unsigned int reg;
+
+       writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
+
+       reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
+       reg |= cqspi_calc_rdreg(nor, nor->read_opcode);
+
+       /* Setup dummy clock cycles */
+       dummy_clk = nor->read_dummy;
+       if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
+               dummy_clk = CQSPI_DUMMY_CLKS_MAX;
+
+       /* A full byte of dummies implies the opcode uses a mode byte. */
+       if (dummy_clk / 8) {
+               reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
+               /* Set mode bits high to ensure chip doesn't enter XIP */
+               writel(0xFF, reg_base + CQSPI_REG_MODE_BIT);
+
+               /* Need to subtract the mode byte (8 clocks). */
+               if (f_pdata->inst_width != CQSPI_INST_TYPE_QUAD)
+                       dummy_clk -= 8;
+
+               if (dummy_clk)
+                       reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
+                              << CQSPI_REG_RD_INSTR_DUMMY_LSB;
+       }
+
+       writel(reg, reg_base + CQSPI_REG_RD_INSTR);
+
+       /* Set address width */
+       reg = readl(reg_base + CQSPI_REG_SIZE);
+       reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
+       reg |= (nor->addr_width - 1);
+       writel(reg, reg_base + CQSPI_REG_SIZE);
+       return 0;
+}
+
+/*
+ * Run an indirect read of @n_rx bytes into @rxbuf: start the transfer,
+ * then drain the controller's SRAM FIFO each time the IRQ handler
+ * signals data availability via transfer_complete.
+ *
+ * Returns 0 on success, -ETIMEDOUT (or another negative errno) on error;
+ * the failrd path cancels the in-flight transfer.
+ */
+static int cqspi_indirect_read_execute(struct spi_nor *nor,
+                                      u8 *rxbuf, const unsigned n_rx)
+{
+       struct cqspi_flash_pdata *f_pdata = nor->priv;
+       struct cqspi_st *cqspi = f_pdata->cqspi;
+       void __iomem *reg_base = cqspi->iobase;
+       void __iomem *ahb_base = cqspi->ahb_base;
+       unsigned int remaining = n_rx;
+       unsigned int bytes_to_read = 0;
+       int ret = 0;
+
+       writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);
+
+       /* Clear all interrupts. */
+       writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
+
+       writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
+
+       reinit_completion(&cqspi->transfer_complete);
+       writel(CQSPI_REG_INDIRECTRD_START_MASK,
+              reg_base + CQSPI_REG_INDIRECTRD);
+
+       while (remaining > 0) {
+               ret = wait_for_completion_timeout(&cqspi->transfer_complete,
+                                                 msecs_to_jiffies
+                                                 (CQSPI_READ_TIMEOUT_MS));
+
+               bytes_to_read = cqspi_get_rd_sram_level(cqspi);
+
+               /* Timed out AND no data arrived: the transfer is stuck. */
+               if (!ret && bytes_to_read == 0) {
+                       dev_err(nor->dev, "Indirect read timeout, no bytes\n");
+                       ret = -ETIMEDOUT;
+                       goto failrd;
+               }
+
+               while (bytes_to_read != 0) {
+                       /* FIFO level is in words; convert, clamp, drain. */
+                       bytes_to_read *= cqspi->fifo_width;
+                       bytes_to_read = bytes_to_read > remaining ?
+                                       remaining : bytes_to_read;
+                       readsl(ahb_base, rxbuf, DIV_ROUND_UP(bytes_to_read, 4));
+                       rxbuf += bytes_to_read;
+                       remaining -= bytes_to_read;
+                       bytes_to_read = cqspi_get_rd_sram_level(cqspi);
+               }
+
+               if (remaining > 0)
+                       reinit_completion(&cqspi->transfer_complete);
+       }
+
+       /* Check indirect done status */
+       ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD,
+                                CQSPI_REG_INDIRECTRD_DONE_MASK, 0);
+       if (ret) {
+               dev_err(nor->dev,
+                       "Indirect read completion error (%i)\n", ret);
+               goto failrd;
+       }
+
+       /* Disable interrupt */
+       writel(0, reg_base + CQSPI_REG_IRQMASK);
+
+       /* Clear indirect completion status */
+       writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD);
+
+       return 0;
+
+failrd:
+       /* Disable interrupt */
+       writel(0, reg_base + CQSPI_REG_IRQMASK);
+
+       /*
+        * Cancel the indirect read.  Use the INDIRECTRD cancel constant:
+        * the original wrote CQSPI_REG_INDIRECTWR_CANCEL_MASK here, which
+        * happens to share the same bit value but names the wrong register.
+        */
+       writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
+              reg_base + CQSPI_REG_INDIRECTRD);
+       return ret;
+}
+
+/*
+ * Program the controller for an indirect write to @to_addr: program
+ * opcode, lane widths, start address, and flash address width.
+ * Always returns 0.
+ */
+static int cqspi_indirect_write_setup(struct spi_nor *nor,
+                                     const unsigned int to_addr)
+{
+       unsigned int reg;
+       struct cqspi_flash_pdata *f_pdata = nor->priv;
+       struct cqspi_st *cqspi = f_pdata->cqspi;
+       void __iomem *reg_base = cqspi->iobase;
+
+       /* Set opcode. */
+       reg = nor->program_opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
+       writel(reg, reg_base + CQSPI_REG_WR_INSTR);
+       /* Lane widths are shared with the read-instruction register. */
+       reg = cqspi_calc_rdreg(nor, nor->program_opcode);
+       writel(reg, reg_base + CQSPI_REG_RD_INSTR);
+
+       writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
+
+       /* Set address width (0 == 1 byte). */
+       reg = readl(reg_base + CQSPI_REG_SIZE);
+       reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
+       reg |= (nor->addr_width - 1);
+       writel(reg, reg_base + CQSPI_REG_SIZE);
+       return 0;
+}
+
+/*
+ * Run an indirect write of @n_tx bytes from @txbuf, feeding the FIFO one
+ * flash page at a time and waiting for the IRQ handler to signal progress
+ * via transfer_complete.
+ *
+ * Returns 0 on success, a negative errno on timeout/error; the failwr
+ * path cancels the in-flight transfer.
+ */
+static int cqspi_indirect_write_execute(struct spi_nor *nor,
+                                       const u8 *txbuf, const unsigned n_tx)
+{
+       const unsigned int page_size = nor->page_size;
+       struct cqspi_flash_pdata *f_pdata = nor->priv;
+       struct cqspi_st *cqspi = f_pdata->cqspi;
+       void __iomem *reg_base = cqspi->iobase;
+       unsigned int remaining = n_tx;
+       unsigned int write_bytes;
+       int ret;
+
+       writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);
+
+       /* Clear all interrupts. */
+       writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
+
+       writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK);
+
+       reinit_completion(&cqspi->transfer_complete);
+       writel(CQSPI_REG_INDIRECTWR_START_MASK,
+              reg_base + CQSPI_REG_INDIRECTWR);
+
+       while (remaining > 0) {
+               /* At most one flash page per burst into the AHB window. */
+               write_bytes = remaining > page_size ? page_size : remaining;
+               writesl(cqspi->ahb_base, txbuf, DIV_ROUND_UP(write_bytes, 4));
+
+               ret = wait_for_completion_timeout(&cqspi->transfer_complete,
+                                                 msecs_to_jiffies
+                                                 (CQSPI_TIMEOUT_MS));
+               if (!ret) {
+                       dev_err(nor->dev, "Indirect write timeout\n");
+                       ret = -ETIMEDOUT;
+                       goto failwr;
+               }
+
+               txbuf += write_bytes;
+               remaining -= write_bytes;
+
+               if (remaining > 0)
+                       reinit_completion(&cqspi->transfer_complete);
+       }
+
+       /* Check indirect done status */
+       ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR,
+                                CQSPI_REG_INDIRECTWR_DONE_MASK, 0);
+       if (ret) {
+               dev_err(nor->dev,
+                       "Indirect write completion error (%i)\n", ret);
+               goto failwr;
+       }
+
+       /* Disable interrupt. */
+       writel(0, reg_base + CQSPI_REG_IRQMASK);
+
+       /* Clear indirect completion status */
+       writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR);
+
+       /* Best-effort settle; the write already completed successfully. */
+       cqspi_wait_idle(cqspi);
+
+       return 0;
+
+failwr:
+       /* Disable interrupt. */
+       writel(0, reg_base + CQSPI_REG_IRQMASK);
+
+       /* Cancel the indirect write */
+       writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
+              reg_base + CQSPI_REG_INDIRECTWR);
+       return ret;
+}
+
+/*
+ * Program the chip-select field of the CONFIG register for the flash
+ * behind @nor, handling both decoded (binary) and direct (one-cold)
+ * chip-select encodings.
+ */
+static void cqspi_chipselect(struct spi_nor *nor)
+{
+       struct cqspi_flash_pdata *f_pdata = nor->priv;
+       struct cqspi_st *cqspi = f_pdata->cqspi;
+       void __iomem *reg_base = cqspi->iobase;
+       unsigned int chip_select = f_pdata->cs;
+       unsigned int reg;
+
+       reg = readl(reg_base + CQSPI_REG_CONFIG);
+       if (cqspi->is_decoded_cs) {
+               reg |= CQSPI_REG_CONFIG_DECODE_MASK;
+       } else {
+               reg &= ~CQSPI_REG_CONFIG_DECODE_MASK;
+
+               /* Convert CS if without decoder.
+                * CS0 to 4b'1110
+                * CS1 to 4b'1101
+                * CS2 to 4b'1011
+                * CS3 to 4b'0111
+                */
+               chip_select = 0xF & ~(1 << chip_select);
+       }
+
+       reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
+                << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
+       reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
+           << CQSPI_REG_CONFIG_CHIPSELECT_LSB;
+       writel(reg, reg_base + CQSPI_REG_CONFIG);
+}
+
+/*
+ * Program the flash geometry (page size, erase-block size, address
+ * width) and the chip select, then cache the values so cqspi_configure()
+ * can skip redundant reprogramming.
+ */
+static void cqspi_configure_cs_and_sizes(struct spi_nor *nor)
+{
+       struct cqspi_flash_pdata *f_pdata = nor->priv;
+       struct cqspi_st *cqspi = f_pdata->cqspi;
+       void __iomem *iobase = cqspi->iobase;
+       unsigned int reg;
+
+       /* configure page size and block size. */
+       reg = readl(iobase + CQSPI_REG_SIZE);
+       reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
+       reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
+       reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
+       reg |= (nor->page_size << CQSPI_REG_SIZE_PAGE_LSB);
+       /* Block size is programmed as log2 of the erase size. */
+       reg |= (ilog2(nor->mtd.erasesize) << CQSPI_REG_SIZE_BLOCK_LSB);
+       reg |= (nor->addr_width - 1);
+       writel(reg, iobase + CQSPI_REG_SIZE);
+
+       /* configure the chip select */
+       cqspi_chipselect(nor);
+
+       /* Store the new configuration of the controller */
+       cqspi->current_page_size = nor->page_size;
+       cqspi->current_erase_size = nor->mtd.erasesize;
+       cqspi->current_addr_width = nor->addr_width;
+}
+
+/*
+ * Convert a nanosecond delay into reference-clock ticks, rounding up so
+ * the programmed delay is never shorter than requested.  Working in kHz
+ * first keeps the intermediate product within unsigned range.
+ */
+static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
+                                          const unsigned int ns_val)
+{
+       const unsigned int ref_clk_khz = ref_clk_hz / 1000;
+
+       return DIV_ROUND_UP(ref_clk_khz * ns_val, 1000000);
+}
+
+/*
+ * Program the four device delay parameters (TSHSL, TCHSH, TSLCH, TSD2D)
+ * into the DELAY register, converting each from nanoseconds to reference
+ * clock ticks.  TSHSL is clamped to at least one SCLK period.
+ */
+static void cqspi_delay(struct spi_nor *nor)
+{
+       struct cqspi_flash_pdata *f_pdata = nor->priv;
+       struct cqspi_st *cqspi = f_pdata->cqspi;
+       void __iomem *iobase = cqspi->iobase;
+       const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
+       unsigned int tshsl, tchsh, tslch, tsd2d;
+       unsigned int reg;
+       unsigned int tsclk;
+
+       /* calculate the number of ref ticks for one sclk tick */
+       tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk);
+
+       tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns);
+       /* this particular value must be at least one sclk */
+       if (tshsl < tsclk)
+               tshsl = tsclk;
+
+       tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns);
+       tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns);
+       tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns);
+
+       reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
+              << CQSPI_REG_DELAY_TSHSL_LSB;
+       reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
+               << CQSPI_REG_DELAY_TCHSH_LSB;
+       reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK)
+               << CQSPI_REG_DELAY_TSLCH_LSB;
+       reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
+               << CQSPI_REG_DELAY_TSD2D_LSB;
+       writel(reg, iobase + CQSPI_REG_DELAY);
+}
+
+/*
+ * Program the baud-rate divisor so the generated SCLK does not exceed
+ * cqspi->sclk.  The hardware divides the reference clock by 2*(div+1),
+ * hence the round-up then minus one.
+ */
+static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
+{
+       const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
+       void __iomem *reg_base = cqspi->iobase;
+       u32 reg, div;
+
+       /* Recalculate the baudrate divisor based on QSPI specification. */
+       div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1;
+
+       reg = readl(reg_base + CQSPI_REG_CONFIG);
+       reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
+       reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
+       writel(reg, reg_base + CQSPI_REG_CONFIG);
+}
+
+/*
+ * Read-modify-write the READCAPTURE register: set or clear the loopback
+ * bypass bit and program the capture delay field.
+ */
+static void cqspi_readdata_capture(struct cqspi_st *cqspi,
+                                  const unsigned int bypass,
+                                  const unsigned int delay)
+{
+       void __iomem *reg_base = cqspi->iobase;
+       u32 val;
+
+       val = readl(reg_base + CQSPI_REG_READCAPTURE);
+
+       if (bypass)
+               val |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
+       else
+               val &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
+
+       val &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK
+                << CQSPI_REG_READCAPTURE_DELAY_LSB);
+       val |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK)
+               << CQSPI_REG_READCAPTURE_DELAY_LSB;
+
+       writel(val, reg_base + CQSPI_REG_READCAPTURE);
+}
+
+/* Gate the whole controller on or off via the CONFIG enable bit. */
+static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
+{
+       void __iomem *reg_base = cqspi->iobase;
+       u32 cfg = readl(reg_base + CQSPI_REG_CONFIG);
+
+       if (enable)
+               cfg |= CQSPI_REG_CONFIG_ENABLE_MASK;
+       else
+               cfg &= ~CQSPI_REG_CONFIG_ENABLE_MASK;
+
+       writel(cfg, reg_base + CQSPI_REG_CONFIG);
+}
+
+/*
+ * Reconfigure the controller for @nor if anything relevant changed since
+ * the last transfer: chip select, geometry, or clock rate.  The
+ * controller is disabled around reprogramming, as the hardware requires.
+ */
+static void cqspi_configure(struct spi_nor *nor)
+{
+       struct cqspi_flash_pdata *f_pdata = nor->priv;
+       struct cqspi_st *cqspi = f_pdata->cqspi;
+       const unsigned int sclk = f_pdata->clk_rate;
+       int switch_cs = (cqspi->current_cs != f_pdata->cs);
+       int switch_ck = (cqspi->sclk != sclk);
+
+       /* Geometry changes are handled through the CS-switch path too. */
+       if ((cqspi->current_page_size != nor->page_size) ||
+           (cqspi->current_erase_size != nor->mtd.erasesize) ||
+           (cqspi->current_addr_width != nor->addr_width))
+               switch_cs = 1;
+
+       if (switch_cs || switch_ck)
+               cqspi_controller_enable(cqspi, 0);
+
+       /* Switch chip select. */
+       if (switch_cs) {
+               cqspi->current_cs = f_pdata->cs;
+               cqspi_configure_cs_and_sizes(nor);
+       }
+
+       /* Setup baudrate divisor and delays */
+       if (switch_ck) {
+               cqspi->sclk = sclk;
+               cqspi_config_baudrate_div(cqspi);
+               cqspi_delay(nor);
+               cqspi_readdata_capture(cqspi, 1, f_pdata->read_delay);
+       }
+
+       if (switch_cs || switch_ck)
+               cqspi_controller_enable(cqspi, 1);
+}
+
+/*
+ * Select lane widths for the next transfer and apply the configuration.
+ * Commands/writes (@read == 0) always run single-lane; reads pick the
+ * data-lane width from nor->flash_read.
+ *
+ * Returns 0 on success, -EINVAL for an unknown read mode.
+ */
+static int cqspi_set_protocol(struct spi_nor *nor, const int read)
+{
+       struct cqspi_flash_pdata *f_pdata = nor->priv;
+
+       /* Instruction and address phases are always single-lane here. */
+       f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
+       f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
+       f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
+
+       if (read) {
+               switch (nor->flash_read) {
+               case SPI_NOR_NORMAL:
+               case SPI_NOR_FAST:
+                       f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
+                       break;
+               case SPI_NOR_DUAL:
+                       f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
+                       break;
+               case SPI_NOR_QUAD:
+                       f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       }
+
+       cqspi_configure(nor);
+
+       return 0;
+}
+
+/*
+ * spi-nor ->write hook: indirect-write @len bytes from @buf to flash
+ * offset @to.  Returns @len on success or a negative errno.
+ */
+static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
+                          size_t len, const u_char *buf)
+{
+       int ret;
+
+       /* Command/address phases run single-lane for program operations. */
+       ret = cqspi_set_protocol(nor, 0);
+       if (ret)
+               return ret;
+
+       ret = cqspi_indirect_write_setup(nor, to);
+       if (ret)
+               return ret;
+
+       ret = cqspi_indirect_write_execute(nor, buf, len);
+       if (ret)
+               return ret;
+
+       /*
+        * All failure paths returned above, so ret is 0 here; the former
+        * "(ret < 0) ? ret : len" ternary was dead code.
+        */
+       return len;
+}
+
+/*
+ * spi_nor read hook: fetch @len bytes at flash offset @from into @buf
+ * through the controller's indirect read engine.
+ *
+ * Returns the number of bytes read, or a negative error code.
+ */
+static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
+                         size_t len, u_char *buf)
+{
+       int ret;
+
+       ret = cqspi_set_protocol(nor, 1);
+       if (ret)
+               return ret;
+
+       ret = cqspi_indirect_read_setup(nor, from);
+       if (ret)
+               return ret;
+
+       ret = cqspi_indirect_read_execute(nor, buf, len);
+       if (ret)
+               return ret;
+
+       /*
+        * ret is provably 0 at this point, so the former
+        * "(ret < 0) ? ret : len" was dead code.
+        */
+       return len;
+}
+
+/*
+ * spi_nor erase hook: issue WREN followed by the sector-erase opcode
+ * plus address via the STIG command interface.  Completion is not
+ * waited for here; the spi-nor core is expected to poll the status
+ * register (through read_reg) before the next operation.
+ */
+static int cqspi_erase(struct spi_nor *nor, loff_t offs)
+{
+       int ret;
+
+       ret = cqspi_set_protocol(nor, 0);
+       if (ret)
+               return ret;
+
+       /* Send write enable, then erase commands. */
+       ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
+       if (ret)
+               return ret;
+
+       /* Set up command buffer. */
+       ret = cqspi_command_write_addr(nor, nor->erase_opcode, offs);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+/*
+ * spi_nor prepare hook: serialize access to the (possibly shared)
+ * controller by taking the bus mutex.  The lock is held for the whole
+ * flash operation and released in cqspi_unprep().
+ */
+static int cqspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+       struct cqspi_flash_pdata *f_pdata = nor->priv;
+       struct cqspi_st *cqspi = f_pdata->cqspi;
+
+       mutex_lock(&cqspi->bus_mutex);
+
+       return 0;
+}
+
+/* spi_nor unprepare hook: drop the bus mutex taken in cqspi_prep(). */
+static void cqspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+       struct cqspi_flash_pdata *f_pdata = nor->priv;
+       struct cqspi_st *cqspi = f_pdata->cqspi;
+
+       mutex_unlock(&cqspi->bus_mutex);
+}
+
+/*
+ * spi_nor read_reg hook: configure single-bit protocol, then issue
+ * @opcode and read back @len bytes through the STIG interface.
+ */
+static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+       int ret;
+
+       ret = cqspi_set_protocol(nor, 0);
+       if (!ret)
+               ret = cqspi_command_read(nor, &opcode, 1, buf, len);
+
+       return ret;
+}
+
+/*
+ * spi_nor write_reg hook: configure single-bit protocol, then issue
+ * @opcode with @len payload bytes through the STIG interface.
+ */
+static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+       int ret;
+
+       ret = cqspi_set_protocol(nor, 0);
+       if (!ret)
+               ret = cqspi_command_write(nor, opcode, buf, len);
+
+       return ret;
+}
+
+/*
+ * Parse the mandatory per-flash device-tree properties into @f_pdata:
+ * read capture delay (cycles), the four chip-select timing parameters
+ * (nanoseconds) and the flash's maximum SPI frequency.
+ *
+ * Returns 0 on success, -ENXIO if any property is missing.
+ */
+static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
+                                   struct cqspi_flash_pdata *f_pdata,
+                                   struct device_node *np)
+{
+       if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) {
+               dev_err(&pdev->dev, "couldn't determine read-delay\n");
+               return -ENXIO;
+       }
+
+       if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) {
+               dev_err(&pdev->dev, "couldn't determine tshsl-ns\n");
+               return -ENXIO;
+       }
+
+       if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) {
+               dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n");
+               return -ENXIO;
+       }
+
+       if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) {
+               dev_err(&pdev->dev, "couldn't determine tchsh-ns\n");
+               return -ENXIO;
+       }
+
+       if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) {
+               dev_err(&pdev->dev, "couldn't determine tslch-ns\n");
+               return -ENXIO;
+       }
+
+       if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) {
+               dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n");
+               return -ENXIO;
+       }
+
+       return 0;
+}
+
+/*
+ * Parse the controller-level device-tree properties: the optional
+ * "cdns,is-decoded-cs" flag plus the mandatory FIFO geometry and the
+ * AHB trigger address used by the indirect read/write engine.
+ *
+ * Returns 0 on success, -ENXIO if a mandatory property is missing.
+ */
+static int cqspi_of_get_pdata(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct cqspi_st *cqspi = platform_get_drvdata(pdev);
+
+       cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");
+
+       if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
+               dev_err(&pdev->dev, "couldn't determine fifo-depth\n");
+               return -ENXIO;
+       }
+
+       if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) {
+               dev_err(&pdev->dev, "couldn't determine fifo-width\n");
+               return -ENXIO;
+       }
+
+       if (of_property_read_u32(np, "cdns,trigger-address",
+                                &cqspi->trigger_address)) {
+               dev_err(&pdev->dev, "couldn't determine trigger-address\n");
+               return -ENXIO;
+       }
+
+       return 0;
+}
+
+/*
+ * One-time controller setup, performed with the controller disabled:
+ * clear address remap, mask all interrupts, split the SRAM evenly
+ * between read and write, and program the indirect trigger address and
+ * FIFO watermarks.  Re-enables the controller when done.
+ */
+static void cqspi_controller_init(struct cqspi_st *cqspi)
+{
+       cqspi_controller_enable(cqspi, 0);
+
+       /* Configure the remap address register, no remap */
+       writel(0, cqspi->iobase + CQSPI_REG_REMAP);
+
+       /* Disable all interrupts. */
+       writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);
+
+       /* Configure the SRAM split to 1:1 . */
+       writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);
+
+       /* Load indirect trigger address. */
+       writel(cqspi->trigger_address,
+              cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER);
+
+       /* Program read watermark -- 1/2 of the FIFO. */
+       writel(cqspi->fifo_depth * cqspi->fifo_width / 2,
+              cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK);
+       /* Program write watermark -- 1/8 of the FIFO. */
+       writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
+              cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);
+
+       cqspi_controller_enable(cqspi, 1);
+}
+
+/*
+ * Walk the available DT children of the controller node, parse each
+ * flash's properties and register one spi_nor/mtd device per chip
+ * select.  On any failure, every flash registered so far is torn down.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
+{
+       struct platform_device *pdev = cqspi->pdev;
+       struct device *dev = &pdev->dev;
+       struct cqspi_flash_pdata *f_pdata;
+       struct spi_nor *nor;
+       struct mtd_info *mtd;
+       unsigned int cs;
+       int i, ret;
+
+       /* Get flash device data */
+       for_each_available_child_of_node(dev->of_node, np) {
+               /* ret must be set before every goto err (was uninitialized). */
+               if (of_property_read_u32(np, "reg", &cs)) {
+                       dev_err(dev, "Couldn't determine chip select.\n");
+                       ret = -ENXIO;
+                       goto err;
+               }
+
+               /* cs indexes f_pdata[]; cs == CQSPI_MAX_CHIPSELECT is OOB. */
+               if (cs >= CQSPI_MAX_CHIPSELECT) {
+                       dev_err(dev, "Chip select %d out of range.\n", cs);
+                       ret = -EINVAL;
+                       goto err;
+               }
+
+               f_pdata = &cqspi->f_pdata[cs];
+               f_pdata->cqspi = cqspi;
+               f_pdata->cs = cs;
+
+               ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np);
+               if (ret)
+                       goto err;
+
+               nor = &f_pdata->nor;
+               mtd = &nor->mtd;
+
+               mtd->priv = nor;
+
+               nor->dev = dev;
+               spi_nor_set_flash_node(nor, np);
+               nor->priv = f_pdata;
+
+               nor->read_reg = cqspi_read_reg;
+               nor->write_reg = cqspi_write_reg;
+               nor->read = cqspi_read;
+               nor->write = cqspi_write;
+               nor->erase = cqspi_erase;
+               nor->prepare = cqspi_prep;
+               nor->unprepare = cqspi_unprep;
+
+               mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d",
+                                          dev_name(dev), cs);
+               if (!mtd->name) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
+
+               ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
+               if (ret)
+                       goto err;
+
+               ret = mtd_device_register(mtd, NULL, 0);
+               if (ret)
+                       goto err;
+
+               f_pdata->registered = true;
+       }
+
+       return 0;
+
+err:
+       /* Unwind every chip that made it to mtd_device_register(). */
+       for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
+               if (cqspi->f_pdata[i].registered)
+                       mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);
+       return ret;
+}
+
+/*
+ * Platform probe: map register and AHB windows, enable the reference
+ * clock, request the IRQ, initialize the controller and register every
+ * flash described in the device tree.
+ *
+ * Error-path ordering note: the controller is only enabled by
+ * cqspi_controller_init(), which runs AFTER devm_request_irq().  The
+ * original labels were swapped, disabling a never-enabled controller on
+ * IRQ failure and skipping the disable on setup failure; they are now
+ * in teardown order.
+ */
+static int cqspi_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct device *dev = &pdev->dev;
+       struct cqspi_st *cqspi;
+       struct resource *res;
+       struct resource *res_ahb;
+       int ret;
+       int irq;
+
+       cqspi = devm_kzalloc(dev, sizeof(*cqspi), GFP_KERNEL);
+       if (!cqspi)
+               return -ENOMEM;
+
+       mutex_init(&cqspi->bus_mutex);
+       cqspi->pdev = pdev;
+       platform_set_drvdata(pdev, cqspi);
+
+       /* Obtain configuration from OF. */
+       ret = cqspi_of_get_pdata(pdev);
+       if (ret) {
+               dev_err(dev, "Cannot get mandatory OF data.\n");
+               return -ENODEV;
+       }
+
+       /* Obtain QSPI clock. */
+       cqspi->clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(cqspi->clk)) {
+               dev_err(dev, "Cannot claim QSPI clock.\n");
+               return PTR_ERR(cqspi->clk);
+       }
+
+       /* Obtain and remap controller address. */
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       cqspi->iobase = devm_ioremap_resource(dev, res);
+       if (IS_ERR(cqspi->iobase)) {
+               dev_err(dev, "Cannot remap controller address.\n");
+               return PTR_ERR(cqspi->iobase);
+       }
+
+       /* Obtain and remap AHB address. */
+       res_ahb = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       cqspi->ahb_base = devm_ioremap_resource(dev, res_ahb);
+       if (IS_ERR(cqspi->ahb_base)) {
+               dev_err(dev, "Cannot remap AHB address.\n");
+               return PTR_ERR(cqspi->ahb_base);
+       }
+
+       init_completion(&cqspi->transfer_complete);
+
+       /* Obtain IRQ line. */
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(dev, "Cannot obtain IRQ.\n");
+               return -ENXIO;
+       }
+
+       ret = clk_prepare_enable(cqspi->clk);
+       if (ret) {
+               dev_err(dev, "Cannot enable QSPI clock.\n");
+               return ret;
+       }
+
+       cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
+
+       ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
+                              pdev->name, cqspi);
+       if (ret) {
+               dev_err(dev, "Cannot request IRQ.\n");
+               goto probe_irq_failed;
+       }
+
+       cqspi_wait_idle(cqspi);
+       cqspi_controller_init(cqspi);
+       cqspi->current_cs = -1;
+       cqspi->sclk = 0;
+
+       ret = cqspi_setup_flash(cqspi, np);
+       if (ret) {
+               dev_err(dev, "Cadence QSPI NOR probe failed %d\n", ret);
+               goto probe_setup_failed;
+       }
+
+       return ret;
+probe_setup_failed:
+       cqspi_controller_enable(cqspi, 0);
+probe_irq_failed:
+       clk_disable_unprepare(cqspi->clk);
+       return ret;
+}
+
+/*
+ * Platform remove: unregister every registered MTD, quiesce the
+ * controller and drop the reference clock.  Memory and the IRQ are
+ * devm-managed and released automatically.
+ */
+static int cqspi_remove(struct platform_device *pdev)
+{
+       struct cqspi_st *cqspi = platform_get_drvdata(pdev);
+       int i;
+
+       for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
+               if (cqspi->f_pdata[i].registered)
+                       mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);
+
+       cqspi_controller_enable(cqspi, 0);
+
+       clk_disable_unprepare(cqspi->clk);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* System suspend: quiesce the controller (register state is kept). */
+static int cqspi_suspend(struct device *dev)
+{
+       struct cqspi_st *cqspi = dev_get_drvdata(dev);
+
+       cqspi_controller_enable(cqspi, 0);
+       return 0;
+}
+
+/* System resume: re-enable the controller disabled in cqspi_suspend(). */
+static int cqspi_resume(struct device *dev)
+{
+       struct cqspi_st *cqspi = dev_get_drvdata(dev);
+
+       cqspi_controller_enable(cqspi, 1);
+       return 0;
+}
+
+static const struct dev_pm_ops cqspi__dev_pm_ops = {
+       .suspend = cqspi_suspend,
+       .resume = cqspi_resume,
+};
+
+#define CQSPI_DEV_PM_OPS       (&cqspi__dev_pm_ops)
+#else
+#define CQSPI_DEV_PM_OPS       NULL
+#endif
+
+static struct of_device_id const cqspi_dt_ids[] = {
+       {.compatible = "cdns,qspi-nor",},
+       { /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, cqspi_dt_ids);
+
+static struct platform_driver cqspi_platform_driver = {
+       .probe = cqspi_probe,
+       .remove = cqspi_remove,
+       .driver = {
+               .name = CQSPI_NAME,
+               .pm = CQSPI_DEV_PM_OPS,
+               .of_match_table = cqspi_dt_ids,
+       },
+};
+
+module_platform_driver(cqspi_platform_driver);
+
+MODULE_DESCRIPTION("Cadence QSPI Controller Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" CQSPI_NAME);
+MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
+MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>");
index 9ab2b51d54b86c23392541dcd229dc62efeeb525..5c82e4ef1904b3bebcdd6fae8fc23bb37768df52 100644 (file)
@@ -618,9 +618,9 @@ static inline void fsl_qspi_invalid(struct fsl_qspi *q)
        qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
 }
 
-static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
+static ssize_t fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
                                u8 opcode, unsigned int to, u32 *txbuf,
-                               unsigned count, size_t *retlen)
+                               unsigned count)
 {
        int ret, i, j;
        u32 tmp;
@@ -647,8 +647,8 @@ static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
        /* Trigger it */
        ret = fsl_qspi_runcmd(q, opcode, to, count);
 
-       if (ret == 0 && retlen)
-               *retlen += count;
+       if (ret == 0)
+               return count;
 
        return ret;
 }
@@ -859,7 +859,9 @@ static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
 
        } else if (len > 0) {
                ret = fsl_qspi_nor_write(q, nor, opcode, 0,
-                                       (u32 *)buf, len, NULL);
+                                       (u32 *)buf, len);
+               if (ret > 0)
+                       return 0;
        } else {
                dev_err(q->dev, "invalid cmd %d\n", opcode);
                ret = -EINVAL;
@@ -868,20 +870,20 @@ static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
        return ret;
 }
 
-static void fsl_qspi_write(struct spi_nor *nor, loff_t to,
-               size_t len, size_t *retlen, const u_char *buf)
+static ssize_t fsl_qspi_write(struct spi_nor *nor, loff_t to,
+                             size_t len, const u_char *buf)
 {
        struct fsl_qspi *q = nor->priv;
-
-       fsl_qspi_nor_write(q, nor, nor->program_opcode, to,
-                               (u32 *)buf, len, retlen);
+       ssize_t ret = fsl_qspi_nor_write(q, nor, nor->program_opcode, to,
+                                        (u32 *)buf, len);
 
        /* invalid the data in the AHB buffer. */
        fsl_qspi_invalid(q);
+       return ret;
 }
 
-static int fsl_qspi_read(struct spi_nor *nor, loff_t from,
-               size_t len, size_t *retlen, u_char *buf)
+static ssize_t fsl_qspi_read(struct spi_nor *nor, loff_t from,
+                            size_t len, u_char *buf)
 {
        struct fsl_qspi *q = nor->priv;
        u8 cmd = nor->read_opcode;
@@ -923,8 +925,7 @@ static int fsl_qspi_read(struct spi_nor *nor, loff_t from,
        memcpy(buf, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs,
                len);
 
-       *retlen += len;
-       return 0;
+       return len;
 }
 
 static int fsl_qspi_erase(struct spi_nor *nor, loff_t offs)
diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c
new file mode 100644 (file)
index 0000000..20378b0
--- /dev/null
@@ -0,0 +1,489 @@
+/*
+ * HiSilicon SPI Nor Flash Controller Driver
+ *
+ * Copyright (c) 2015-2016 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Hardware register offsets and field definitions */
+#define FMC_CFG                                0x00
+#define FMC_CFG_OP_MODE_MASK           BIT_MASK(0)
+#define FMC_CFG_OP_MODE_BOOT           0
+#define FMC_CFG_OP_MODE_NORMAL         1
+#define FMC_CFG_FLASH_SEL(type)                (((type) & 0x3) << 1)
+#define FMC_CFG_FLASH_SEL_MASK         0x6
+#define FMC_ECC_TYPE(type)             (((type) & 0x7) << 5)
+#define FMC_ECC_TYPE_MASK              GENMASK(7, 5)
+#define SPI_NOR_ADDR_MODE_MASK         BIT_MASK(10)
+#define SPI_NOR_ADDR_MODE_3BYTES       (0x0 << 10)
+#define SPI_NOR_ADDR_MODE_4BYTES       (0x1 << 10)
+#define FMC_GLOBAL_CFG                 0x04
+#define FMC_GLOBAL_CFG_WP_ENABLE       BIT(6)
+#define FMC_SPI_TIMING_CFG             0x08
+#define TIMING_CFG_TCSH(nr)            (((nr) & 0xf) << 8)
+#define TIMING_CFG_TCSS(nr)            (((nr) & 0xf) << 4)
+#define TIMING_CFG_TSHSL(nr)           ((nr) & 0xf)
+#define CS_HOLD_TIME                   0x6
+#define CS_SETUP_TIME                  0x6
+#define CS_DESELECT_TIME               0xf
+#define FMC_INT                                0x18
+#define FMC_INT_OP_DONE                        BIT(0)
+#define FMC_INT_CLR                    0x20
+#define FMC_CMD                                0x24
+#define FMC_CMD_CMD1(cmd)              ((cmd) & 0xff)
+#define FMC_ADDRL                      0x2c
+#define FMC_OP_CFG                     0x30
+#define OP_CFG_FM_CS(cs)               ((cs) << 11)
+#define OP_CFG_MEM_IF_TYPE(type)       (((type) & 0x7) << 7)
+#define OP_CFG_ADDR_NUM(addr)          (((addr) & 0x7) << 4)
+#define OP_CFG_DUMMY_NUM(dummy)                ((dummy) & 0xf)
+#define FMC_DATA_NUM                   0x38
+#define FMC_DATA_NUM_CNT(cnt)          ((cnt) & GENMASK(13, 0))
+#define FMC_OP                         0x3c
+#define FMC_OP_DUMMY_EN                        BIT(8)
+#define FMC_OP_CMD1_EN                 BIT(7)
+#define FMC_OP_ADDR_EN                 BIT(6)
+#define FMC_OP_WRITE_DATA_EN           BIT(5)
+#define FMC_OP_READ_DATA_EN            BIT(2)
+#define FMC_OP_READ_STATUS_EN          BIT(1)
+#define FMC_OP_REG_OP_START            BIT(0)
+#define FMC_DMA_LEN                    0x40
+#define FMC_DMA_LEN_SET(len)           ((len) & GENMASK(27, 0))
+#define FMC_DMA_SADDR_D0               0x4c
+#define HIFMC_DMA_MAX_LEN              (4096)
+#define HIFMC_DMA_MASK                 (HIFMC_DMA_MAX_LEN - 1)
+#define FMC_OP_DMA                     0x68
+#define OP_CTRL_RD_OPCODE(code)                (((code) & 0xff) << 16)
+#define OP_CTRL_WR_OPCODE(code)                (((code) & 0xff) << 8)
+#define OP_CTRL_RW_OP(op)              ((op) << 1)
+#define OP_CTRL_DMA_OP_READY           BIT(0)
+#define FMC_OP_READ                    0x0
+#define FMC_OP_WRITE                   0x1
+#define FMC_WAIT_TIMEOUT               1000000
+
+/* Bus interface widths as encoded into OP_CFG_MEM_IF_TYPE(). */
+enum hifmc_iftype {
+       IF_TYPE_STD,
+       IF_TYPE_DUAL,
+       IF_TYPE_DIO,
+       IF_TYPE_QUAD,
+       IF_TYPE_QIO,
+};
+
+/* Per-flash state: chip select, requested clock rate, host back-pointer. */
+struct hifmc_priv {
+       u32 chipselect;
+       u32 clkrate;
+       struct hifmc_host *host;
+};
+
+#define HIFMC_MAX_CHIP_NUM             2
+/* Controller state shared by all chips behind one FMC instance. */
+struct hifmc_host {
+       struct device *dev;
+       struct mutex lock;
+
+       void __iomem *regbase;
+       void __iomem *iobase;
+       struct clk *clk;
+       void *buffer;
+       dma_addr_t dma_buffer;
+
+       struct spi_nor  *nor[HIFMC_MAX_CHIP_NUM];
+       u32 num_chip;
+};
+
+/*
+ * Busy-poll FMC_INT until the OP_DONE bit is set, up to
+ * FMC_WAIT_TIMEOUT microseconds.  Returns 0 or -ETIMEDOUT
+ * (readl_poll_timeout semantics).
+ */
+static inline int wait_op_finish(struct hifmc_host *host)
+{
+       u32 reg;
+
+       return readl_poll_timeout(host->regbase + FMC_INT, reg,
+               (reg & FMC_INT_OP_DONE), 0, FMC_WAIT_TIMEOUT);
+}
+
+/*
+ * Map the spi-nor framework read mode onto the controller's interface
+ * type encoding; anything unrecognised falls back to standard 1-bit.
+ */
+static int get_if_type(enum read_mode flash_read)
+{
+       enum hifmc_iftype if_type;
+
+       switch (flash_read) {
+       case SPI_NOR_DUAL:
+               if_type = IF_TYPE_DUAL;
+               break;
+       case SPI_NOR_QUAD:
+               if_type = IF_TYPE_QUAD;
+               break;
+       case SPI_NOR_NORMAL:
+       case SPI_NOR_FAST:
+       default:
+               if_type = IF_TYPE_STD;
+               break;
+       }
+
+       return if_type;
+}
+
+/* Program the chip-select hold/setup/deselect timing into the FMC. */
+static void hisi_spi_nor_init(struct hifmc_host *host)
+{
+       u32 reg;
+
+       reg = TIMING_CFG_TCSH(CS_HOLD_TIME)
+               | TIMING_CFG_TCSS(CS_SETUP_TIME)
+               | TIMING_CFG_TSHSL(CS_DESELECT_TIME);
+       writel(reg, host->regbase + FMC_SPI_TIMING_CFG);
+}
+
+/*
+ * spi_nor prepare hook: take the host lock, set this flash's clock
+ * rate and enable the controller clock.  On failure the lock is
+ * dropped again; on success it stays held until hisi_spi_nor_unprep().
+ */
+static int hisi_spi_nor_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+       struct hifmc_priv *priv = nor->priv;
+       struct hifmc_host *host = priv->host;
+       int ret;
+
+       mutex_lock(&host->lock);
+
+       ret = clk_set_rate(host->clk, priv->clkrate);
+       if (ret)
+               goto out;
+
+       ret = clk_prepare_enable(host->clk);
+       if (ret)
+               goto out;
+
+       return 0;
+
+out:
+       mutex_unlock(&host->lock);
+       return ret;
+}
+
+/* spi_nor unprepare hook: undo hisi_spi_nor_prep() (clock, then lock). */
+static void hisi_spi_nor_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+       struct hifmc_priv *priv = nor->priv;
+       struct hifmc_host *host = priv->host;
+
+       clk_disable_unprepare(host->clk);
+       mutex_unlock(&host->lock);
+}
+
+/*
+ * Run a register-style command: program @opcode and @len, select the
+ * chip, clear stale interrupt status, kick the operation with @optype
+ * (read- or write-data enable), and wait for OP_DONE.  Data is staged
+ * in the controller's I/O window by the callers.
+ */
+static int hisi_spi_nor_op_reg(struct spi_nor *nor,
+                               u8 opcode, int len, u8 optype)
+{
+       struct hifmc_priv *priv = nor->priv;
+       struct hifmc_host *host = priv->host;
+       u32 reg;
+
+       reg = FMC_CMD_CMD1(opcode);
+       writel(reg, host->regbase + FMC_CMD);
+
+       reg = FMC_DATA_NUM_CNT(len);
+       writel(reg, host->regbase + FMC_DATA_NUM);
+
+       reg = OP_CFG_FM_CS(priv->chipselect);
+       writel(reg, host->regbase + FMC_OP_CFG);
+
+       /* Clear any stale completion status before starting. */
+       writel(0xff, host->regbase + FMC_INT_CLR);
+       reg = FMC_OP_CMD1_EN | FMC_OP_REG_OP_START | optype;
+       writel(reg, host->regbase + FMC_OP);
+
+       return wait_op_finish(host);
+}
+
+/*
+ * spi_nor read_reg hook: execute @opcode, then copy @len result bytes
+ * out of the controller's I/O window into @buf.
+ */
+static int hisi_spi_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+               int len)
+{
+       struct hifmc_priv *priv = nor->priv;
+       struct hifmc_host *host = priv->host;
+       int ret;
+
+       ret = hisi_spi_nor_op_reg(nor, opcode, len, FMC_OP_READ_DATA_EN);
+       if (ret)
+               return ret;
+
+       memcpy_fromio(buf, host->iobase, len);
+       return 0;
+}
+
+/*
+ * spi_nor write_reg hook: stage @len payload bytes in the controller's
+ * I/O window (if any), then execute @opcode.
+ */
+static int hisi_spi_nor_write_reg(struct spi_nor *nor, u8 opcode,
+                               u8 *buf, int len)
+{
+       struct hifmc_priv *priv = nor->priv;
+       struct hifmc_host *host = priv->host;
+
+       if (len)
+               memcpy_toio(host->iobase, buf, len);
+
+       return hisi_spi_nor_op_reg(nor, opcode, len, FMC_OP_WRITE_DATA_EN);
+}
+
+/*
+ * Run one DMA transfer of @len bytes between flash offset @start_off
+ * and the coherent bounce buffer at @dma_buf, in the direction given by
+ * @op_type (FMC_OP_READ / FMC_OP_WRITE).  Selects 3- or 4-byte address
+ * mode from nor->addr_width, and dummy cycles (bits -> bytes via >> 3)
+ * for reads only.  Caller chunks transfers to HIFMC_DMA_MAX_LEN.
+ */
+static int hisi_spi_nor_dma_transfer(struct spi_nor *nor, loff_t start_off,
+               dma_addr_t dma_buf, size_t len, u8 op_type)
+{
+       struct hifmc_priv *priv = nor->priv;
+       struct hifmc_host *host = priv->host;
+       u8 if_type = 0;
+       u32 reg;
+
+       reg = readl(host->regbase + FMC_CFG);
+       reg &= ~(FMC_CFG_OP_MODE_MASK | SPI_NOR_ADDR_MODE_MASK);
+       reg |= FMC_CFG_OP_MODE_NORMAL;
+       reg |= (nor->addr_width == 4) ? SPI_NOR_ADDR_MODE_4BYTES
+               : SPI_NOR_ADDR_MODE_3BYTES;
+       writel(reg, host->regbase + FMC_CFG);
+
+       writel(start_off, host->regbase + FMC_ADDRL);
+       writel(dma_buf, host->regbase + FMC_DMA_SADDR_D0);
+       writel(FMC_DMA_LEN_SET(len), host->regbase + FMC_DMA_LEN);
+
+       reg = OP_CFG_FM_CS(priv->chipselect);
+       if_type = get_if_type(nor->flash_read);
+       reg |= OP_CFG_MEM_IF_TYPE(if_type);
+       if (op_type == FMC_OP_READ)
+               reg |= OP_CFG_DUMMY_NUM(nor->read_dummy >> 3);
+       writel(reg, host->regbase + FMC_OP_CFG);
+
+       /* Clear stale completion status, then start the DMA operation. */
+       writel(0xff, host->regbase + FMC_INT_CLR);
+       reg = OP_CTRL_RW_OP(op_type) | OP_CTRL_DMA_OP_READY;
+       reg |= (op_type == FMC_OP_READ)
+               ? OP_CTRL_RD_OPCODE(nor->read_opcode)
+               : OP_CTRL_WR_OPCODE(nor->program_opcode);
+       writel(reg, host->regbase + FMC_OP_DMA);
+
+       return wait_op_finish(host);
+}
+
+/*
+ * spi_nor read hook: DMA the flash contents through the coherent
+ * bounce buffer in chunks of at most HIFMC_DMA_MAX_LEN bytes.
+ * Returns @len on success or a negative error code.
+ */
+static ssize_t hisi_spi_nor_read(struct spi_nor *nor, loff_t from, size_t len,
+               u_char *read_buf)
+{
+       struct hifmc_priv *priv = nor->priv;
+       struct hifmc_host *host = priv->host;
+       size_t offset;
+       int ret;
+
+       for (offset = 0; offset < len; offset += HIFMC_DMA_MAX_LEN) {
+               size_t trans = min_t(size_t, HIFMC_DMA_MAX_LEN, len - offset);
+
+               ret = hisi_spi_nor_dma_transfer(nor,
+                       from + offset, host->dma_buffer, trans, FMC_OP_READ);
+               if (ret) {
+                       dev_warn(nor->dev, "DMA read timeout\n");
+                       return ret;
+               }
+               memcpy(read_buf + offset, host->buffer, trans);
+       }
+
+       return len;
+}
+
+/*
+ * spi_nor write hook: copy each chunk into the coherent bounce buffer
+ * and DMA it to flash, at most HIFMC_DMA_MAX_LEN bytes at a time.
+ * Returns @len on success or a negative error code.
+ */
+static ssize_t hisi_spi_nor_write(struct spi_nor *nor, loff_t to,
+                       size_t len, const u_char *write_buf)
+{
+       struct hifmc_priv *priv = nor->priv;
+       struct hifmc_host *host = priv->host;
+       size_t offset;
+       int ret;
+
+       for (offset = 0; offset < len; offset += HIFMC_DMA_MAX_LEN) {
+               size_t trans = min_t(size_t, HIFMC_DMA_MAX_LEN, len - offset);
+
+               memcpy(host->buffer, write_buf + offset, trans);
+               ret = hisi_spi_nor_dma_transfer(nor,
+                       to + offset, host->dma_buffer, trans, FMC_OP_WRITE);
+               if (ret) {
+                       dev_warn(nor->dev, "DMA write timeout\n");
+                       return ret;
+               }
+       }
+
+       return len;
+}
+
+/*
+ * Allocate a spi_nor for the flash described by DT node @np, parse its
+ * chip select and max frequency, scan it and register it as an MTD
+ * device.  All allocations are devm-managed, so error paths need no
+ * manual frees.  The caller guarantees host->num_chip has room.
+ */
+static int hisi_spi_nor_register(struct device_node *np,
+                               struct hifmc_host *host)
+{
+       struct device *dev = host->dev;
+       struct spi_nor *nor;
+       struct hifmc_priv *priv;
+       struct mtd_info *mtd;
+       int ret;
+
+       nor = devm_kzalloc(dev, sizeof(*nor), GFP_KERNEL);
+       if (!nor)
+               return -ENOMEM;
+
+       nor->dev = dev;
+       spi_nor_set_flash_node(nor, np);
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       ret = of_property_read_u32(np, "reg", &priv->chipselect);
+       if (ret) {
+               dev_err(dev, "There's no reg property for %s\n",
+                       np->full_name);
+               return ret;
+       }
+
+       ret = of_property_read_u32(np, "spi-max-frequency",
+                       &priv->clkrate);
+       if (ret) {
+               dev_err(dev, "There's no spi-max-frequency property for %s\n",
+                       np->full_name);
+               return ret;
+       }
+       priv->host = host;
+       nor->priv = priv;
+
+       nor->prepare = hisi_spi_nor_prep;
+       nor->unprepare = hisi_spi_nor_unprep;
+       nor->read_reg = hisi_spi_nor_read_reg;
+       nor->write_reg = hisi_spi_nor_write_reg;
+       nor->read = hisi_spi_nor_read;
+       nor->write = hisi_spi_nor_write;
+       /* No dedicated erase op; the core presumably erases via write_reg. */
+       nor->erase = NULL;
+       ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
+       if (ret)
+               return ret;
+
+       mtd = &nor->mtd;
+       mtd->name = np->name;
+       ret = mtd_device_register(mtd, NULL, 0);
+       if (ret)
+               return ret;
+
+       host->nor[host->num_chip] = nor;
+       host->num_chip++;
+       return 0;
+}
+
+/* Unregister every MTD registered by hisi_spi_nor_register(). */
+static void hisi_spi_nor_unregister_all(struct hifmc_host *host)
+{
+       int i;
+
+       for (i = 0; i < host->num_chip; i++)
+               mtd_device_unregister(&host->nor[i]->mtd);
+}
+
+/*
+ * Register one spi-nor/MTD per available flash child node, stopping at
+ * HIFMC_MAX_CHIP_NUM chips.  On failure every chip registered so far is
+ * unregistered.
+ *
+ * for_each_available_child_of_node() only drops the node reference on
+ * normal loop continuation, so of_node_put() is required when leaving
+ * the loop early via break or goto (was leaked before).
+ */
+static int hisi_spi_nor_register_all(struct hifmc_host *host)
+{
+       struct device *dev = host->dev;
+       struct device_node *np;
+       int ret;
+
+       for_each_available_child_of_node(dev->of_node, np) {
+               ret = hisi_spi_nor_register(np, host);
+               if (ret) {
+                       of_node_put(np);
+                       goto fail;
+               }
+
+               if (host->num_chip == HIFMC_MAX_CHIP_NUM) {
+                       dev_warn(dev, "Flash device number exceeds the maximum chipselect number\n");
+                       of_node_put(np);
+                       break;
+               }
+       }
+
+       return 0;
+
+fail:
+       hisi_spi_nor_unregister_all(host);
+       return ret;
+}
+
+/*
+ * Platform probe: map the control and memory windows, claim the clock,
+ * set up the 32-bit DMA bounce buffer, initialize timing and register
+ * all flash chips.  The clock is only held enabled across init and
+ * registration; per-operation enabling happens in hisi_spi_nor_prep().
+ */
+static int hisi_spi_nor_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+       struct hifmc_host *host;
+       int ret;
+
+       host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+       if (!host)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, host);
+       host->dev = dev;
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control");
+       host->regbase = devm_ioremap_resource(dev, res);
+       if (IS_ERR(host->regbase))
+               return PTR_ERR(host->regbase);
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "memory");
+       host->iobase = devm_ioremap_resource(dev, res);
+       if (IS_ERR(host->iobase))
+               return PTR_ERR(host->iobase);
+
+       host->clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(host->clk))
+               return PTR_ERR(host->clk);
+
+       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+       if (ret) {
+               dev_warn(dev, "Unable to set dma mask\n");
+               return ret;
+       }
+
+       host->buffer = dmam_alloc_coherent(dev, HIFMC_DMA_MAX_LEN,
+                       &host->dma_buffer, GFP_KERNEL);
+       if (!host->buffer)
+               return -ENOMEM;
+
+       mutex_init(&host->lock);
+       /* The return value was previously ignored here. */
+       ret = clk_prepare_enable(host->clk);
+       if (ret) {
+               mutex_destroy(&host->lock);
+               return ret;
+       }
+       hisi_spi_nor_init(host);
+       ret = hisi_spi_nor_register_all(host);
+       if (ret)
+               mutex_destroy(&host->lock);
+
+       clk_disable_unprepare(host->clk);
+       return ret;
+}
+
+/*
+ * Platform remove: unregister all MTDs and release the lock/clock.
+ * Mappings and the DMA buffer are devm-managed.
+ */
+static int hisi_spi_nor_remove(struct platform_device *pdev)
+{
+       struct hifmc_host *host = platform_get_drvdata(pdev);
+
+       hisi_spi_nor_unregister_all(host);
+       mutex_destroy(&host->lock);
+       clk_disable_unprepare(host->clk);
+       return 0;
+}
+
+static const struct of_device_id hisi_spi_nor_dt_ids[] = {
+       { .compatible = "hisilicon,fmc-spi-nor"},
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hisi_spi_nor_dt_ids);
+
+static struct platform_driver hisi_spi_nor_driver = {
+       .driver = {
+               .name   = "hisi-sfc",
+               .of_match_table = hisi_spi_nor_dt_ids,
+       },
+       .probe  = hisi_spi_nor_probe,
+       .remove = hisi_spi_nor_remove,
+};
+module_platform_driver(hisi_spi_nor_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("HiSilicon SPI Nor Flash Controller Driver");
index 8bed1a4cb79ce585d88623dc7d6784efaf58ccb7..e661877c23deacbb7a6ac54603d9c0ed7e162af3 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/ioport.h>
 #include <linux/math64.h>
 #include <linux/module.h>
-#include <linux/mtd/mtd.h>
 #include <linux/mutex.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -243,8 +242,8 @@ static void mt8173_nor_set_addr(struct mt8173_nor *mt8173_nor, u32 addr)
        writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR3_REG);
 }
 
-static int mt8173_nor_read(struct spi_nor *nor, loff_t from, size_t length,
-                          size_t *retlen, u_char *buffer)
+static ssize_t mt8173_nor_read(struct spi_nor *nor, loff_t from, size_t length,
+                              u_char *buffer)
 {
        int i, ret;
        int addr = (int)from;
@@ -255,13 +254,13 @@ static int mt8173_nor_read(struct spi_nor *nor, loff_t from, size_t length,
        mt8173_nor_set_read_mode(mt8173_nor);
        mt8173_nor_set_addr(mt8173_nor, addr);
 
-       for (i = 0; i < length; i++, (*retlen)++) {
+       for (i = 0; i < length; i++) {
                ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PIO_READ_CMD);
                if (ret < 0)
                        return ret;
                buf[i] = readb(mt8173_nor->base + MTK_NOR_RDATA_REG);
        }
-       return 0;
+       return length;
 }
 
 static int mt8173_nor_write_single_byte(struct mt8173_nor *mt8173_nor,
@@ -297,36 +296,44 @@ static int mt8173_nor_write_buffer(struct mt8173_nor *mt8173_nor, int addr,
        return mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_WR_CMD);
 }
 
-static void mt8173_nor_write(struct spi_nor *nor, loff_t to, size_t len,
-                            size_t *retlen, const u_char *buf)
+static ssize_t mt8173_nor_write(struct spi_nor *nor, loff_t to, size_t len,
+                               const u_char *buf)
 {
        int ret;
        struct mt8173_nor *mt8173_nor = nor->priv;
+       size_t i;
 
        ret = mt8173_nor_write_buffer_enable(mt8173_nor);
-       if (ret < 0)
+       if (ret < 0) {
                dev_warn(mt8173_nor->dev, "write buffer enable failed!\n");
+               return ret;
+       }
 
-       while (len >= SFLASH_WRBUF_SIZE) {
+       for (i = 0; i + SFLASH_WRBUF_SIZE <= len; i += SFLASH_WRBUF_SIZE) {
                ret = mt8173_nor_write_buffer(mt8173_nor, to, buf);
-               if (ret < 0)
+               if (ret < 0) {
                        dev_err(mt8173_nor->dev, "write buffer failed!\n");
-               len -= SFLASH_WRBUF_SIZE;
+                       return ret;
+               }
                to += SFLASH_WRBUF_SIZE;
                buf += SFLASH_WRBUF_SIZE;
-               (*retlen) += SFLASH_WRBUF_SIZE;
        }
        ret = mt8173_nor_write_buffer_disable(mt8173_nor);
-       if (ret < 0)
+       if (ret < 0) {
                dev_warn(mt8173_nor->dev, "write buffer disable failed!\n");
+               return ret;
+       }
 
-       if (len) {
-               ret = mt8173_nor_write_single_byte(mt8173_nor, to, (int)len,
-                                                  (u8 *)buf);
-               if (ret < 0)
+       if (i < len) {
+               ret = mt8173_nor_write_single_byte(mt8173_nor, to,
+                                                  (int)(len - i), (u8 *)buf);
+               if (ret < 0) {
                        dev_err(mt8173_nor->dev, "write single byte failed!\n");
-               (*retlen) += len;
+                       return ret;
+               }
        }
+
+       return len;
 }
 
 static int mt8173_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
index ae428cb0e04bb7077674adfa961d83c211501618..73a14f40928be2536d9c0d1256c26ce0b3e54755 100644 (file)
@@ -172,8 +172,8 @@ static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
        return nxp_spifi_wait_for_cmd(spifi);
 }
 
-static int nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len,
-                         size_t *retlen, u_char *buf)
+static ssize_t nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len,
+                             u_char *buf)
 {
        struct nxp_spifi *spifi = nor->priv;
        int ret;
@@ -183,24 +183,23 @@ static int nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len,
                return ret;
 
        memcpy_fromio(buf, spifi->flash_base + from, len);
-       *retlen += len;
 
-       return 0;
+       return len;
 }
 
-static void nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len,
-                           size_t *retlen, const u_char *buf)
+static ssize_t nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len,
+                              const u_char *buf)
 {
        struct nxp_spifi *spifi = nor->priv;
        u32 cmd;
        int ret;
+       size_t i;
 
        ret = nxp_spifi_set_memory_mode_off(spifi);
        if (ret)
-               return;
+               return ret;
 
        writel(to, spifi->io_base + SPIFI_ADDR);
-       *retlen += len;
 
        cmd = SPIFI_CMD_DOUT |
              SPIFI_CMD_DATALEN(len) |
@@ -209,10 +208,14 @@ static void nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len,
              SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
        writel(cmd, spifi->io_base + SPIFI_CMD);
 
-       while (len--)
-               writeb(*buf++, spifi->io_base + SPIFI_DATA);
+       for (i = 0; i < len; i++)
+               writeb(buf[i], spifi->io_base + SPIFI_DATA);
+
+       ret = nxp_spifi_wait_for_cmd(spifi);
+       if (ret)
+               return ret;
 
-       nxp_spifi_wait_for_cmd(spifi);
+       return len;
 }
 
 static int nxp_spifi_erase(struct spi_nor *nor, loff_t offs)
index c52e45594bfd6e78775f6250314c9cf26a049e2a..d0fc165d7d666cac0ffc69b6c68fbdd49bc2b29c 100644 (file)
@@ -661,7 +661,7 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
        status_new = (status_old & ~mask & ~SR_TB) | val;
 
        /* Don't protect status register if we're fully unlocked */
-       if (lock_len == mtd->size)
+       if (lock_len == 0)
                status_new &= ~SR_SRWD;
 
        if (!use_top)
@@ -830,10 +830,26 @@ static const struct flash_info spi_nor_ids[] = {
        { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
 
        /* GigaDevice */
-       { "gd25q32", INFO(0xc84016, 0, 64 * 1024,  64, SECT_4K) },
-       { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
-       { "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-       { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256, SECT_4K) },
+       {
+               "gd25q32", INFO(0xc84016, 0, 64 * 1024,  64,
+                       SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+                       SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+       },
+       {
+               "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
+                       SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+                       SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+       },
+       {
+               "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
+                       SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+                       SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+       },
+       {
+               "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
+                       SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+                       SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+       },
 
        /* Intel/Numonyx -- xxxs33b */
        { "160s33b",  INFO(0x898911, 0, 64 * 1024,  32, 0) },
@@ -871,6 +887,7 @@ static const struct flash_info spi_nor_ids[] = {
        { "n25q512a",    INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
        { "n25q512ax3",  INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
        { "n25q00",      INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+       { "n25q00a",     INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
 
        /* PMC */
        { "pm25lv512",   INFO(0,        0, 32 * 1024,    2, SECT_4K_PMC) },
@@ -1031,8 +1048,25 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
        if (ret)
                return ret;
 
-       ret = nor->read(nor, from, len, retlen, buf);
+       while (len) {
+               ret = nor->read(nor, from, len, buf);
+               if (ret == 0) {
+                       /* We shouldn't see 0-length reads */
+                       ret = -EIO;
+                       goto read_err;
+               }
+               if (ret < 0)
+                       goto read_err;
+
+               WARN_ON(ret > len);
+               *retlen += ret;
+               buf += ret;
+               from += ret;
+               len -= ret;
+       }
+       ret = 0;
 
+read_err:
        spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
        return ret;
 }
@@ -1060,10 +1094,14 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
                nor->program_opcode = SPINOR_OP_BP;
 
                /* write one byte. */
-               nor->write(nor, to, 1, retlen, buf);
+               ret = nor->write(nor, to, 1, buf);
+               if (ret < 0)
+                       goto sst_write_err;
+               WARN(ret != 1, "While writing 1 byte written %i bytes\n",
+                    (int)ret);
                ret = spi_nor_wait_till_ready(nor);
                if (ret)
-                       goto time_out;
+                       goto sst_write_err;
        }
        to += actual;
 
@@ -1072,10 +1110,14 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
                nor->program_opcode = SPINOR_OP_AAI_WP;
 
                /* write two bytes. */
-               nor->write(nor, to, 2, retlen, buf + actual);
+               ret = nor->write(nor, to, 2, buf + actual);
+               if (ret < 0)
+                       goto sst_write_err;
+               WARN(ret != 2, "While writing 2 bytes written %i bytes\n",
+                    (int)ret);
                ret = spi_nor_wait_till_ready(nor);
                if (ret)
-                       goto time_out;
+                       goto sst_write_err;
                to += 2;
                nor->sst_write_second = true;
        }
@@ -1084,21 +1126,26 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
        write_disable(nor);
        ret = spi_nor_wait_till_ready(nor);
        if (ret)
-               goto time_out;
+               goto sst_write_err;
 
        /* Write out trailing byte if it exists. */
        if (actual != len) {
                write_enable(nor);
 
                nor->program_opcode = SPINOR_OP_BP;
-               nor->write(nor, to, 1, retlen, buf + actual);
-
+               ret = nor->write(nor, to, 1, buf + actual);
+               if (ret < 0)
+                       goto sst_write_err;
+               WARN(ret != 1, "While writing 1 byte written %i bytes\n",
+                    (int)ret);
                ret = spi_nor_wait_till_ready(nor);
                if (ret)
-                       goto time_out;
+                       goto sst_write_err;
                write_disable(nor);
+               actual += 1;
        }
-time_out:
+sst_write_err:
+       *retlen += actual;
        spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
        return ret;
 }
@@ -1112,8 +1159,8 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
        size_t *retlen, const u_char *buf)
 {
        struct spi_nor *nor = mtd_to_spi_nor(mtd);
-       u32 page_offset, page_size, i;
-       int ret;
+       size_t page_offset, page_remain, i;
+       ssize_t ret;
 
        dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
 
@@ -1121,35 +1168,37 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
        if (ret)
                return ret;
 
-       write_enable(nor);
-
-       page_offset = to & (nor->page_size - 1);
+       for (i = 0; i < len; ) {
+               ssize_t written;
 
-       /* do all the bytes fit onto one page? */
-       if (page_offset + len <= nor->page_size) {
-               nor->write(nor, to, len, retlen, buf);
-       } else {
+               page_offset = (to + i) & (nor->page_size - 1);
+               WARN_ONCE(page_offset,
+                         "Writing at offset %zu into a NOR page. Writing partial pages may decrease reliability and increase wear of NOR flash.",
+                         page_offset);
                /* the size of data remaining on the first page */
-               page_size = nor->page_size - page_offset;
-               nor->write(nor, to, page_size, retlen, buf);
-
-               /* write everything in nor->page_size chunks */
-               for (i = page_size; i < len; i += page_size) {
-                       page_size = len - i;
-                       if (page_size > nor->page_size)
-                               page_size = nor->page_size;
+               page_remain = min_t(size_t,
+                                   nor->page_size - page_offset, len - i);
 
-                       ret = spi_nor_wait_till_ready(nor);
-                       if (ret)
-                               goto write_err;
-
-                       write_enable(nor);
+               write_enable(nor);
+               ret = nor->write(nor, to + i, page_remain, buf + i);
+               if (ret < 0)
+                       goto write_err;
+               written = ret;
 
-                       nor->write(nor, to + i, page_size, retlen, buf + i);
+               ret = spi_nor_wait_till_ready(nor);
+               if (ret)
+                       goto write_err;
+               *retlen += written;
+               i += written;
+               if (written != page_remain) {
+                       dev_err(nor->dev,
+                               "While writing %zu bytes written %zd bytes\n",
+                               page_remain, written);
+                       ret = -EIO;
+                       goto write_err;
                }
        }
 
-       ret = spi_nor_wait_till_ready(nor);
 write_err:
        spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
        return ret;
index daf82ba7aba038ba4ca322921826cf23d748076f..41b13d1cdcc44dede4d5c6b64eb03e322b05a0ff 100644 (file)
@@ -380,8 +380,7 @@ static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
                " block_addr=%d\n", logic_sect_no, sectors_per_block, offset,
                block_address);
 
-       if (block_address >= ssfdc->map_len)
-               BUG();
+       BUG_ON(block_address >= ssfdc->map_len);
 
        block_address = ssfdc->logic_block_map[block_address];
 
index 09a4ccac53a2e74dd29aae1c9e8d41baad505609..f26dec896afa87f9667e5afd13f713d4cb5c98d3 100644 (file)
@@ -290,7 +290,7 @@ static int overwrite_test(void)
 
        while (opno < max_overwrite) {
 
-               err = rewrite_page(0);
+               err = write_page(0);
                if (err)
                        break;
 
index e708e360a9e3919df7b4e7b6e8500e5c27d24f55..6453148d066a7d6dc698a84ba970d90859d52d85 100644 (file)
@@ -1251,7 +1251,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct alx_priv *alx;
        struct alx_hw *hw;
        bool phy_configured;
-       int bars, err;
+       int err;
 
        err = pci_enable_device_mem(pdev);
        if (err)
@@ -1271,11 +1271,10 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                }
        }
 
-       bars = pci_select_bars(pdev, IORESOURCE_MEM);
-       err = pci_request_selected_regions(pdev, bars, alx_drv_name);
+       err = pci_request_mem_regions(pdev, alx_drv_name);
        if (err) {
                dev_err(&pdev->dev,
-                       "pci_request_selected_regions failed(bars:%d)\n", bars);
+                       "pci_request_mem_regions failed\n");
                goto out_pci_disable;
        }
 
@@ -1401,7 +1400,7 @@ out_unmap:
 out_free_netdev:
        free_netdev(netdev);
 out_pci_release:
-       pci_release_selected_regions(pdev, bars);
+       pci_release_mem_regions(pdev);
 out_pci_disable:
        pci_disable_device(pdev);
        return err;
@@ -1420,8 +1419,7 @@ static void alx_remove(struct pci_dev *pdev)
 
        unregister_netdev(alx->dev);
        iounmap(hw->hw_addr);
-       pci_release_selected_regions(pdev,
-                                    pci_select_bars(pdev, IORESOURCE_MEM));
+       pci_release_mem_regions(pdev);
 
        pci_disable_pcie_error_reporting(pdev);
        pci_disable_device(pdev);
index 41f32c0b341ed08829cfb14415c397455fcef753..02f443958f3199aaf95c786ef1b59bd2948f4937 100644 (file)
@@ -7330,8 +7330,7 @@ err_flashmap:
 err_ioremap:
        free_netdev(netdev);
 err_alloc_etherdev:
-       pci_release_selected_regions(pdev,
-                                    pci_select_bars(pdev, IORESOURCE_MEM));
+       pci_release_mem_regions(pdev);
 err_pci_reg:
 err_dma:
        pci_disable_device(pdev);
@@ -7398,8 +7397,7 @@ static void e1000_remove(struct pci_dev *pdev)
        if ((adapter->hw.flash_address) &&
            (adapter->hw.mac.type < e1000_pch_spt))
                iounmap(adapter->hw.flash_address);
-       pci_release_selected_regions(pdev,
-                                    pci_select_bars(pdev, IORESOURCE_MEM));
+       pci_release_mem_regions(pdev);
 
        free_netdev(netdev);
 
index b8245c734c969442bd6aa5d999fcff9c194ed62d..774a5654bf42489a8f7c37f7563d07144b30ec4c 100644 (file)
@@ -1963,10 +1963,7 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_dma;
        }
 
-       err = pci_request_selected_regions(pdev,
-                                          pci_select_bars(pdev,
-                                                          IORESOURCE_MEM),
-                                          fm10k_driver_name);
+       err = pci_request_mem_regions(pdev, fm10k_driver_name);
        if (err) {
                dev_err(&pdev->dev,
                        "pci_request_selected_regions failed: %d\n", err);
@@ -2070,8 +2067,7 @@ err_sw_init:
 err_ioremap:
        free_netdev(netdev);
 err_alloc_netdev:
-       pci_release_selected_regions(pdev,
-                                    pci_select_bars(pdev, IORESOURCE_MEM));
+       pci_release_mem_regions(pdev);
 err_pci_reg:
 err_dma:
        pci_disable_device(pdev);
@@ -2119,8 +2115,7 @@ static void fm10k_remove(struct pci_dev *pdev)
 
        free_netdev(netdev);
 
-       pci_release_selected_regions(pdev,
-                                    pci_select_bars(pdev, IORESOURCE_MEM));
+       pci_release_mem_regions(pdev);
 
        pci_disable_pcie_error_reporting(pdev);
 
index 339d99be4702599e0906ec1d4468d248216a575d..81c99e1be708d445270323ece7c6c63cbc681111 100644 (file)
@@ -10710,8 +10710,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* set up pci connections */
-       err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
-                                          IORESOURCE_MEM), i40e_driver_name);
+       err = pci_request_mem_regions(pdev, i40e_driver_name);
        if (err) {
                dev_info(&pdev->dev,
                         "pci_request_selected_regions failed %d\n", err);
@@ -11208,8 +11207,7 @@ err_ioremap:
        kfree(pf);
 err_pf_alloc:
        pci_disable_pcie_error_reporting(pdev);
-       pci_release_selected_regions(pdev,
-                                    pci_select_bars(pdev, IORESOURCE_MEM));
+       pci_release_mem_regions(pdev);
 err_pci_reg:
 err_dma:
        pci_disable_device(pdev);
@@ -11320,8 +11318,7 @@ static void i40e_remove(struct pci_dev *pdev)
 
        iounmap(hw->hw_addr);
        kfree(pf);
-       pci_release_selected_regions(pdev,
-                                    pci_select_bars(pdev, IORESOURCE_MEM));
+       pci_release_mem_regions(pdev);
 
        pci_disable_pcie_error_reporting(pdev);
        pci_disable_device(pdev);
index 9bcba42abb919c3a4df996773b909a9f93620862..942a89fb009088619936ea8e3ad909287d76ceca 100644 (file)
@@ -2324,9 +2324,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                }
        }
 
-       err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
-                                          IORESOURCE_MEM),
-                                          igb_driver_name);
+       err = pci_request_mem_regions(pdev, igb_driver_name);
        if (err)
                goto err_pci_reg;
 
@@ -2750,8 +2748,7 @@ err_sw_init:
 err_ioremap:
        free_netdev(netdev);
 err_alloc_etherdev:
-       pci_release_selected_regions(pdev,
-                                    pci_select_bars(pdev, IORESOURCE_MEM));
+       pci_release_mem_regions(pdev);
 err_pci_reg:
 err_dma:
        pci_disable_device(pdev);
@@ -2916,8 +2913,7 @@ static void igb_remove(struct pci_dev *pdev)
        pci_iounmap(pdev, adapter->io_addr);
        if (hw->flash_address)
                iounmap(hw->flash_address);
-       pci_release_selected_regions(pdev,
-                                    pci_select_bars(pdev, IORESOURCE_MEM));
+       pci_release_mem_regions(pdev);
 
        kfree(adapter->shadow_vfta);
        free_netdev(netdev);
index 7871f538f0ad0ea67e4d1564d19552706e5a501b..5418c69a74630bdd8b2aa2519c9e7f41c3e76ac9 100644 (file)
@@ -9353,8 +9353,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                pci_using_dac = 0;
        }
 
-       err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
-                                          IORESOURCE_MEM), ixgbe_driver_name);
+       err = pci_request_mem_regions(pdev, ixgbe_driver_name);
        if (err) {
                dev_err(&pdev->dev,
                        "pci_request_selected_regions failed 0x%x\n", err);
@@ -9740,8 +9739,7 @@ err_ioremap:
        disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
        free_netdev(netdev);
 err_alloc_etherdev:
-       pci_release_selected_regions(pdev,
-                                    pci_select_bars(pdev, IORESOURCE_MEM));
+       pci_release_mem_regions(pdev);
 err_pci_reg:
 err_dma:
        if (!adapter || disable_dev)
@@ -9808,8 +9806,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
 
 #endif
        iounmap(adapter->io_addr);
-       pci_release_selected_regions(pdev, pci_select_bars(pdev,
-                                    IORESOURCE_MEM));
+       pci_release_mem_regions(pdev);
 
        e_dev_info("complete\n");
 
index 4cb9b156cab7dfadefa3ab613b18e584b6aa6fbc..d7c33f9361aa0361d762d1da31368f03bdca3082 100644 (file)
@@ -1661,14 +1661,9 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 
 static void nvme_dev_unmap(struct nvme_dev *dev)
 {
-       struct pci_dev *pdev = to_pci_dev(dev->dev);
-       int bars;
-
        if (dev->bar)
                iounmap(dev->bar);
-
-       bars = pci_select_bars(pdev, IORESOURCE_MEM);
-       pci_release_selected_regions(pdev, bars);
+       pci_release_mem_regions(to_pci_dev(dev->dev));
 }
 
 static void nvme_pci_disable(struct nvme_dev *dev)
@@ -1897,13 +1892,9 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
 
 static int nvme_dev_map(struct nvme_dev *dev)
 {
-       int bars;
        struct pci_dev *pdev = to_pci_dev(dev->dev);
 
-       bars = pci_select_bars(pdev, IORESOURCE_MEM);
-       if (!bars)
-               return -ENODEV;
-       if (pci_request_selected_regions(pdev, bars, "nvme"))
+       if (pci_request_mem_regions(pdev, "nvme"))
                return -ENODEV;
 
        dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
@@ -1912,7 +1903,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
 
        return 0;
   release:
-       pci_release_selected_regions(pdev, bars);
+       pci_release_mem_regions(pdev);
        return -ENODEV;
 }
 
index 56389be5d08b666f9a5e57da836a709421c4237b..67f9916ff14d2bb168916b272e9e45aded4b5c7c 100644 (file)
@@ -25,7 +25,7 @@ config PCI_MSI
           If you don't know what to do here, say Y.
 
 config PCI_MSI_IRQ_DOMAIN
-       bool
+       def_bool ARM || ARM64 || X86
        depends on PCI_MSI
        select GENERIC_MSI_IRQ_DOMAIN
 
index dd7cdbee8029d5a51a60c63bb24a5772c0b84de9..c288e5a525754dbebe3165f44e5cbe0569d7ec21 100644 (file)
@@ -91,6 +91,35 @@ void pci_bus_remove_resources(struct pci_bus *bus)
        }
 }
 
+int devm_request_pci_bus_resources(struct device *dev,
+                                  struct list_head *resources)
+{
+       struct resource_entry *win;
+       struct resource *parent, *res;
+       int err;
+
+       resource_list_for_each_entry(win, resources) {
+               res = win->res;
+               switch (resource_type(res)) {
+               case IORESOURCE_IO:
+                       parent = &ioport_resource;
+                       break;
+               case IORESOURCE_MEM:
+                       parent = &iomem_resource;
+                       break;
+               default:
+                       continue;
+               }
+
+               err = devm_request_resource(dev, parent, res);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(devm_request_pci_bus_resources);
+
 static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL};
 #ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
 static struct pci_bus_region pci_64_bit = {0,
@@ -291,6 +320,7 @@ void pci_bus_add_device(struct pci_dev *dev)
        pci_fixup_device(pci_fixup_final, dev);
        pci_create_sysfs_dev_files(dev);
        pci_proc_attach_device(dev);
+       pci_bridge_d3_device_changed(dev);
 
        dev->match_driver = true;
        retval = device_attach(&dev->dev);
@@ -397,4 +427,3 @@ void pci_bus_put(struct pci_bus *bus)
                put_device(&bus->dev);
 }
 EXPORT_SYMBOL(pci_bus_put);
-
index f9832ad8efe2fd4ded2e4d7b636cd05f7a80a520..66e0d718472f2de7ec8408b567908d13b824c93f 100644 (file)
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/pci-ecam.h>
 #include <linux/slab.h>
 
-#include "ecam.h"
-
 /*
  * On 64-bit systems, we do a single ioremap for the whole config space
  * since we have enough virtual address range available.  On 32-bit, we
@@ -52,6 +51,7 @@ struct pci_config_window *pci_ecam_create(struct device *dev,
        if (!cfg)
                return ERR_PTR(-ENOMEM);
 
+       cfg->parent = dev;
        cfg->ops = ops;
        cfg->busr.start = busr->start;
        cfg->busr.end = busr->end;
@@ -95,7 +95,7 @@ struct pci_config_window *pci_ecam_create(struct device *dev,
        }
 
        if (ops->init) {
-               err = ops->init(dev, cfg);
+               err = ops->init(cfg);
                if (err)
                        goto err_exit;
        }
diff --git a/drivers/pci/ecam.h b/drivers/pci/ecam.h
deleted file mode 100644 (file)
index 9878beb..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright 2016 Broadcom
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation (the "GPL").
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License version 2 (GPLv2) for more details.
- *
- * You should have received a copy of the GNU General Public License
- * version 2 (GPLv2) along with this source code.
- */
-#ifndef DRIVERS_PCI_ECAM_H
-#define DRIVERS_PCI_ECAM_H
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-
-/*
- * struct to hold pci ops and bus shift of the config window
- * for a PCI controller.
- */
-struct pci_config_window;
-struct pci_ecam_ops {
-       unsigned int                    bus_shift;
-       struct pci_ops                  pci_ops;
-       int                             (*init)(struct device *,
-                                               struct pci_config_window *);
-};
-
-/*
- * struct to hold the mappings of a config space window. This
- * is expected to be used as sysdata for PCI controllers that
- * use ECAM.
- */
-struct pci_config_window {
-       struct resource                 res;
-       struct resource                 busr;
-       void                            *priv;
-       struct pci_ecam_ops             *ops;
-       union {
-               void __iomem            *win;   /* 64-bit single mapping */
-               void __iomem            **winp; /* 32-bit per-bus mapping */
-       };
-};
-
-/* create and free pci_config_window */
-struct pci_config_window *pci_ecam_create(struct device *dev,
-               struct resource *cfgres, struct resource *busr,
-               struct pci_ecam_ops *ops);
-void pci_ecam_free(struct pci_config_window *cfg);
-
-/* map_bus when ->sysdata is an instance of pci_config_window */
-void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn,
-                              int where);
-/* default ECAM ops */
-extern struct pci_ecam_ops pci_generic_ecam_ops;
-
-#ifdef CONFIG_PCI_HOST_GENERIC
-/* for DT-based PCI controllers that support ECAM */
-int pci_host_common_probe(struct platform_device *pdev,
-                         struct pci_ecam_ops *ops);
-#endif
-#endif
index 5d2374e4ee7fb53da52cd74c26acf65a8e7af6f0..9b485d873b0d336a1fb70a5aae1e3c96e4f26f01 100644 (file)
@@ -3,8 +3,9 @@ menu "PCI host controller drivers"
 
 config PCI_DRA7XX
        bool "TI DRA7xx PCIe controller"
-       select PCIE_DW
        depends on OF && HAS_IOMEM && TI_PIPE3
+       depends on PCI_MSI_IRQ_DOMAIN
+       select PCIE_DW
        help
         Enables support for the PCIe controller in the DRA7xx SoC.  There
         are two instances of PCIe controller in DRA7xx.  This controller can
@@ -16,11 +17,20 @@ config PCI_MVEBU
        depends on ARM
        depends on OF
 
+config PCI_AARDVARK
+       bool "Aardvark PCIe controller"
+       depends on ARCH_MVEBU && ARM64
+       depends on OF
+       depends on PCI_MSI_IRQ_DOMAIN
+       help
+        Add support for Aardvark 64bit PCIe Host Controller. This
+        controller is part of the South Bridge of the Marvel Armada
+        3700 SoC.
 
 config PCIE_XILINX_NWL
        bool "NWL PCIe Core"
        depends on ARCH_ZYNQMP
-       select PCI_MSI_IRQ_DOMAIN if PCI_MSI
+       depends on PCI_MSI_IRQ_DOMAIN
        help
         Say 'Y' here if you want kernel support for Xilinx
         NWL PCIe controller. The controller can act as Root Port
@@ -29,6 +39,7 @@ config PCIE_XILINX_NWL
 
 config PCIE_DW_PLAT
        bool "Platform bus based DesignWare PCIe Controller"
+       depends on PCI_MSI_IRQ_DOMAIN
        select PCIE_DW
        ---help---
         This selects the DesignWare PCIe controller support. Select this if
@@ -40,16 +51,19 @@ config PCIE_DW_PLAT
 
 config PCIE_DW
        bool
+       depends on PCI_MSI_IRQ_DOMAIN
 
 config PCI_EXYNOS
        bool "Samsung Exynos PCIe controller"
        depends on SOC_EXYNOS5440
+       depends on PCI_MSI_IRQ_DOMAIN
        select PCIEPORTBUS
        select PCIE_DW
 
 config PCI_IMX6
        bool "Freescale i.MX6 PCIe controller"
        depends on SOC_IMX6Q
+       depends on PCI_MSI_IRQ_DOMAIN
        select PCIEPORTBUS
        select PCIE_DW
 
@@ -72,8 +86,7 @@ config PCI_RCAR_GEN2
 config PCIE_RCAR
        bool "Renesas R-Car PCIe controller"
        depends on ARCH_RENESAS || (ARM && COMPILE_TEST)
-       select PCI_MSI
-       select PCI_MSI_IRQ_DOMAIN
+       depends on PCI_MSI_IRQ_DOMAIN
        help
          Say Y here if you want PCIe controller support on R-Car SoCs.
 
@@ -85,6 +98,7 @@ config PCI_HOST_GENERIC
        bool "Generic PCI host controller"
        depends on (ARM || ARM64) && OF
        select PCI_HOST_COMMON
+       select IRQ_DOMAIN
        help
          Say Y here if you want to support a simple generic PCI host
          controller, such as the one emulated by kvmtool.
@@ -92,6 +106,7 @@ config PCI_HOST_GENERIC
 config PCIE_SPEAR13XX
        bool "STMicroelectronics SPEAr PCIe controller"
        depends on ARCH_SPEAR13XX
+       depends on PCI_MSI_IRQ_DOMAIN
        select PCIEPORTBUS
        select PCIE_DW
        help
@@ -100,6 +115,7 @@ config PCIE_SPEAR13XX
 config PCI_KEYSTONE
        bool "TI Keystone PCIe controller"
        depends on ARCH_KEYSTONE
+       depends on PCI_MSI_IRQ_DOMAIN
        select PCIE_DW
        select PCIEPORTBUS
        help
@@ -120,7 +136,6 @@ config PCI_XGENE
        depends on ARCH_XGENE
        depends on OF
        select PCIEPORTBUS
-       select PCI_MSI_IRQ_DOMAIN if PCI_MSI
        help
          Say Y here if you want internal PCI support on APM X-Gene SoC.
          There are 5 internal PCIe ports available. Each port is GEN3 capable
@@ -128,7 +143,8 @@ config PCI_XGENE
 
 config PCI_XGENE_MSI
        bool "X-Gene v1 PCIe MSI feature"
-       depends on PCI_XGENE && PCI_MSI
+       depends on PCI_XGENE
+       depends on PCI_MSI_IRQ_DOMAIN
        default y
        help
          Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC.
@@ -137,6 +153,7 @@ config PCI_XGENE_MSI
 config PCI_LAYERSCAPE
        bool "Freescale Layerscape PCIe controller"
        depends on OF && (ARM || ARCH_LAYERSCAPE)
+       depends on PCI_MSI_IRQ_DOMAIN
        select PCIE_DW
        select MFD_SYSCON
        help
@@ -177,8 +194,7 @@ config PCIE_IPROC_BCMA
 config PCIE_IPROC_MSI
        bool "Broadcom iProc PCIe MSI support"
        depends on PCIE_IPROC_PLATFORM || PCIE_IPROC_BCMA
-       depends on PCI_MSI
-       select PCI_MSI_IRQ_DOMAIN
+       depends on PCI_MSI_IRQ_DOMAIN
        default ARCH_BCM_IPROC
        help
          Say Y here if you want to enable MSI support for Broadcom's iProc
@@ -195,8 +211,8 @@ config PCIE_ALTERA
 
 config PCIE_ALTERA_MSI
        bool "Altera PCIe MSI feature"
-       depends on PCIE_ALTERA && PCI_MSI
-       select PCI_MSI_IRQ_DOMAIN
+       depends on PCIE_ALTERA
+       depends on PCI_MSI_IRQ_DOMAIN
        help
          Say Y here if you want PCIe MSI support for the Altera FPGA.
          This MSI driver supports Altera MSI to GIC controller IP.
@@ -204,6 +220,7 @@ config PCIE_ALTERA_MSI
 config PCI_HISI
        depends on OF && ARM64
        bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers"
+       depends on PCI_MSI_IRQ_DOMAIN
        select PCIEPORTBUS
        select PCIE_DW
        help
@@ -213,6 +230,7 @@ config PCI_HISI
 config PCIE_QCOM
        bool "Qualcomm PCIe controller"
        depends on ARCH_QCOM && OF
+       depends on PCI_MSI_IRQ_DOMAIN
        select PCIE_DW
        select PCIEPORTBUS
        help
@@ -237,6 +255,7 @@ config PCI_HOST_THUNDER_ECAM
 config PCIE_ARMADA_8K
        bool "Marvell Armada-8K PCIe controller"
        depends on ARCH_MVEBU
+       depends on PCI_MSI_IRQ_DOMAIN
        select PCIE_DW
        select PCIEPORTBUS
        help
@@ -245,4 +264,14 @@ config PCIE_ARMADA_8K
          Designware hardware and therefore the driver re-uses the
          Designware core functions to implement the driver.
 
+config PCIE_ARTPEC6
+       bool "Axis ARTPEC-6 PCIe controller"
+       depends on MACH_ARTPEC6
+       depends on PCI_MSI_IRQ_DOMAIN
+       select PCIE_DW
+       select PCIEPORTBUS
+       help
+         Say Y here to enable PCIe controller support on Axis ARTPEC-6
+         SoCs.  This PCIe controller uses the DesignWare core.
+
 endmenu
index 9c8698e89e96ca463954bde10e2d9dbb355bcbb1..88434101e4c40075fc1e3d36ab29faf12ce30e32 100644 (file)
@@ -5,6 +5,7 @@ obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
 obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
 obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o
 obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
+obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o
 obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
 obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
 obj-$(CONFIG_PCIE_RCAR) += pcie-rcar.o
@@ -29,3 +30,4 @@ obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
 obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o
 obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o
 obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
+obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
new file mode 100644 (file)
index 0000000..ef9893f
--- /dev/null
@@ -0,0 +1,1001 @@
+/*
+ * Driver for the Aardvark PCIe controller, used on Marvell Armada
+ * 3700.
+ *
+ * Copyright (C) 2016 Marvell
+ *
+ * Author: Hezi Shahmoon <hezi.shahmoon@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+
+/* PCIe core registers */
+#define PCIE_CORE_CMD_STATUS_REG                               0x4
+#define     PCIE_CORE_CMD_IO_ACCESS_EN                         BIT(0)
+#define     PCIE_CORE_CMD_MEM_ACCESS_EN                                BIT(1)
+#define     PCIE_CORE_CMD_MEM_IO_REQ_EN                                BIT(2)
+#define PCIE_CORE_DEV_CTRL_STATS_REG                           0xc8
+#define     PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE       (0 << 4)
+#define     PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT      5
+#define     PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE             (0 << 11)
+#define     PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT     12
+#define PCIE_CORE_LINK_CTRL_STAT_REG                           0xd0
+#define     PCIE_CORE_LINK_L0S_ENTRY                           BIT(0)
+#define     PCIE_CORE_LINK_TRAINING                            BIT(5)
+#define     PCIE_CORE_LINK_WIDTH_SHIFT                         20
+#define PCIE_CORE_ERR_CAPCTL_REG                               0x118
+#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX                   BIT(5)
+#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN                        BIT(6)
+#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHCK                     BIT(7)
+#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV                 BIT(8)
+
+/* PIO registers base address and register offsets */
+#define PIO_BASE_ADDR                          0x4000
+#define PIO_CTRL                               (PIO_BASE_ADDR + 0x0)
+#define   PIO_CTRL_TYPE_MASK                   GENMASK(3, 0)
+#define   PIO_CTRL_ADDR_WIN_DISABLE            BIT(24)
+#define PIO_STAT                               (PIO_BASE_ADDR + 0x4)
+#define   PIO_COMPLETION_STATUS_SHIFT          7
+#define   PIO_COMPLETION_STATUS_MASK           GENMASK(9, 7)
+#define   PIO_COMPLETION_STATUS_OK             0
+#define   PIO_COMPLETION_STATUS_UR             1
+#define   PIO_COMPLETION_STATUS_CRS            2
+#define   PIO_COMPLETION_STATUS_CA             4
+#define   PIO_NON_POSTED_REQ                   BIT(0)
+#define PIO_ADDR_LS                            (PIO_BASE_ADDR + 0x8)
+#define PIO_ADDR_MS                            (PIO_BASE_ADDR + 0xc)
+#define PIO_WR_DATA                            (PIO_BASE_ADDR + 0x10)
+#define PIO_WR_DATA_STRB                       (PIO_BASE_ADDR + 0x14)
+#define PIO_RD_DATA                            (PIO_BASE_ADDR + 0x18)
+#define PIO_START                              (PIO_BASE_ADDR + 0x1c)
+#define PIO_ISR                                        (PIO_BASE_ADDR + 0x20)
+#define PIO_ISRM                               (PIO_BASE_ADDR + 0x24)
+
+/* Aardvark Control registers */
+#define CONTROL_BASE_ADDR                      0x4800
+#define PCIE_CORE_CTRL0_REG                    (CONTROL_BASE_ADDR + 0x0)
+#define     PCIE_GEN_SEL_MSK                   0x3
+#define     PCIE_GEN_SEL_SHIFT                 0x0
+#define     SPEED_GEN_1                                0
+#define     SPEED_GEN_2                                1
+#define     SPEED_GEN_3                                2
+#define     IS_RC_MSK                          1
+#define     IS_RC_SHIFT                                2
+#define     LANE_CNT_MSK                       0x18
+#define     LANE_CNT_SHIFT                     0x3
+#define     LANE_COUNT_1                       (0 << LANE_CNT_SHIFT)
+#define     LANE_COUNT_2                       (1 << LANE_CNT_SHIFT)
+#define     LANE_COUNT_4                       (2 << LANE_CNT_SHIFT)
+#define     LANE_COUNT_8                       (3 << LANE_CNT_SHIFT)
+#define     LINK_TRAINING_EN                   BIT(6)
+#define     LEGACY_INTA                                BIT(28)
+#define     LEGACY_INTB                                BIT(29)
+#define     LEGACY_INTC                                BIT(30)
+#define     LEGACY_INTD                                BIT(31)
+#define PCIE_CORE_CTRL1_REG                    (CONTROL_BASE_ADDR + 0x4)
+#define     HOT_RESET_GEN                      BIT(0)
+#define PCIE_CORE_CTRL2_REG                    (CONTROL_BASE_ADDR + 0x8)
+#define     PCIE_CORE_CTRL2_RESERVED           0x7
+#define     PCIE_CORE_CTRL2_TD_ENABLE          BIT(4)
+#define     PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE        BIT(5)
+#define     PCIE_CORE_CTRL2_OB_WIN_ENABLE      BIT(6)
+#define     PCIE_CORE_CTRL2_MSI_ENABLE         BIT(10)
+#define PCIE_ISR0_REG                          (CONTROL_BASE_ADDR + 0x40)
+#define PCIE_ISR0_MASK_REG                     (CONTROL_BASE_ADDR + 0x44)
+#define     PCIE_ISR0_MSI_INT_PENDING          BIT(24)
+#define     PCIE_ISR0_INTX_ASSERT(val)         BIT(16 + (val))
+#define     PCIE_ISR0_INTX_DEASSERT(val)       BIT(20 + (val))
+#define            PCIE_ISR0_ALL_MASK                  GENMASK(26, 0)
+#define PCIE_ISR1_REG                          (CONTROL_BASE_ADDR + 0x48)
+#define PCIE_ISR1_MASK_REG                     (CONTROL_BASE_ADDR + 0x4C)
+#define     PCIE_ISR1_POWER_STATE_CHANGE       BIT(4)
+#define     PCIE_ISR1_FLUSH                    BIT(5)
+#define     PCIE_ISR1_ALL_MASK                 GENMASK(5, 4)
+#define PCIE_MSI_ADDR_LOW_REG                  (CONTROL_BASE_ADDR + 0x50)
+#define PCIE_MSI_ADDR_HIGH_REG                 (CONTROL_BASE_ADDR + 0x54)
+#define PCIE_MSI_STATUS_REG                    (CONTROL_BASE_ADDR + 0x58)
+#define PCIE_MSI_MASK_REG                      (CONTROL_BASE_ADDR + 0x5C)
+#define PCIE_MSI_PAYLOAD_REG                   (CONTROL_BASE_ADDR + 0x9C)
+
+/* PCIe window configuration */
+#define OB_WIN_BASE_ADDR                       0x4c00
+#define OB_WIN_BLOCK_SIZE                      0x20
+#define OB_WIN_REG_ADDR(win, offset)           (OB_WIN_BASE_ADDR + \
+                                                OB_WIN_BLOCK_SIZE * (win) + \
+                                                (offset))
+#define OB_WIN_MATCH_LS(win)                   OB_WIN_REG_ADDR(win, 0x00)
+#define OB_WIN_MATCH_MS(win)                   OB_WIN_REG_ADDR(win, 0x04)
+#define OB_WIN_REMAP_LS(win)                   OB_WIN_REG_ADDR(win, 0x08)
+#define OB_WIN_REMAP_MS(win)                   OB_WIN_REG_ADDR(win, 0x0c)
+#define OB_WIN_MASK_LS(win)                    OB_WIN_REG_ADDR(win, 0x10)
+#define OB_WIN_MASK_MS(win)                    OB_WIN_REG_ADDR(win, 0x14)
+#define OB_WIN_ACTIONS(win)                    OB_WIN_REG_ADDR(win, 0x18)
+
+/* PCIe window types */
+#define OB_PCIE_MEM                            0x0
+#define OB_PCIE_IO                             0x4
+
+/* LMI registers base address and register offsets */
+#define LMI_BASE_ADDR                          0x6000
+#define CFG_REG                                        (LMI_BASE_ADDR + 0x0)
+#define     LTSSM_SHIFT                                24
+#define     LTSSM_MASK                         0x3f
+#define     LTSSM_L0                           0x10
+#define     RC_BAR_CONFIG                      0x300
+
+/* PCIe core controller registers */
+#define CTRL_CORE_BASE_ADDR                    0x18000
+#define CTRL_CONFIG_REG                                (CTRL_CORE_BASE_ADDR + 0x0)
+#define     CTRL_MODE_SHIFT                    0x0
+#define     CTRL_MODE_MASK                     0x1
+#define     PCIE_CORE_MODE_DIRECT              0x0
+#define     PCIE_CORE_MODE_COMMAND             0x1
+
+/* PCIe Central Interrupts Registers */
+#define CENTRAL_INT_BASE_ADDR                  0x1b000
+#define HOST_CTRL_INT_STATUS_REG               (CENTRAL_INT_BASE_ADDR + 0x0)
+#define HOST_CTRL_INT_MASK_REG                 (CENTRAL_INT_BASE_ADDR + 0x4)
+#define     PCIE_IRQ_CMDQ_INT                  BIT(0)
+#define     PCIE_IRQ_MSI_STATUS_INT            BIT(1)
+#define     PCIE_IRQ_CMD_SENT_DONE             BIT(3)
+#define     PCIE_IRQ_DMA_INT                   BIT(4)
+#define     PCIE_IRQ_IB_DXFERDONE              BIT(5)
+#define     PCIE_IRQ_OB_DXFERDONE              BIT(6)
+#define     PCIE_IRQ_OB_RXFERDONE              BIT(7)
+#define     PCIE_IRQ_COMPQ_INT                 BIT(12)
+#define     PCIE_IRQ_DIR_RD_DDR_DET            BIT(13)
+#define     PCIE_IRQ_DIR_WR_DDR_DET            BIT(14)
+#define     PCIE_IRQ_CORE_INT                  BIT(16)
+#define     PCIE_IRQ_CORE_INT_PIO              BIT(17)
+#define     PCIE_IRQ_DPMU_INT                  BIT(18)
+#define     PCIE_IRQ_PCIE_MIS_INT              BIT(19)
+#define     PCIE_IRQ_MSI_INT1_DET              BIT(20)
+#define     PCIE_IRQ_MSI_INT2_DET              BIT(21)
+#define     PCIE_IRQ_RC_DBELL_DET              BIT(22)
+#define     PCIE_IRQ_EP_STATUS                 BIT(23)
+#define     PCIE_IRQ_ALL_MASK                  0xfff0fb
+#define     PCIE_IRQ_ENABLE_INTS_MASK          PCIE_IRQ_CORE_INT
+
+/* Transaction types */
+#define PCIE_CONFIG_RD_TYPE0                   0x8
+#define PCIE_CONFIG_RD_TYPE1                   0x9
+#define PCIE_CONFIG_WR_TYPE0                   0xa
+#define PCIE_CONFIG_WR_TYPE1                   0xb
+
+/* PCI_BDF shifts 8bit, so we need extra 4bit shift */
+#define PCIE_BDF(dev)                          (dev << 4)
+#define PCIE_CONF_BUS(bus)                     (((bus) & 0xff) << 20)
+#define PCIE_CONF_DEV(dev)                     (((dev) & 0x1f) << 15)
+#define PCIE_CONF_FUNC(fun)                    (((fun) & 0x7)  << 12)
+#define PCIE_CONF_REG(reg)                     ((reg) & 0xffc)
+#define PCIE_CONF_ADDR(bus, devfn, where)      \
+       (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn))    | \
+        PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where))
+
+#define PIO_TIMEOUT_MS                 1
+
+#define LINK_WAIT_MAX_RETRIES          10
+#define LINK_WAIT_USLEEP_MIN           90000
+#define LINK_WAIT_USLEEP_MAX           100000
+
+#define LEGACY_IRQ_NUM                 4
+#define MSI_IRQ_NUM                    32
+
+/* Driver-private state for one Aardvark PCIe controller instance. */
+struct advk_pcie {
+       struct platform_device *pdev;   /* owning platform device (for dev_* logging) */
+       void __iomem *base;             /* mapped controller register window */
+       struct list_head resources;     /* host bridge resource list */
+       struct irq_domain *irq_domain;  /* legacy INTx irq domain */
+       struct irq_chip irq_chip;       /* chip backing the INTx domain */
+       struct msi_controller msi;      /* MSI controller hooks (setup/teardown) */
+       struct irq_domain *msi_domain;  /* MSI irq domain */
+       struct irq_chip msi_irq_chip;   /* chip backing the MSI domain */
+       DECLARE_BITMAP(msi_irq_in_use, MSI_IRQ_NUM);    /* allocated MSI hwirqs */
+       struct mutex msi_used_lock;     /* protects msi_irq_in_use */
+       u16 msi_msg;                    /* MSI doorbell target; its phys addr is programmed into HW */
+       int root_bus_nr;                /* bus number of the root bus */
+};
+
+/* Write a 32-bit value to the controller register at offset @reg. */
+static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
+{
+       writel(val, pcie->base + reg);
+}
+
+/* Read the 32-bit controller register at offset @reg. */
+static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
+{
+       return readl(pcie->base + reg);
+}
+
+/*
+ * Return non-zero when the LTSSM state read from CFG_REG is at or
+ * beyond L0, i.e. the link is operational.
+ */
+static int advk_pcie_link_up(struct advk_pcie *pcie)
+{
+       u32 val, ltssm_state;
+
+       val = advk_readl(pcie, CFG_REG);
+       ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK;
+       return ltssm_state >= LTSSM_L0;
+}
+
+/*
+ * Poll for link-up, retrying up to LINK_WAIT_MAX_RETRIES times with a
+ * ~90-100 ms sleep between polls (roughly a 1 s total budget).
+ *
+ * Returns 0 once the link is up, -ETIMEDOUT otherwise.
+ */
+static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
+{
+       int retries;
+
+       /* check if the link is up or not */
+       for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+               if (advk_pcie_link_up(pcie)) {
+                       dev_info(&pcie->pdev->dev, "link up\n");
+                       return 0;
+               }
+
+               usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+       }
+
+       dev_err(&pcie->pdev->dev, "link never came up\n");
+
+       return -ETIMEDOUT;
+}
+
+/*
+ * Configure one outbound address translation window: program the
+ * match/mask/remap register pairs and the window action, then re-write
+ * MATCH_LS with BIT(0) set (presumably the window-enable bit — confirm
+ * against the Armada 3700 register spec).
+ */
+static void advk_pcie_set_ob_win(struct advk_pcie *pcie,
+                                u32 win_num, u32 match_ms,
+                                u32 match_ls, u32 mask_ms,
+                                u32 mask_ls, u32 remap_ms,
+                                u32 remap_ls, u32 action)
+{
+       advk_writel(pcie, match_ls, OB_WIN_MATCH_LS(win_num));
+       advk_writel(pcie, match_ms, OB_WIN_MATCH_MS(win_num));
+       advk_writel(pcie, mask_ms, OB_WIN_MASK_MS(win_num));
+       advk_writel(pcie, mask_ls, OB_WIN_MASK_LS(win_num));
+       advk_writel(pcie, remap_ms, OB_WIN_REMAP_MS(win_num));
+       advk_writel(pcie, remap_ls, OB_WIN_REMAP_LS(win_num));
+       advk_writel(pcie, action, OB_WIN_ACTIONS(win_num));
+       advk_writel(pcie, match_ls | BIT(0), OB_WIN_MATCH_LS(win_num));
+}
+
+/*
+ * One-time hardware initialization: put the core in root-complex Direct
+ * mode, program error capture, device control and link parameters
+ * (gen2, x1), set up interrupt masking and the MSI enable, then start
+ * link training and finally enable mem/IO request forwarding.
+ */
+static void advk_pcie_setup_hw(struct advk_pcie *pcie)
+{
+       u32 reg;
+       int i;
+
+       /* Point PCIe unit MBUS decode windows to DRAM space */
+       for (i = 0; i < 8; i++)
+               advk_pcie_set_ob_win(pcie, i, 0, 0, 0, 0, 0, 0, 0);
+
+       /* Set to Direct mode */
+       reg = advk_readl(pcie, CTRL_CONFIG_REG);
+       reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT);
+       reg |= ((PCIE_CORE_MODE_DIRECT & CTRL_MODE_MASK) << CTRL_MODE_SHIFT);
+       advk_writel(pcie, reg, CTRL_CONFIG_REG);
+
+       /* Set PCI global control register to RC mode */
+       reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+       reg |= (IS_RC_MSK << IS_RC_SHIFT);
+       advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+       /* Set Advanced Error Capabilities and Control PF0 register */
+       reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
+               PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
+               PCIE_CORE_ERR_CAPCTL_ECRC_CHCK |
+               PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV;
+       advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG);
+
+       /*
+        * Set PCIe Device Control and Status 1 PF0 register.
+        * Max_Read_Request_Size is an encoded field starting at bit 12;
+        * program 512 bytes (encoding 2) shifted into place, rather than
+        * or-ing in the bare shift amount (12), which set unrelated low
+        * bits and left the size field at 0.
+        */
+       reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
+               (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
+               PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
+               (2 << PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT);
+       advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
+
+       /* Program PCIe Control 2 to disable strict ordering */
+       reg = PCIE_CORE_CTRL2_RESERVED |
+               PCIE_CORE_CTRL2_TD_ENABLE;
+       advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
+
+       /* Set GEN2 */
+       reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+       reg &= ~PCIE_GEN_SEL_MSK;
+       reg |= SPEED_GEN_2;
+       advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+       /* Set lane X1 */
+       reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+       reg &= ~LANE_CNT_MSK;
+       reg |= LANE_COUNT_1;
+       advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+       /* Enable link training */
+       reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+       reg |= LINK_TRAINING_EN;
+       advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+       /* Enable MSI */
+       reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
+       reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
+       advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
+
+       /* Clear all interrupts */
+       advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
+       advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
+       advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
+
+       /* Disable All ISR0/1 Sources except the MSI summary bit */
+       reg = PCIE_ISR0_ALL_MASK;
+       reg &= ~PCIE_ISR0_MSI_INT_PENDING;
+       advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);
+
+       advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
+
+       /* Unmask all MSI's */
+       advk_writel(pcie, 0, PCIE_MSI_MASK_REG);
+
+       /* Enable summary interrupt for GIC SPI source */
+       reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
+       advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);
+
+       reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
+       reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
+       advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
+
+       /* Bypass the address window mapping for PIO */
+       reg = advk_readl(pcie, PIO_CTRL);
+       reg |= PIO_CTRL_ADDR_WIN_DISABLE;
+       advk_writel(pcie, reg, PIO_CTRL);
+
+       /* Start link training */
+       reg = advk_readl(pcie, PCIE_CORE_LINK_CTRL_STAT_REG);
+       reg |= PCIE_CORE_LINK_TRAINING;
+       advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG);
+
+       advk_pcie_wait_for_link(pcie);
+
+       reg = PCIE_CORE_LINK_L0S_ENTRY |
+               (1 << PCIE_CORE_LINK_WIDTH_SHIFT);
+       advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG);
+
+       /* Enable memory/IO decoding and request forwarding */
+       reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
+       reg |= PCIE_CORE_CMD_MEM_ACCESS_EN |
+               PCIE_CORE_CMD_IO_ACCESS_EN |
+               PCIE_CORE_CMD_MEM_IO_REQ_EN;
+       advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
+}
+
+/*
+ * Decode the completion status of the last PIO transaction and log an
+ * error for anything other than a successful completion (UR, CRS, CA).
+ * NOTE(review): diagnostic only — the failure is not propagated to the
+ * caller, which still consumes the read data.
+ */
+static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
+{
+       u32 reg;
+       unsigned int status;
+       char *strcomp_status, *str_posted;
+
+       reg = advk_readl(pcie, PIO_STAT);
+       status = (reg & PIO_COMPLETION_STATUS_MASK) >>
+               PIO_COMPLETION_STATUS_SHIFT;
+
+       /* Status 0 (PIO_COMPLETION_STATUS_OK) is a success: stay quiet */
+       if (!status)
+               return;
+
+       switch (status) {
+       case PIO_COMPLETION_STATUS_UR:
+               strcomp_status = "UR";
+               break;
+       case PIO_COMPLETION_STATUS_CRS:
+               strcomp_status = "CRS";
+               break;
+       case PIO_COMPLETION_STATUS_CA:
+               strcomp_status = "CA";
+               break;
+       default:
+               strcomp_status = "Unknown";
+               break;
+       }
+
+       if (reg & PIO_NON_POSTED_REQ)
+               str_posted = "Non-posted";
+       else
+               str_posted = "Posted";
+
+       dev_err(&pcie->pdev->dev, "%s PIO Response Status: %s, %#x @ %#x\n",
+               str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
+}
+
+/*
+ * Poll for completion of the PIO transaction kicked off via PIO_START:
+ * done when the start bit has self-cleared and the PIO ISR is set.
+ *
+ * Returns 0 on completion, -ETIMEDOUT after PIO_TIMEOUT_MS.
+ * NOTE(review): tight busy-wait with no cpu_relax()/delay between
+ * register reads — confirm this is acceptable for the 1 ms budget.
+ */
+static int advk_pcie_wait_pio(struct advk_pcie *pcie)
+{
+       unsigned long timeout;
+
+       timeout = jiffies + msecs_to_jiffies(PIO_TIMEOUT_MS);
+
+       while (time_before(jiffies, timeout)) {
+               u32 start, isr;
+
+               start = advk_readl(pcie, PIO_START);
+               isr = advk_readl(pcie, PIO_ISR);
+               if (!start && isr)
+                       return 0;
+       }
+
+       dev_err(&pcie->pdev->dev, "config read/write timed out\n");
+       return -ETIMEDOUT;
+}
+
+/*
+ * Read config space via a PIO transaction.  Only device 0 is
+ * addressable on the root bus; other slots fail immediately.
+ *
+ * Returns a PCIBIOS_* status code; *val holds the (sub-)register
+ * value, or 0xffffffff on failure.
+ */
+static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
+                            int where, int size, u32 *val)
+{
+       struct advk_pcie *pcie = bus->sysdata;
+       u32 reg;
+       int ret;
+
+       if (PCI_SLOT(devfn) != 0) {
+               *val = 0xffffffff;
+               return PCIBIOS_DEVICE_NOT_FOUND;
+       }
+
+       /* Start PIO */
+       advk_writel(pcie, 0, PIO_START);
+       advk_writel(pcie, 1, PIO_ISR);
+
+       /* Program the control register */
+       reg = advk_readl(pcie, PIO_CTRL);
+       reg &= ~PIO_CTRL_TYPE_MASK;
+       if (bus->number == pcie->root_bus_nr)
+               reg |= PCIE_CONFIG_RD_TYPE0;
+       else
+               reg |= PCIE_CONFIG_RD_TYPE1;
+       advk_writel(pcie, reg, PIO_CTRL);
+
+       /*
+        * Program the address registers.  Use the same full
+        * bus/device/function/register encoding as advk_pcie_wr_conf();
+        * the previous PCIE_BDF()-only encoding never programmed the bus
+        * number, mis-addressing type-1 accesses to devices behind a
+        * bridge.
+        */
+       reg = PCIE_CONF_ADDR(bus->number, devfn, where);
+       advk_writel(pcie, reg, PIO_ADDR_LS);
+       advk_writel(pcie, 0, PIO_ADDR_MS);
+
+       /* Program the data strobe */
+       advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
+
+       /* Start the transfer */
+       advk_writel(pcie, 1, PIO_START);
+
+       ret = advk_pcie_wait_pio(pcie);
+       if (ret < 0) {
+               /* Report all-ones, like an aborted/failed config read */
+               *val = 0xffffffff;
+               return PCIBIOS_SET_FAILED;
+       }
+
+       advk_pcie_check_pio_status(pcie);
+
+       /* Get the read result, extracting the addressed byte/word lane */
+       *val = advk_readl(pcie, PIO_RD_DATA);
+       if (size == 1)
+               *val = (*val >> (8 * (where & 3))) & 0xff;
+       else if (size == 2)
+               *val = (*val >> (8 * (where & 3))) & 0xffff;
+
+       return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ * Write config space via a PIO transaction.  Only device 0 is
+ * addressable on the root bus; accesses not aligned to their own size
+ * are rejected.
+ *
+ * Returns a PCIBIOS_* status code.
+ */
+static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+                               int where, int size, u32 val)
+{
+       struct advk_pcie *pcie = bus->sysdata;
+       u32 reg;
+       u32 data_strobe = 0x0;
+       int offset;
+       int ret;
+
+       if (PCI_SLOT(devfn) != 0)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       if (where % size)
+               return PCIBIOS_SET_FAILED;
+
+       /* Start PIO */
+       advk_writel(pcie, 0, PIO_START);
+       advk_writel(pcie, 1, PIO_ISR);
+
+       /* Program the control register */
+       reg = advk_readl(pcie, PIO_CTRL);
+       reg &= ~PIO_CTRL_TYPE_MASK;
+       if (bus->number == pcie->root_bus_nr)
+               reg |= PCIE_CONFIG_WR_TYPE0;
+       else
+               reg |= PCIE_CONFIG_WR_TYPE1;
+       advk_writel(pcie, reg, PIO_CTRL);
+
+       /* Program the address registers */
+       reg = PCIE_CONF_ADDR(bus->number, devfn, where);
+       advk_writel(pcie, reg, PIO_ADDR_LS);
+       advk_writel(pcie, 0, PIO_ADDR_MS);
+
+       /* Calculate the write strobe: one byte-enable bit per written byte */
+       offset      = where & 0x3;
+       reg         = val << (8 * offset);
+       data_strobe = GENMASK(size - 1, 0) << offset;
+
+       /* Program the data register */
+       advk_writel(pcie, reg, PIO_WR_DATA);
+
+       /* Program the data strobe */
+       advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);
+
+       /* Start the transfer */
+       advk_writel(pcie, 1, PIO_START);
+
+       ret = advk_pcie_wait_pio(pcie);
+       if (ret < 0)
+               return PCIBIOS_SET_FAILED;
+
+       advk_pcie_check_pio_status(pcie);
+
+       return PCIBIOS_SUCCESSFUL;
+}
+
+/* Config-space accessors handed to the generic PCI core. */
+static struct pci_ops advk_pcie_ops = {
+       .read = advk_pcie_rd_conf,
+       .write = advk_pcie_wr_conf,
+};
+
+/*
+ * Reserve the lowest free MSI hwirq from the 32-entry pool.
+ * Returns the hwirq number, or -ENOSPC when the pool is exhausted.
+ */
+static int advk_pcie_alloc_msi(struct advk_pcie *pcie)
+{
+       int hwirq;
+
+       mutex_lock(&pcie->msi_used_lock);
+       hwirq = find_first_zero_bit(pcie->msi_irq_in_use, MSI_IRQ_NUM);
+       if (hwirq >= MSI_IRQ_NUM)
+               hwirq = -ENOSPC;
+       else
+               set_bit(hwirq, pcie->msi_irq_in_use);
+       mutex_unlock(&pcie->msi_used_lock);
+
+       return hwirq;
+}
+
+/* Return an MSI hwirq to the pool; logs if it was not allocated. */
+static void advk_pcie_free_msi(struct advk_pcie *pcie, int hwirq)
+{
+       mutex_lock(&pcie->msi_used_lock);
+       if (!test_bit(hwirq, pcie->msi_irq_in_use))
+               dev_err(&pcie->pdev->dev, "trying to free unused MSI#%d\n",
+                       hwirq);
+       else
+               clear_bit(hwirq, pcie->msi_irq_in_use);
+       mutex_unlock(&pcie->msi_used_lock);
+}
+
+/*
+ * msi_controller ->setup_irq hook: allocate a free hwirq, map it in the
+ * MSI domain and compose the MSI message.  The doorbell address is the
+ * physical address of pcie->msi_msg (programmed into the controller at
+ * init time) and the payload is the Linux virq number, which
+ * advk_pcie_handle_msi() later reads back from PCIE_MSI_PAYLOAD_REG.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int advk_pcie_setup_msi_irq(struct msi_controller *chip,
+                                  struct pci_dev *pdev,
+                                  struct msi_desc *desc)
+{
+       struct advk_pcie *pcie = pdev->bus->sysdata;
+       struct msi_msg msg;
+       int virq, hwirq;
+       phys_addr_t msi_msg_phys;
+
+       /* We support MSI, but not MSI-X */
+       if (desc->msi_attrib.is_msix)
+               return -EINVAL;
+
+       hwirq = advk_pcie_alloc_msi(pcie);
+       if (hwirq < 0)
+               return hwirq;
+
+       virq = irq_create_mapping(pcie->msi_domain, hwirq);
+       if (!virq) {
+               advk_pcie_free_msi(pcie, hwirq);
+               return -EINVAL;
+       }
+
+       irq_set_msi_desc(virq, desc);
+
+       msi_msg_phys = virt_to_phys(&pcie->msi_msg);
+
+       msg.address_lo = lower_32_bits(msi_msg_phys);
+       msg.address_hi = upper_32_bits(msi_msg_phys);
+       msg.data = virq;        /* delivered payload; see advk_pcie_handle_msi() */
+
+       pci_write_msi_msg(virq, &msg);
+
+       return 0;
+}
+
+/*
+ * msi_controller ->teardown_irq hook: undo advk_pcie_setup_msi_irq() —
+ * dispose of the virq mapping and return the hwirq to the pool.
+ */
+static void advk_pcie_teardown_msi_irq(struct msi_controller *chip,
+                                      unsigned int irq)
+{
+       struct irq_data *d = irq_get_irq_data(irq);
+       struct msi_desc *msi = irq_data_get_msi_desc(d);
+       struct advk_pcie *pcie = msi_desc_to_pci_sysdata(msi);
+       unsigned long hwirq = d->hwirq;
+
+       irq_dispose_mapping(irq);
+       advk_pcie_free_msi(pcie, hwirq);
+}
+
+/* ->map for the MSI domain: attach the MSI chip with a simple handler. */
+static int advk_pcie_msi_map(struct irq_domain *domain,
+                            unsigned int virq, irq_hw_number_t hw)
+{
+       struct advk_pcie *pcie = domain->host_data;
+
+       irq_set_chip_and_handler(virq, &pcie->msi_irq_chip,
+                                handle_simple_irq);
+
+       return 0;
+}
+
+/* Domain ops for the MSI irq domain (linear mapping only). */
+static const struct irq_domain_ops advk_pcie_msi_irq_ops = {
+       .map = advk_pcie_msi_map,
+};
+
+/* Mask the INTx "assert" event for this hwirq in the ISR0 mask register. */
+static void advk_pcie_irq_mask(struct irq_data *d)
+{
+       struct advk_pcie *pcie = d->domain->host_data;
+       irq_hw_number_t hwirq = irqd_to_hwirq(d);
+       u32 mask;
+
+       mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+       mask |= PCIE_ISR0_INTX_ASSERT(hwirq);
+       advk_writel(pcie, mask, PCIE_ISR0_MASK_REG);
+}
+
+/* Unmask the INTx "assert" event for this hwirq in the ISR0 mask register. */
+static void advk_pcie_irq_unmask(struct irq_data *d)
+{
+       struct advk_pcie *pcie = d->domain->host_data;
+       irq_hw_number_t hwirq = irqd_to_hwirq(d);
+       u32 mask;
+
+       mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+       mask &= ~PCIE_ISR0_INTX_ASSERT(hwirq);
+       advk_writel(pcie, mask, PCIE_ISR0_MASK_REG);
+}
+
+/*
+ * ->map for the INTx domain: new virqs start masked, level-triggered,
+ * using the per-controller irq_chip set up in
+ * advk_pcie_init_irq_domain().
+ */
+static int advk_pcie_irq_map(struct irq_domain *h,
+                            unsigned int virq, irq_hw_number_t hwirq)
+{
+       struct advk_pcie *pcie = h->host_data;
+
+       advk_pcie_irq_mask(irq_get_irq_data(virq));
+       irq_set_status_flags(virq, IRQ_LEVEL);
+       irq_set_chip_and_handler(virq, &pcie->irq_chip,
+                                handle_level_irq);
+       irq_set_chip_data(virq, pcie);
+
+       return 0;
+}
+
+/* Domain ops for the legacy INTx irq domain. */
+static const struct irq_domain_ops advk_pcie_irq_domain_ops = {
+       .map = advk_pcie_irq_map,
+       .xlate = irq_domain_xlate_onecell,
+};
+
+/*
+ * Set up MSI support: fill in the MSI irq_chip and msi_controller,
+ * program the MSI doorbell address (the physical address of
+ * pcie->msi_msg) into the controller, create a linear MSI irq domain
+ * and register the msi_controller with the OF infrastructure.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
+{
+       struct device *dev = &pcie->pdev->dev;
+       struct device_node *node = dev->of_node;
+       struct irq_chip *msi_irq_chip;
+       struct msi_controller *msi;
+       phys_addr_t msi_msg_phys;
+       int ret;
+
+       msi_irq_chip = &pcie->msi_irq_chip;
+
+       msi_irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-msi",
+                                           dev_name(dev));
+       if (!msi_irq_chip->name)
+               return -ENOMEM;
+
+       msi_irq_chip->irq_enable = pci_msi_unmask_irq;
+       msi_irq_chip->irq_disable = pci_msi_mask_irq;
+       msi_irq_chip->irq_mask = pci_msi_mask_irq;
+       msi_irq_chip->irq_unmask = pci_msi_unmask_irq;
+
+       msi = &pcie->msi;
+
+       msi->setup_irq = advk_pcie_setup_msi_irq;
+       msi->teardown_irq = advk_pcie_teardown_msi_irq;
+       msi->of_node = node;
+
+       mutex_init(&pcie->msi_used_lock);
+
+       msi_msg_phys = virt_to_phys(&pcie->msi_msg);
+
+       advk_writel(pcie, lower_32_bits(msi_msg_phys),
+                   PCIE_MSI_ADDR_LOW_REG);
+       advk_writel(pcie, upper_32_bits(msi_msg_phys),
+                   PCIE_MSI_ADDR_HIGH_REG);
+
+       /* The MSI domain is not tied to any DT node (NULL fwnode) */
+       pcie->msi_domain =
+               irq_domain_add_linear(NULL, MSI_IRQ_NUM,
+                                     &advk_pcie_msi_irq_ops, pcie);
+       if (!pcie->msi_domain)
+               return -ENOMEM;
+
+       ret = of_pci_msi_chip_add(msi);
+       if (ret < 0) {
+               irq_domain_remove(pcie->msi_domain);
+               return ret;
+       }
+
+       return 0;
+}
+
+/* Tear down what advk_pcie_init_msi_irq_domain() registered. */
+static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie)
+{
+       of_pci_msi_chip_remove(&pcie->msi);
+       irq_domain_remove(pcie->msi_domain);
+}
+
+/*
+ * Create the irq domain for the four legacy INTx interrupts.  The INTx
+ * interrupt controller is described by the first child node of the
+ * controller's DT node.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
+{
+       struct device *dev = &pcie->pdev->dev;
+       struct device_node *node = dev->of_node;
+       struct device_node *pcie_intc_node;
+       struct irq_chip *irq_chip;
+       int ret = 0;
+
+       pcie_intc_node = of_get_next_child(node, NULL);
+       if (!pcie_intc_node) {
+               dev_err(dev, "No PCIe Intc node found\n");
+               return -ENODEV;
+       }
+
+       irq_chip = &pcie->irq_chip;
+
+       irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq",
+                                       dev_name(dev));
+       if (!irq_chip->name) {
+               ret = -ENOMEM;
+               goto out_put_node;
+       }
+
+       irq_chip->irq_mask = advk_pcie_irq_mask;
+       irq_chip->irq_mask_ack = advk_pcie_irq_mask;
+       irq_chip->irq_unmask = advk_pcie_irq_unmask;
+
+       pcie->irq_domain =
+               irq_domain_add_linear(pcie_intc_node, LEGACY_IRQ_NUM,
+                                     &advk_pcie_irq_domain_ops, pcie);
+       if (!pcie->irq_domain) {
+               dev_err(dev, "Failed to get a INTx IRQ domain\n");
+               ret = -ENOMEM;
+       }
+
+out_put_node:
+       /*
+        * Drop the reference taken by of_get_next_child() on every path;
+        * the original leaked it on success.
+        */
+       of_node_put(pcie_intc_node);
+       return ret;
+}
+
+static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
+{
+       irq_domain_remove(pcie->irq_domain);
+}
+
+static void advk_pcie_handle_msi(struct advk_pcie *pcie)
+{
+       u32 msi_val, msi_mask, msi_status, msi_idx;
+       u16 msi_data;
+
+       msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
+       msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
+       msi_status = msi_val & ~msi_mask;
+
+       for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) {
+               if (!(BIT(msi_idx) & msi_status))
+                       continue;
+
+               advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
+               msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & 0xFF;
+               generic_handle_irq(msi_data);
+       }
+
+       advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING,
+                   PCIE_ISR0_REG);
+}
+
+static void advk_pcie_handle_int(struct advk_pcie *pcie)
+{
+       u32 val, mask, status;
+       int i, virq;
+
+       val = advk_readl(pcie, PCIE_ISR0_REG);
+       mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+       status = val & ((~mask) & PCIE_ISR0_ALL_MASK);
+
+       if (!status) {
+               advk_writel(pcie, val, PCIE_ISR0_REG);
+               return;
+       }
+
+       /* Process MSI interrupts */
+       if (status & PCIE_ISR0_MSI_INT_PENDING)
+               advk_pcie_handle_msi(pcie);
+
+       /* Process legacy interrupts */
+       for (i = 0; i < LEGACY_IRQ_NUM; i++) {
+               if (!(status & PCIE_ISR0_INTX_ASSERT(i)))
+                       continue;
+
+               advk_writel(pcie, PCIE_ISR0_INTX_ASSERT(i),
+                           PCIE_ISR0_REG);
+
+               virq = irq_find_mapping(pcie->irq_domain, i);
+               generic_handle_irq(virq);
+       }
+}
+
+static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
+{
+       struct advk_pcie *pcie = arg;
+       u32 status;
+
+       status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
+       if (!(status & PCIE_IRQ_CORE_INT))
+               return IRQ_NONE;
+
+       advk_pcie_handle_int(pcie);
+
+       /* Clear interrupt */
+       advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
+
+       return IRQ_HANDLED;
+}
+
+static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
+{
+       int err, res_valid = 0;
+       struct device *dev = &pcie->pdev->dev;
+       struct device_node *np = dev->of_node;
+       struct resource_entry *win;
+       resource_size_t iobase;
+
+       INIT_LIST_HEAD(&pcie->resources);
+
+       err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pcie->resources,
+                                              &iobase);
+       if (err)
+               return err;
+
+       err = devm_request_pci_bus_resources(dev, &pcie->resources);
+       if (err)
+               goto out_release_res;
+
+       resource_list_for_each_entry(win, &pcie->resources) {
+               struct resource *res = win->res;
+
+               switch (resource_type(res)) {
+               case IORESOURCE_IO:
+                       advk_pcie_set_ob_win(pcie, 1,
+                                            upper_32_bits(res->start),
+                                            lower_32_bits(res->start),
+                                            0, 0xF8000000, 0,
+                                            lower_32_bits(res->start),
+                                            OB_PCIE_IO);
+                       err = pci_remap_iospace(res, iobase);
+                       if (err)
+                               dev_warn(dev, "error %d: failed to map resource %pR\n",
+                                        err, res);
+                       break;
+               case IORESOURCE_MEM:
+                       advk_pcie_set_ob_win(pcie, 0,
+                                            upper_32_bits(res->start),
+                                            lower_32_bits(res->start),
+                                            0x0, 0xF8000000, 0,
+                                            lower_32_bits(res->start),
+                                            (2 << 20) | OB_PCIE_MEM);
+                       res_valid |= !(res->flags & IORESOURCE_PREFETCH);
+                       break;
+               case IORESOURCE_BUS:
+                       pcie->root_bus_nr = res->start;
+                       break;
+               }
+       }
+
+       if (!res_valid) {
+               dev_err(dev, "non-prefetchable memory resource required\n");
+               err = -EINVAL;
+               goto out_release_res;
+       }
+
+       return 0;
+
+out_release_res:
+       pci_free_resource_list(&pcie->resources);
+       return err;
+}
+
+static int advk_pcie_probe(struct platform_device *pdev)
+{
+       struct advk_pcie *pcie;
+       struct resource *res;
+       struct pci_bus *bus, *child;
+       struct msi_controller *msi;
+       struct device_node *msi_node;
+       int ret, irq;
+
+       pcie = devm_kzalloc(&pdev->dev, sizeof(struct advk_pcie),
+                           GFP_KERNEL);
+       if (!pcie)
+               return -ENOMEM;
+
+       pcie->pdev = pdev;
+       platform_set_drvdata(pdev, pcie);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       pcie->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(pcie->base)) {
+               dev_err(&pdev->dev, "Failed to map registers\n");
+               return PTR_ERR(pcie->base);
+       }
+
+       irq = platform_get_irq(pdev, 0);
+       ret = devm_request_irq(&pdev->dev, irq, advk_pcie_irq_handler,
+                              IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
+                              pcie);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to register interrupt\n");
+               return ret;
+       }
+
+       ret = advk_pcie_parse_request_of_pci_ranges(pcie);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to parse resources\n");
+               return ret;
+       }
+
+       advk_pcie_setup_hw(pcie);
+
+       ret = advk_pcie_init_irq_domain(pcie);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to initialize irq\n");
+               return ret;
+       }
+
+       ret = advk_pcie_init_msi_irq_domain(pcie);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to initialize irq\n");
+               advk_pcie_remove_irq_domain(pcie);
+               return ret;
+       }
+
+       msi_node = of_parse_phandle(pdev->dev.of_node, "msi-parent", 0);
+       if (msi_node)
+               msi = of_pci_find_msi_chip_by_node(msi_node);
+       else
+               msi = NULL;
+
+       bus = pci_scan_root_bus_msi(&pdev->dev, 0, &advk_pcie_ops,
+                                   pcie, &pcie->resources, &pcie->msi);
+       if (!bus) {
+               advk_pcie_remove_msi_irq_domain(pcie);
+               advk_pcie_remove_irq_domain(pcie);
+               return -ENOMEM;
+       }
+
+       pci_bus_assign_resources(bus);
+
+       list_for_each_entry(child, &bus->children, node)
+               pcie_bus_configure_settings(child);
+
+       pci_bus_add_devices(bus);
+
+       return 0;
+}
+
+static const struct of_device_id advk_pcie_of_match_table[] = {
+       { .compatible = "marvell,armada-3700-pcie", },
+       {},
+};
+
+static struct platform_driver advk_pcie_driver = {
+       .driver = {
+               .name = "advk-pcie",
+               .of_match_table = advk_pcie_of_match_table,
+               /* Driver unloading/unbinding currently not supported */
+               .suppress_bind_attrs = true,
+       },
+       .probe = advk_pcie_probe,
+};
+builtin_platform_driver(advk_pcie_driver);
index f441130407e77d2e33c645a42c9972c69a3d0462..81b3949a26dbda1ed9586ba2cefbe7450f5e7f02 100644 (file)
@@ -181,14 +181,14 @@ static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
 
        if (!pcie_intc_node) {
                dev_err(dev, "No PCIe Intc node found\n");
-               return PTR_ERR(pcie_intc_node);
+               return -ENODEV;
        }
 
        pp->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
                                               &intx_domain_ops, pp);
        if (!pp->irq_domain) {
                dev_err(dev, "Failed to get a INTx IRQ domain\n");
-               return PTR_ERR(pp->irq_domain);
+               return -ENODEV;
        }
 
        return 0;
index 8cba7ab73df991aa6d299caf6668573486477c83..9d9d34e959b6a253814c6e7d931f6532e4f7887e 100644 (file)
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
+#include <linux/pci-ecam.h>
 #include <linux/platform_device.h>
 
-#include "../ecam.h"
-
 static int gen_pci_parse_request_of_pci_ranges(struct device *dev,
                       struct list_head *resources, struct resource **bus_range)
 {
@@ -36,44 +35,34 @@ static int gen_pci_parse_request_of_pci_ranges(struct device *dev,
        if (err)
                return err;
 
+       err = devm_request_pci_bus_resources(dev, resources);
+       if (err)
+               return err;
+
        resource_list_for_each_entry(win, resources) {
-               struct resource *parent, *res = win->res;
+               struct resource *res = win->res;
 
                switch (resource_type(res)) {
                case IORESOURCE_IO:
-                       parent = &ioport_resource;
                        err = pci_remap_iospace(res, iobase);
-                       if (err) {
+                       if (err)
                                dev_warn(dev, "error %d: failed to map resource %pR\n",
                                         err, res);
-                               continue;
-                       }
                        break;
                case IORESOURCE_MEM:
-                       parent = &iomem_resource;
                        res_valid |= !(res->flags & IORESOURCE_PREFETCH);
                        break;
                case IORESOURCE_BUS:
                        *bus_range = res;
-               default:
-                       continue;
+                       break;
                }
-
-               err = devm_request_resource(dev, parent, res);
-               if (err)
-                       goto out_release_res;
-       }
-
-       if (!res_valid) {
-               dev_err(dev, "non-prefetchable memory resource required\n");
-               err = -EINVAL;
-               goto out_release_res;
        }
 
-       return 0;
+       if (res_valid)
+               return 0;
 
-out_release_res:
-       return err;
+       dev_err(dev, "non-prefetchable memory resource required\n");
+       return -EINVAL;
 }
 
 static void gen_pci_unmap_cfg(void *ptr)
@@ -155,7 +144,14 @@ int pci_host_common_probe(struct platform_device *pdev,
 
        pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
 
-       if (!pci_has_flag(PCI_PROBE_ONLY)) {
+       /*
+        * We insert PCI resources into the iomem_resource and
+        * ioport_resource trees in either pci_bus_claim_resources()
+        * or pci_bus_assign_resources().
+        */
+       if (pci_has_flag(PCI_PROBE_ONLY)) {
+               pci_bus_claim_resources(bus);
+       } else {
                pci_bus_size_bridges(bus);
                pci_bus_assign_resources(bus);
 
index 6eaceab1bf043e2d5006adddfb1fc3c9d6188d31..c05ea9d72f693f38203e98ae92c4c95df4dab661 100644 (file)
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
+#include <linux/pci-ecam.h>
 #include <linux/platform_device.h>
 
-#include "../ecam.h"
-
 static struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = {
        .bus_shift      = 16,
        .pci_ops        = {
@@ -46,8 +45,6 @@ static const struct of_device_id gen_pci_of_match[] = {
        { },
 };
 
-MODULE_DEVICE_TABLE(of, gen_pci_of_match);
-
 static int gen_pci_probe(struct platform_device *pdev)
 {
        const struct of_device_id *of_id;
@@ -66,8 +63,4 @@ static struct platform_driver gen_pci_driver = {
        },
        .probe = gen_pci_probe,
 };
-module_platform_driver(gen_pci_driver);
-
-MODULE_DESCRIPTION("Generic PCI host driver");
-MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(gen_pci_driver);
index 7e9b2de2aa24b64306a764d127348db9f91cbe9f..6955ffdb89f33332f89595407b82cd0f8e6af779 100644 (file)
@@ -732,16 +732,18 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
 
        pdev = msi_desc_to_pci_dev(msi);
        hbus = info->data;
-       hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
-       if (!hpdev)
+       int_desc = irq_data_get_irq_chip_data(irq_data);
+       if (!int_desc)
                return;
 
-       int_desc = irq_data_get_irq_chip_data(irq_data);
-       if (int_desc) {
-               irq_data->chip_data = NULL;
-               hv_int_desc_free(hpdev, int_desc);
+       irq_data->chip_data = NULL;
+       hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
+       if (!hpdev) {
+               kfree(int_desc);
+               return;
        }
 
+       hv_int_desc_free(hpdev, int_desc);
        put_pcichild(hpdev, hv_pcidev_ref_by_slot);
 }
 
@@ -1657,14 +1659,16 @@ static void hv_pci_onchannelcallback(void *context)
                        continue;
                }
 
+               /* Zero length indicates there are no more packets. */
+               if (ret || !bytes_recvd)
+                       break;
+
                /*
                 * All incoming packets must be at least as large as a
                 * response.
                 */
-               if (bytes_recvd <= sizeof(struct pci_response)) {
-                       kfree(buffer);
-                       return;
-               }
+               if (bytes_recvd <= sizeof(struct pci_response))
+                       continue;
                desc = (struct vmpacket_descriptor *)buffer;
 
                switch (desc->type) {
@@ -1679,8 +1683,7 @@ static void hv_pci_onchannelcallback(void *context)
                        comp_packet->completion_func(comp_packet->compl_ctxt,
                                                     response,
                                                     bytes_recvd);
-                       kfree(buffer);
-                       return;
+                       break;
 
                case VM_PKT_DATA_INBAND:
 
@@ -1727,8 +1730,9 @@ static void hv_pci_onchannelcallback(void *context)
                                desc->type, req_id, bytes_recvd);
                        break;
                }
-               break;
        }
+
+       kfree(buffer);
 }
 
 /**
index 6b8301ef21ca1f84bcd142275561b60f1f71a3fc..8ba28834d470e8f8b42228988772ba25c691673a 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/msi.h>
 #include <linux/of_irq.h>
 #include <linux/of.h>
@@ -360,7 +360,6 @@ static const struct of_device_id ks_pcie_of_match[] = {
        },
        { },
 };
-MODULE_DEVICE_TABLE(of, ks_pcie_of_match);
 
 static int __exit ks_pcie_remove(struct platform_device *pdev)
 {
@@ -439,9 +438,4 @@ static struct platform_driver ks_pcie_driver __refdata = {
                .of_match_table = of_match_ptr(ks_pcie_of_match),
        },
 };
-
-module_platform_driver(ks_pcie_driver);
-
-MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>");
-MODULE_DESCRIPTION("Keystone PCIe host controller driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(ks_pcie_driver);
index a21e229d95e066fa440b94b6eeb0be77b9368877..114ba819277af73772696f05cd3c8669150b4aca 100644 (file)
@@ -12,7 +12,7 @@
 
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/of_pci.h>
 #include <linux/of_platform.h>
 #include <linux/of_irq.h>
@@ -211,7 +211,6 @@ static const struct of_device_id ls_pcie_of_match[] = {
        { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
        { },
 };
-MODULE_DEVICE_TABLE(of, ls_pcie_of_match);
 
 static int __init ls_add_pcie_port(struct pcie_port *pp,
                                   struct platform_device *pdev)
@@ -275,9 +274,4 @@ static struct platform_driver ls_pcie_driver = {
                .of_match_table = ls_pcie_of_match,
        },
 };
-
-module_platform_driver_probe(ls_pcie_driver, ls_pcie_probe);
-
-MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@freescale.com>");
-MODULE_DESCRIPTION("Freescale Layerscape PCIe host controller driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver_probe(ls_pcie_driver, ls_pcie_probe);
index 6b451df6502c700d64278bec4f5b706ee63260e2..307f81d6b479af4e55ed4e1ab30ba1cfdfdf22d2 100644 (file)
@@ -1,6 +1,8 @@
 /*
  * PCIe driver for Marvell Armada 370 and Armada XP SoCs
  *
+ * Author: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
  * This file is licensed under the terms of the GNU General Public
  * License version 2.  This program is licensed "as is" without any
  * warranty of any kind, whether express or implied.
@@ -11,7 +13,7 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/gpio.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/mbus.h>
 #include <linux/msi.h>
 #include <linux/slab.h>
@@ -839,25 +841,22 @@ static struct pci_ops mvebu_pcie_ops = {
 static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
 {
        struct mvebu_pcie *pcie = sys_to_pcie(sys);
-       int i;
+       int err, i;
 
        pcie->mem.name = "PCI MEM";
        pcie->realio.name = "PCI I/O";
 
-       if (request_resource(&iomem_resource, &pcie->mem))
-               return 0;
-
-       if (resource_size(&pcie->realio) != 0) {
-               if (request_resource(&ioport_resource, &pcie->realio)) {
-                       release_resource(&pcie->mem);
-                       return 0;
-               }
+       if (resource_size(&pcie->realio) != 0)
                pci_add_resource_offset(&sys->resources, &pcie->realio,
                                        sys->io_offset);
-       }
+
        pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
        pci_add_resource(&sys->resources, &pcie->busn);
 
+       err = devm_request_pci_bus_resources(&pcie->pdev->dev, &sys->resources);
+       if (err)
+               return 0;
+
        for (i = 0; i < pcie->nports; i++) {
                struct mvebu_pcie_port *port = &pcie->ports[i];
 
@@ -1298,7 +1297,6 @@ static const struct of_device_id mvebu_pcie_of_match_table[] = {
        { .compatible = "marvell,kirkwood-pcie", },
        {},
 };
-MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table);
 
 static const struct dev_pm_ops mvebu_pcie_pm_ops = {
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
@@ -1314,8 +1312,4 @@ static struct platform_driver mvebu_pcie_driver = {
        },
        .probe = mvebu_pcie_probe,
 };
-module_platform_driver(mvebu_pcie_driver);
-
-MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
-MODULE_DESCRIPTION("Marvell EBU PCIe driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(mvebu_pcie_driver);
index 9980a4bdae7ee1c68c1f3c9bab2bdfee0a6b9ab2..597566f96f5eb68afba8b234c91ae990ba9340f6 100644 (file)
@@ -4,6 +4,8 @@
  * Copyright (C) 2013 Renesas Solutions Corp.
  * Copyright (C) 2013 Cogent Embedded, Inc.
  *
+ * Author: Valentine Barshak <valentine.barshak@cogentembedded.com>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
@@ -14,7 +16,6 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
 #include <linux/pci.h>
@@ -97,7 +98,6 @@
 struct rcar_pci_priv {
        struct device *dev;
        void __iomem *reg;
-       struct resource io_res;
        struct resource mem_res;
        struct resource *cfg_res;
        unsigned busnr;
@@ -194,6 +194,7 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
        struct rcar_pci_priv *priv = sys->private_data;
        void __iomem *reg = priv->reg;
        u32 val;
+       int ret;
 
        pm_runtime_enable(priv->dev);
        pm_runtime_get_sync(priv->dev);
@@ -273,8 +274,10 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
                rcar_pci_setup_errirq(priv);
 
        /* Add PCI resources */
-       pci_add_resource(&sys->resources, &priv->io_res);
        pci_add_resource(&sys->resources, &priv->mem_res);
+       ret = devm_request_pci_bus_resources(priv->dev, &sys->resources);
+       if (ret < 0)
+               return ret;
 
        /* Setup bus number based on platform device id / of bus-range */
        sys->busnr = priv->busnr;
@@ -371,14 +374,6 @@ static int rcar_pci_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        priv->mem_res = *mem_res;
-       /*
-        * The controller does not support/use port I/O,
-        * so setup a dummy port I/O region here.
-        */
-       priv->io_res.start = priv->mem_res.start;
-       priv->io_res.end = priv->mem_res.end;
-       priv->io_res.flags = IORESOURCE_IO;
-
        priv->cfg_res = cfg_res;
 
        priv->irq = platform_get_irq(pdev, 0);
@@ -421,6 +416,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
        hw_private[0] = priv;
        memset(&hw, 0, sizeof(hw));
        hw.nr_controllers = ARRAY_SIZE(hw_private);
+       hw.io_optional = 1;
        hw.private_data = hw_private;
        hw.map_irq = rcar_pci_map_irq;
        hw.ops = &rcar_pci_ops;
@@ -437,8 +433,6 @@ static struct of_device_id rcar_pci_of_match[] = {
        { },
 };
 
-MODULE_DEVICE_TABLE(of, rcar_pci_of_match);
-
 static struct platform_driver rcar_pci_driver = {
        .driver = {
                .name = "pci-rcar-gen2",
@@ -447,9 +441,4 @@ static struct platform_driver rcar_pci_driver = {
        },
        .probe = rcar_pci_probe,
 };
-
-module_platform_driver(rcar_pci_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Renesas R-Car Gen2 internal PCI");
-MODULE_AUTHOR("Valentine Barshak <valentine.barshak@cogentembedded.com>");
+builtin_platform_driver(rcar_pci_driver);
index c388468c202a75efd28bef26eaafb190690c5856..6de0757b11e4641b2a79b05fd62d19083656419e 100644 (file)
@@ -9,6 +9,8 @@
  *
  * Bits taken from arch/arm/mach-dove/pcie.c
  *
+ * Author: Thierry Reding <treding@nvidia.com>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -32,7 +34,7 @@
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/msi.h>
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
 
 #define AFI_PEXBIAS_CTRL_0             0x168
 
-#define RP_VEND_XP     0x00000F00
+#define RP_VEND_XP     0x00000f00
 #define  RP_VEND_XP_DL_UP      (1 << 30)
 
-#define RP_PRIV_MISC   0x00000FE0
-#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0)
-#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0)
+#define RP_PRIV_MISC   0x00000fe0
+#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
+#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)
 
 #define RP_LINK_CONTROL_STATUS                 0x00000090
 #define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
 #define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK  0x3fff0000
 
-#define PADS_CTL_SEL           0x0000009C
+#define PADS_CTL_SEL           0x0000009c
 
-#define PADS_CTL               0x000000A0
+#define PADS_CTL               0x000000a0
 #define  PADS_CTL_IDDQ_1L      (1 << 0)
 #define  PADS_CTL_TX_DATA_EN_1L        (1 << 6)
 #define  PADS_CTL_RX_DATA_EN_1L        (1 << 10)
 
-#define PADS_PLL_CTL_TEGRA20                   0x000000B8
-#define PADS_PLL_CTL_TEGRA30                   0x000000B4
+#define PADS_PLL_CTL_TEGRA20                   0x000000b8
+#define PADS_PLL_CTL_TEGRA30                   0x000000b4
 #define  PADS_PLL_CTL_RST_B4SM                 (1 << 1)
 #define  PADS_PLL_CTL_LOCKDET                  (1 << 8)
 #define  PADS_PLL_CTL_REFCLK_MASK              (0x3 << 16)
 #define  PADS_PLL_CTL_TXCLKREF_DIV5            (1 << 20)
 #define  PADS_PLL_CTL_TXCLKREF_BUF_EN          (1 << 22)
 
-#define PADS_REFCLK_CFG0                       0x000000C8
-#define PADS_REFCLK_CFG1                       0x000000CC
-#define PADS_REFCLK_BIAS                       0x000000D0
+#define PADS_REFCLK_CFG0                       0x000000c8
+#define PADS_REFCLK_CFG1                       0x000000cc
+#define PADS_REFCLK_BIAS                       0x000000d0
 
 /*
  * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 #define PADS_REFCLK_CFG_PREDI_SHIFT            8  /* 11:8 */
 #define PADS_REFCLK_CFG_DRVI_SHIFT             12 /* 15:12 */
 
-/* Default value provided by HW engineering is 0xfa5c */
-#define PADS_REFCLK_CFG_VALUE \
-       ( \
-               (0x17 << PADS_REFCLK_CFG_TERM_SHIFT)   | \
-               (0    << PADS_REFCLK_CFG_E_TERM_SHIFT) | \
-               (0xa  << PADS_REFCLK_CFG_PREDI_SHIFT)  | \
-               (0xf  << PADS_REFCLK_CFG_DRVI_SHIFT)     \
-       )
-
 struct tegra_msi {
        struct msi_controller chip;
        DECLARE_BITMAP(used, INT_PCI_MSI_NR);
@@ -252,6 +245,8 @@ struct tegra_pcie_soc_data {
        unsigned int msi_base_shift;
        u32 pads_pll_ctl;
        u32 tx_ref_sel;
+       u32 pads_refclk_cfg0;
+       u32 pads_refclk_cfg1;
        bool has_pex_clkreq_en;
        bool has_pex_bias_ctrl;
        bool has_intr_prsnt_sense;
@@ -274,7 +269,6 @@ struct tegra_pcie {
        struct list_head buses;
        struct resource *cs;
 
-       struct resource all;
        struct resource io;
        struct resource pio;
        struct resource mem;
@@ -623,30 +617,21 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
        sys->mem_offset = pcie->offset.mem;
        sys->io_offset = pcie->offset.io;
 
-       err = devm_request_resource(pcie->dev, &pcie->all, &pcie->io);
-       if (err < 0)
-               return err;
-
-       err = devm_request_resource(pcie->dev, &ioport_resource, &pcie->pio);
-       if (err < 0)
-               return err;
-
-       err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
+       err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->io);
        if (err < 0)
                return err;
 
-       err = devm_request_resource(pcie->dev, &pcie->all, &pcie->prefetch);
-       if (err)
-               return err;
-
        pci_add_resource_offset(&sys->resources, &pcie->pio, sys->io_offset);
        pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
        pci_add_resource_offset(&sys->resources, &pcie->prefetch,
                                sys->mem_offset);
        pci_add_resource(&sys->resources, &pcie->busn);
 
-       pci_ioremap_io(pcie->pio.start, pcie->io.start);
+       err = devm_request_pci_bus_resources(pcie->dev, &sys->resources);
+       if (err < 0)
+               return err;
 
+       pci_remap_iospace(&pcie->pio, pcie->io.start);
        return 1;
 }
 
@@ -838,12 +823,6 @@ static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
        value |= PADS_PLL_CTL_RST_B4SM;
        pads_writel(pcie, value, soc->pads_pll_ctl);
 
-       /* Configure the reference clock driver */
-       value = PADS_REFCLK_CFG_VALUE | (PADS_REFCLK_CFG_VALUE << 16);
-       pads_writel(pcie, value, PADS_REFCLK_CFG0);
-       if (soc->num_ports > 2)
-               pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);
-
        /* wait for the PLL to lock */
        err = tegra_pcie_pll_wait(pcie, 500);
        if (err < 0) {
@@ -927,6 +906,7 @@ static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
 
 static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
 {
+       const struct tegra_pcie_soc_data *soc = pcie->soc_data;
        struct tegra_pcie_port *port;
        int err;
 
@@ -952,6 +932,12 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
                }
        }
 
+       /* Configure the reference clock driver */
+       pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);
+
+       if (soc->num_ports > 2)
+               pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
+
        return 0;
 }
 
@@ -1822,12 +1808,6 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
        struct resource res;
        int err;
 
-       memset(&pcie->all, 0, sizeof(pcie->all));
-       pcie->all.flags = IORESOURCE_MEM;
-       pcie->all.name = np->full_name;
-       pcie->all.start = ~0;
-       pcie->all.end = 0;
-
        if (of_pci_range_parser_init(&parser, np)) {
                dev_err(pcie->dev, "missing \"ranges\" property\n");
                return -EINVAL;
@@ -1880,18 +1860,8 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
                        }
                        break;
                }
-
-               if (res.start <= pcie->all.start)
-                       pcie->all.start = res.start;
-
-               if (res.end >= pcie->all.end)
-                       pcie->all.end = res.end;
        }
 
-       err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->all);
-       if (err < 0)
-               return err;
-
        err = of_pci_parse_bus_range(np, &pcie->busn);
        if (err < 0) {
                dev_err(pcie->dev, "failed to parse ranges property: %d\n",
@@ -2078,6 +2048,7 @@ static const struct tegra_pcie_soc_data tegra20_pcie_data = {
        .msi_base_shift = 0,
        .pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
        .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
+       .pads_refclk_cfg0 = 0xfa5cfa5c,
        .has_pex_clkreq_en = false,
        .has_pex_bias_ctrl = false,
        .has_intr_prsnt_sense = false,
@@ -2090,6 +2061,8 @@ static const struct tegra_pcie_soc_data tegra30_pcie_data = {
        .msi_base_shift = 8,
        .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
        .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+       .pads_refclk_cfg0 = 0xfa5cfa5c,
+       .pads_refclk_cfg1 = 0xfa5cfa5c,
        .has_pex_clkreq_en = true,
        .has_pex_bias_ctrl = true,
        .has_intr_prsnt_sense = true,
@@ -2102,6 +2075,7 @@ static const struct tegra_pcie_soc_data tegra124_pcie_data = {
        .msi_base_shift = 8,
        .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
        .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+       .pads_refclk_cfg0 = 0x44ac44ac,
        .has_pex_clkreq_en = true,
        .has_pex_bias_ctrl = true,
        .has_intr_prsnt_sense = true,
@@ -2115,7 +2089,6 @@ static const struct of_device_id tegra_pcie_of_match[] = {
        { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
        { },
 };
-MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
 
 static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
 {
@@ -2249,8 +2222,6 @@ static int tegra_pcie_probe(struct platform_device *pdev)
        if (err < 0)
                return err;
 
-       pcibios_min_mem = 0;
-
        err = tegra_pcie_get_resources(pcie);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to request resources: %d\n", err);
@@ -2306,8 +2277,4 @@ static struct platform_driver tegra_pcie_driver = {
        },
        .probe = tegra_pcie_probe,
 };
-module_platform_driver(tegra_pcie_driver);
-
-MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
-MODULE_DESCRIPTION("NVIDIA Tegra PCIe driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(tegra_pcie_driver);
index 540d030613eb9485b7ab163f0ddd0da747fdda10..d50a3dc2d8db127e225df7bfd24dd435500526cc 100644 (file)
@@ -7,14 +7,13 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/of_pci.h>
 #include <linux/of.h>
+#include <linux/pci-ecam.h>
 #include <linux/platform_device.h>
 
-#include "../ecam.h"
-
 static void set_val(u32 v, int where, int size, u32 *val)
 {
        int shift = (where & 3) * 8;
@@ -360,7 +359,6 @@ static const struct of_device_id thunder_ecam_of_match[] = {
        { .compatible = "cavium,pci-host-thunder-ecam" },
        { },
 };
-MODULE_DEVICE_TABLE(of, thunder_ecam_of_match);
 
 static int thunder_ecam_probe(struct platform_device *pdev)
 {
@@ -374,7 +372,4 @@ static struct platform_driver thunder_ecam_driver = {
        },
        .probe = thunder_ecam_probe,
 };
-module_platform_driver(thunder_ecam_driver);
-
-MODULE_DESCRIPTION("Thunder ECAM PCI host driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(thunder_ecam_driver);
index 9b8ab94f3c8c88dc2fac62f504e2ce1d0b04f8c6..6abaf80ffb395d48668dc995782cdb4b53ff4067 100644 (file)
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
+#include <linux/pci-ecam.h>
 #include <linux/platform_device.h>
 
-#include "../ecam.h"
-
 #define PEM_CFG_WR 0x28
 #define PEM_CFG_RD 0x30
 
@@ -285,8 +284,9 @@ static int thunder_pem_config_write(struct pci_bus *bus, unsigned int devfn,
        return pci_generic_config_write(bus, devfn, where, size, val);
 }
 
-static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg)
+static int thunder_pem_init(struct pci_config_window *cfg)
 {
+       struct device *dev = cfg->parent;
        resource_size_t bar4_start;
        struct resource *res_pem;
        struct thunder_pem_pci *pem_pci;
@@ -346,7 +346,6 @@ static const struct of_device_id thunder_pem_of_match[] = {
        { .compatible = "cavium,pci-host-thunder-pem" },
        { },
 };
-MODULE_DEVICE_TABLE(of, thunder_pem_of_match);
 
 static int thunder_pem_probe(struct platform_device *pdev)
 {
@@ -360,7 +359,4 @@ static struct platform_driver thunder_pem_driver = {
        },
        .probe = thunder_pem_probe,
 };
-module_platform_driver(thunder_pem_driver);
-
-MODULE_DESCRIPTION("Thunder PEM PCIe host driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(thunder_pem_driver);
index f843a72dc51c268173c682a26a895a93e3c8d381..f234405770abf0c8dd1dd66a58687a4e182cdf05 100644 (file)
@@ -80,21 +80,21 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
        if (err)
                return err;
 
+       err = devm_request_pci_bus_resources(dev, res);
+       if (err)
+               goto out_release_res;
+
        resource_list_for_each_entry(win, res) {
-               struct resource *parent, *res = win->res;
+               struct resource *res = win->res;
 
                switch (resource_type(res)) {
                case IORESOURCE_IO:
-                       parent = &ioport_resource;
                        err = pci_remap_iospace(res, iobase);
-                       if (err) {
+                       if (err)
                                dev_warn(dev, "error %d: failed to map resource %pR\n",
                                         err, res);
-                               continue;
-                       }
                        break;
                case IORESOURCE_MEM:
-                       parent = &iomem_resource;
                        res_valid |= !(res->flags & IORESOURCE_PREFETCH);
 
                        writel(res->start >> 28, PCI_IMAP(mem));
@@ -102,23 +102,14 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
                        mem++;
 
                        break;
-               case IORESOURCE_BUS:
-               default:
-                       continue;
                }
-
-               err = devm_request_resource(dev, parent, res);
-               if (err)
-                       goto out_release_res;
        }
 
-       if (!res_valid) {
-               dev_err(dev, "non-prefetchable memory resource required\n");
-               err = -EINVAL;
-               goto out_release_res;
-       }
+       if (res_valid)
+               return 0;
 
-       return 0;
+       dev_err(dev, "non-prefetchable memory resource required\n");
+       err = -EINVAL;
 
 out_release_res:
        pci_free_resource_list(res);
index ae00ce22d5a6eb2c7c2263f13a3fe0113c45da27..a81273c23341a4738617a5db2473ae682a30388d 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/io.h>
 #include <linux/jiffies.h>
 #include <linux/memblock.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
@@ -540,14 +540,20 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
        if (ret)
                return ret;
 
+       ret = devm_request_pci_bus_resources(&pdev->dev, &res);
+       if (ret)
+               goto error;
+
        ret = xgene_pcie_setup(port, &res, iobase);
        if (ret)
-               return ret;
+               goto error;
 
        bus = pci_create_root_bus(&pdev->dev, 0,
                                        &xgene_pcie_ops, port, &res);
-       if (!bus)
-               return -ENOMEM;
+       if (!bus) {
+               ret = -ENOMEM;
+               goto error;
+       }
 
        pci_scan_child_bus(bus);
        pci_assign_unassigned_bus_resources(bus);
@@ -555,6 +561,10 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, port);
        return 0;
+
+error:
+       pci_free_resource_list(&res);
+       return ret;
 }
 
 static const struct of_device_id xgene_pcie_match_table[] = {
@@ -569,8 +579,4 @@ static struct platform_driver xgene_pcie_driver = {
        },
        .probe = xgene_pcie_probe_bridge,
 };
-module_platform_driver(xgene_pcie_driver);
-
-MODULE_AUTHOR("Tanmay Inamdar <tinamdar@apm.com>");
-MODULE_DESCRIPTION("APM X-Gene PCIe driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(xgene_pcie_driver);
index dbac6fb3f0bdfbccaaceefc0c1467e80ce0345df..2b7837650db8449378f2c3b09aa4d53fc3c3177c 100644 (file)
@@ -61,6 +61,8 @@
 #define TLP_LOOP                       500
 #define RP_DEVFN                       0
 
+#define LINK_UP_TIMEOUT                        5000
+
 #define INTX_NUM                       4
 
 #define DWORD_MASK                     3
@@ -81,9 +83,30 @@ struct tlp_rp_regpair_t {
        u32 reg1;
 };
 
+static inline void cra_writel(struct altera_pcie *pcie, const u32 value,
+                             const u32 reg)
+{
+       writel_relaxed(value, pcie->cra_base + reg);
+}
+
+static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
+{
+       return readl_relaxed(pcie->cra_base + reg);
+}
+
+static bool altera_pcie_link_is_up(struct altera_pcie *pcie)
+{
+       return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
+}
+
 static void altera_pcie_retrain(struct pci_dev *dev)
 {
        u16 linkcap, linkstat;
+       struct altera_pcie *pcie = dev->bus->sysdata;
+       int timeout =  0;
+
+       if (!altera_pcie_link_is_up(pcie))
+               return;
 
        /*
         * Set the retrain bit if the PCIe rootport support > 2.5GB/s, but
@@ -95,9 +118,16 @@ static void altera_pcie_retrain(struct pci_dev *dev)
                return;
 
        pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &linkstat);
-       if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB)
+       if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
                pcie_capability_set_word(dev, PCI_EXP_LNKCTL,
                                         PCI_EXP_LNKCTL_RL);
+               while (!altera_pcie_link_is_up(pcie)) {
+                       timeout++;
+                       if (timeout > LINK_UP_TIMEOUT)
+                               break;
+                       udelay(5);
+               }
+       }
 }
 DECLARE_PCI_FIXUP_EARLY(0x1172, PCI_ANY_ID, altera_pcie_retrain);
 
@@ -120,17 +150,6 @@ static bool altera_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int  devfn,
        return false;
 }
 
-static inline void cra_writel(struct altera_pcie *pcie, const u32 value,
-                             const u32 reg)
-{
-       writel_relaxed(value, pcie->cra_base + reg);
-}
-
-static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
-{
-       return readl_relaxed(pcie->cra_base + reg);
-}
-
 static void tlp_write_tx(struct altera_pcie *pcie,
                         struct tlp_rp_regpair_t *tlp_rp_regdata)
 {
@@ -139,11 +158,6 @@ static void tlp_write_tx(struct altera_pcie *pcie,
        cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL);
 }
 
-static bool altera_pcie_link_is_up(struct altera_pcie *pcie)
-{
-       return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
-}
-
 static bool altera_pcie_valid_config(struct altera_pcie *pcie,
                                     struct pci_bus *bus, int dev)
 {
@@ -415,11 +429,6 @@ static void altera_pcie_isr(struct irq_desc *desc)
        chained_irq_exit(chip, desc);
 }
 
-static void altera_pcie_release_of_pci_ranges(struct altera_pcie *pcie)
-{
-       pci_free_resource_list(&pcie->resources);
-}
-
 static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie)
 {
        int err, res_valid = 0;
@@ -432,33 +441,25 @@ static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie)
        if (err)
                return err;
 
+       err = devm_request_pci_bus_resources(dev, &pcie->resources);
+       if (err)
+               goto out_release_res;
+
        resource_list_for_each_entry(win, &pcie->resources) {
-               struct resource *parent, *res = win->res;
+               struct resource *res = win->res;
 
-               switch (resource_type(res)) {
-               case IORESOURCE_MEM:
-                       parent = &iomem_resource;
+               if (resource_type(res) == IORESOURCE_MEM)
                        res_valid |= !(res->flags & IORESOURCE_PREFETCH);
-                       break;
-               default:
-                       continue;
-               }
-
-               err = devm_request_resource(dev, parent, res);
-               if (err)
-                       goto out_release_res;
        }
 
-       if (!res_valid) {
-               dev_err(dev, "non-prefetchable memory resource required\n");
-               err = -EINVAL;
-               goto out_release_res;
-       }
+       if (res_valid)
+               return 0;
 
-       return 0;
+       dev_err(dev, "non-prefetchable memory resource required\n");
+       err = -EINVAL;
 
 out_release_res:
-       altera_pcie_release_of_pci_ranges(pcie);
+       pci_free_resource_list(&pcie->resources);
        return err;
 }
 
index 55723567b5d43557a592a326115c42a9c4345e25..0f4f570068e3d74f4e4aff4ed78f6bc356f7c319 100644 (file)
@@ -5,6 +5,9 @@
  *
  * Copyright (C) 2016 Marvell Technology Group Ltd.
  *
+ * Author: Yehuda Yitshak <yehuday@marvell.com>
+ * Author: Shadi Ammouri <shadi@marvell.com>
+ *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
  * warranty of any kind, whether express or implied.
@@ -14,7 +17,7 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/of.h>
 #include <linux/pci.h>
 #include <linux/phy/phy.h>
@@ -244,7 +247,6 @@ static const struct of_device_id armada8k_pcie_of_match[] = {
        { .compatible = "marvell,armada8k-pcie", },
        {},
 };
-MODULE_DEVICE_TABLE(of, armada8k_pcie_of_match);
 
 static struct platform_driver armada8k_pcie_driver = {
        .probe          = armada8k_pcie_probe,
@@ -253,10 +255,4 @@ static struct platform_driver armada8k_pcie_driver = {
                .of_match_table = of_match_ptr(armada8k_pcie_of_match),
        },
 };
-
-module_platform_driver(armada8k_pcie_driver);
-
-MODULE_DESCRIPTION("Armada 8k PCIe host controller driver");
-MODULE_AUTHOR("Yehuda Yitshak <yehuday@marvell.com>");
-MODULE_AUTHOR("Shadi Ammouri <shadi@marvell.com>");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(armada8k_pcie_driver);
diff --git a/drivers/pci/host/pcie-artpec6.c b/drivers/pci/host/pcie-artpec6.c
new file mode 100644 (file)
index 0000000..16ba70b
--- /dev/null
@@ -0,0 +1,280 @@
+/*
+ * PCIe host controller driver for Axis ARTPEC-6 SoC
+ *
+ * Author: Niklas Cassel <niklas.cassel@axis.com>
+ *
+ * Based on work done by Phil Edworthy <phil@edworthys.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include "pcie-designware.h"
+
+#define to_artpec6_pcie(x)     container_of(x, struct artpec6_pcie, pp)
+
+struct artpec6_pcie {
+       struct pcie_port        pp;     /* DesignWare host port state (see to_artpec6_pcie()) */
+       struct regmap           *regmap;        /* "axis,syscon-pcie" system controller (PCIECFG/NOCCFG) */
+       void __iomem            *phy_base;      /* mapped "phy" MEM resource (PHY_STATUS lives here) */
+};
+
+/* PCIe Port Logic registers (memory-mapped) */
+#define PL_OFFSET                      0x700
+#define PCIE_PHY_DEBUG_R0              (PL_OFFSET + 0x28)
+#define PCIE_PHY_DEBUG_R1              (PL_OFFSET + 0x2c)
+
+#define MISC_CONTROL_1_OFF             (PL_OFFSET + 0x1bc)
+#define  DBI_RO_WR_EN                  1
+
+/* ARTPEC-6 specific registers */
+#define PCIECFG                                0x18
+#define  PCIECFG_DBG_OEN               (1 << 24)
+#define  PCIECFG_CORE_RESET_REQ                (1 << 21)
+#define  PCIECFG_LTSSM_ENABLE          (1 << 20)
+#define  PCIECFG_CLKREQ_B              (1 << 11)
+#define  PCIECFG_REFCLK_ENABLE         (1 << 10)
+#define  PCIECFG_PLL_ENABLE            (1 << 9)
+#define  PCIECFG_PCLK_ENABLE           (1 << 8)
+#define  PCIECFG_RISRCREN              (1 << 4)
+#define  PCIECFG_MODE_TX_DRV_EN                (1 << 3)
+#define  PCIECFG_CISRREN               (1 << 2)
+#define  PCIECFG_MACRO_ENABLE          (1 << 0)
+
+#define NOCCFG                         0x40
+#define NOCCFG_ENABLE_CLK_PCIE         (1 << 4)
+#define NOCCFG_POWER_PCIE_IDLEACK      (1 << 3)
+#define NOCCFG_POWER_PCIE_IDLE         (1 << 2)
+#define NOCCFG_POWER_PCIE_IDLEREQ      (1 << 1)
+
+#define PHY_STATUS                     0x118
+#define PHY_COSPLLLOCK                 (1 << 0)
+
+#define ARTPEC6_CPU_TO_BUS_ADDR                0x0fffffff
+
+static int artpec6_pcie_establish_link(struct pcie_port *pp) /* power up PHY/clocks, start LTSSM; 0 on link up, -ETIMEDOUT otherwise */
+{
+       struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pp);
+       u32 val;
+       unsigned int retries;
+
+       /* Hold DW core in reset */
+       regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+       val |= PCIECFG_CORE_RESET_REQ;
+       regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+
+       regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+       val |=  PCIECFG_RISRCREN |      /* Receiver term. 50 Ohm */
+               PCIECFG_MODE_TX_DRV_EN |
+               PCIECFG_CISRREN |       /* Reference clock term. 100 Ohm */
+               PCIECFG_MACRO_ENABLE;
+       val |= PCIECFG_REFCLK_ENABLE;
+       val &= ~PCIECFG_DBG_OEN;
+       val &= ~PCIECFG_CLKREQ_B;
+       regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+       usleep_range(5000, 6000);       /* settle delay in the bring-up sequence */
+
+       regmap_read(artpec6_pcie->regmap, NOCCFG, &val);
+       val |= NOCCFG_ENABLE_CLK_PCIE;
+       regmap_write(artpec6_pcie->regmap, NOCCFG, val);
+       usleep_range(20, 30);
+
+       regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+       val |= PCIECFG_PCLK_ENABLE | PCIECFG_PLL_ENABLE;
+       regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+       usleep_range(6000, 7000);
+
+       regmap_read(artpec6_pcie->regmap, NOCCFG, &val);
+       val &= ~NOCCFG_POWER_PCIE_IDLEREQ;
+       regmap_write(artpec6_pcie->regmap, NOCCFG, val);
+
+       retries = 50;   /* poll NoC idle/idleack deassertion, 50 x 1-2 ms max */
+       do {
+               usleep_range(1000, 2000);
+               regmap_read(artpec6_pcie->regmap, NOCCFG, &val);
+               retries--;
+       } while (retries &&
+               (val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE))); /* NOTE(review): timeout falls through silently */
+
+       retries = 50;   /* poll PHY PLL lock, 50 x 1-2 ms max */
+       do {
+               usleep_range(1000, 2000);
+               val = readl(artpec6_pcie->phy_base + PHY_STATUS);
+               retries--;
+       } while (retries && !(val & PHY_COSPLLLOCK)); /* NOTE(review): timeout falls through silently */
+
+       /* Take DW core out of reset */
+       regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+       val &= ~PCIECFG_CORE_RESET_REQ;
+       regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+       usleep_range(100, 200);
+
+       /*
+        * Enable writing to config regs. This is required as the Synopsys
+        * driver changes the class code. That register needs DBI write enable.
+        */
+       writel(DBI_RO_WR_EN, pp->dbi_base + MISC_CONTROL_1_OFF);
+
+       pp->io_base &= ARTPEC6_CPU_TO_BUS_ADDR;
+       pp->mem_base &= ARTPEC6_CPU_TO_BUS_ADDR;
+       pp->cfg0_base &= ARTPEC6_CPU_TO_BUS_ADDR;
+       pp->cfg1_base &= ARTPEC6_CPU_TO_BUS_ADDR;
+
+       /* setup root complex */
+       dw_pcie_setup_rc(pp);
+
+       /* assert LTSSM enable */
+       regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+       val |= PCIECFG_LTSSM_ENABLE;
+       regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+
+       /* check if the link is up or not */
+       if (!dw_pcie_wait_for_link(pp))
+               return 0;
+
+       dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
+               readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
+               readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
+
+       return -ETIMEDOUT;
+}
+
+static void artpec6_pcie_enable_interrupts(struct pcie_port *pp) /* MSI is the only interrupt source initialized here */
+{
+       if (IS_ENABLED(CONFIG_PCI_MSI))
+               dw_pcie_msi_init(pp);
+}
+
+static void artpec6_pcie_host_init(struct pcie_port *pp) /* DW .host_init callback: bring up link, then MSI */
+{
+       artpec6_pcie_establish_link(pp);        /* NOTE(review): -ETIMEDOUT return is ignored here */
+       artpec6_pcie_enable_interrupts(pp);
+}
+
+static int artpec6_pcie_link_up(struct pcie_port *pp) /* DW .link_up callback: 1 if core reports link up, else 0 */
+{
+       u32 rc;
+
+       /*
+        * Get status from Synopsys IP
+        * link is debug bit 36, debug register 1 starts at bit 32
+        */
+       rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & (0x1 << (36 - 32));
+       if (rc)
+               return 1;
+
+       return 0;
+}
+
+static struct pcie_host_ops artpec6_pcie_host_ops = {
+       .link_up = artpec6_pcie_link_up,
+       .host_init = artpec6_pcie_host_init,
+};
+
+static irqreturn_t artpec6_pcie_msi_handler(int irq, void *arg) /* forwards the shared "msi" IRQ to the DW MSI demultiplexer */
+{
+       struct pcie_port *pp = arg;     /* registered as dev_id in artpec6_add_pcie_port() */
+
+       return dw_handle_msi_irq(pp);
+}
+
+static int __init artpec6_add_pcie_port(struct pcie_port *pp, /* request MSI IRQ (if enabled) and register the port with the DW core; 0 or -errno */
+                                       struct platform_device *pdev)
+{
+       int ret;
+
+       if (IS_ENABLED(CONFIG_PCI_MSI)) {
+               pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+               if (pp->msi_irq <= 0) {
+                       dev_err(&pdev->dev, "failed to get MSI irq\n");
+                       return -ENODEV;
+               }
+
+               ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+                                      artpec6_pcie_msi_handler,
+                                      IRQF_SHARED | IRQF_NO_THREAD,
+                                      "artpec6-pcie-msi", pp);
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to request MSI irq\n");
+                       return ret;
+               }
+       }
+
+       pp->root_bus_nr = -1;   /* presumably lets dw_pcie_host_init() pick the bus number — confirm against DW core */
+       pp->ops = &artpec6_pcie_host_ops;
+
+       ret = dw_pcie_host_init(pp);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to initialize host\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static int artpec6_pcie_probe(struct platform_device *pdev) /* map "dbi"/"phy" resources, find the syscon, then register the port */
+{
+       struct artpec6_pcie *artpec6_pcie;
+       struct pcie_port *pp;
+       struct resource *dbi_base;
+       struct resource *phy_base;
+       int ret;
+
+       artpec6_pcie = devm_kzalloc(&pdev->dev, sizeof(*artpec6_pcie),
+                                   GFP_KERNEL);
+       if (!artpec6_pcie)
+               return -ENOMEM;
+
+       pp = &artpec6_pcie->pp;
+       pp->dev = &pdev->dev;
+
+       dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");   /* DW core register space */
+       pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);     /* handles NULL resource, returns ERR_PTR */
+       if (IS_ERR(pp->dbi_base))
+               return PTR_ERR(pp->dbi_base);
+
+       phy_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
+       artpec6_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
+       if (IS_ERR(artpec6_pcie->phy_base))
+               return PTR_ERR(artpec6_pcie->phy_base);
+
+       artpec6_pcie->regmap =
+               syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+                                               "axis,syscon-pcie");    /* PCIECFG/NOCCFG control registers */
+       if (IS_ERR(artpec6_pcie->regmap))
+               return PTR_ERR(artpec6_pcie->regmap);
+
+       ret = artpec6_add_pcie_port(pp, pdev);
+       if (ret < 0)
+               return ret;
+
+       platform_set_drvdata(pdev, artpec6_pcie);
+       return 0;
+}
+
+static const struct of_device_id artpec6_pcie_of_match[] = {
+       { .compatible = "axis,artpec6-pcie", },
+       {},
+};
+
+static struct platform_driver artpec6_pcie_driver = {
+       .probe = artpec6_pcie_probe,
+       .driver = {
+               .name   = "artpec6-pcie",
+               .of_match_table = artpec6_pcie_of_match,
+       },
+};
+builtin_platform_driver(artpec6_pcie_driver);
index b3500994d08aa2290879b7b6be46a590c115d5cb..c8079dc81c1000c377751ab27593aa7ca16fc2f9 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/of_gpio.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
@@ -121,7 +121,6 @@ static const struct of_device_id dw_plat_pcie_of_match[] = {
        { .compatible = "snps,dw-pcie", },
        {},
 };
-MODULE_DEVICE_TABLE(of, dw_plat_pcie_of_match);
 
 static struct platform_driver dw_plat_pcie_driver = {
        .driver = {
@@ -130,9 +129,4 @@ static struct platform_driver dw_plat_pcie_driver = {
        },
        .probe = dw_plat_pcie_probe,
 };
-
-module_platform_driver(dw_plat_pcie_driver);
-
-MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>");
-MODULE_DESCRIPTION("Synopsys PCIe host controller glue platform driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(dw_plat_pcie_driver);
index aafd766546f38737d3845339944bbaee29883ec5..12afce19890b747ae5cc42f00541a9b79884c312 100644 (file)
@@ -452,6 +452,10 @@ int dw_pcie_host_init(struct pcie_port *pp)
        if (ret)
                return ret;
 
+       ret = devm_request_pci_bus_resources(&pdev->dev, &res);
+       if (ret)
+               goto error;
+
        /* Get the I/O and memory ranges from DT */
        resource_list_for_each_entry(win, &res) {
                switch (resource_type(win->res)) {
@@ -461,11 +465,9 @@ int dw_pcie_host_init(struct pcie_port *pp)
                        pp->io_size = resource_size(pp->io);
                        pp->io_bus_addr = pp->io->start - win->offset;
                        ret = pci_remap_iospace(pp->io, pp->io_base);
-                       if (ret) {
+                       if (ret)
                                dev_warn(pp->dev, "error %d: failed to map resource %pR\n",
                                         ret, pp->io);
-                               continue;
-                       }
                        break;
                case IORESOURCE_MEM:
                        pp->mem = win->res;
@@ -483,8 +485,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
                case IORESOURCE_BUS:
                        pp->busn = win->res;
                        break;
-               default:
-                       continue;
                }
        }
 
@@ -493,7 +493,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
                                        resource_size(pp->cfg));
                if (!pp->dbi_base) {
                        dev_err(pp->dev, "error with ioremap\n");
-                       return -ENOMEM;
+                       ret = -ENOMEM;
+                       goto error;
                }
        }
 
@@ -504,7 +505,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
                                                pp->cfg0_size);
                if (!pp->va_cfg0_base) {
                        dev_err(pp->dev, "error with ioremap in function\n");
-                       return -ENOMEM;
+                       ret = -ENOMEM;
+                       goto error;
                }
        }
 
@@ -513,7 +515,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
                                                pp->cfg1_size);
                if (!pp->va_cfg1_base) {
                        dev_err(pp->dev, "error with ioremap\n");
-                       return -ENOMEM;
+                       ret = -ENOMEM;
+                       goto error;
                }
        }
 
@@ -528,7 +531,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
                                                &dw_pcie_msi_chip);
                        if (!pp->irq_domain) {
                                dev_err(pp->dev, "irq domain init failed\n");
-                               return -ENXIO;
+                               ret = -ENXIO;
+                               goto error;
                        }
 
                        for (i = 0; i < MAX_MSI_IRQS; i++)
@@ -536,7 +540,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
                } else {
                        ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
                        if (ret < 0)
-                               return ret;
+                               goto error;
                }
        }
 
@@ -552,8 +556,10 @@ int dw_pcie_host_init(struct pcie_port *pp)
        } else
                bus = pci_scan_root_bus(pp->dev, pp->root_bus_nr, &dw_pcie_ops,
                                        pp, &res);
-       if (!bus)
-               return -ENOMEM;
+       if (!bus) {
+               ret = -ENOMEM;
+               goto error;
+       }
 
        if (pp->ops->scan_bus)
                pp->ops->scan_bus(pp);
@@ -571,6 +577,10 @@ int dw_pcie_host_init(struct pcie_port *pp)
 
        pci_bus_add_devices(bus);
        return 0;
+
+error:
+       pci_free_resource_list(&res);
+       return ret;
 }
 
 static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
index 3e98d4edae2d5325fede754081a570fbb3ae0026..7ee9dfcc45fb79b7fad6a4ab1f9df7b80fdecd9c 100644 (file)
@@ -12,7 +12,7 @@
  * published by the Free Software Foundation.
  */
 #include <linux/interrupt.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/mfd/syscon.h>
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
@@ -235,9 +235,6 @@ static const struct of_device_id hisi_pcie_of_match[] = {
        {},
 };
 
-
-MODULE_DEVICE_TABLE(of, hisi_pcie_of_match);
-
 static struct platform_driver hisi_pcie_driver = {
        .probe  = hisi_pcie_probe,
        .driver = {
@@ -245,10 +242,4 @@ static struct platform_driver hisi_pcie_driver = {
                   .of_match_table = hisi_pcie_of_match,
        },
 };
-
-module_platform_driver(hisi_pcie_driver);
-
-MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
-MODULE_AUTHOR("Dacai Zhu <zhudacai@hisilicon.com>");
-MODULE_AUTHOR("Gabriele Paoloni <gabriele.paoloni@huawei.com>");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(hisi_pcie_driver);
index a576aeeb22da6cec1a01784328e209a66e96e8df..e167b2f0098d687c6a197b49e2b86e56dded9514 100644 (file)
@@ -462,6 +462,10 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
        if (!pcie || !pcie->dev || !pcie->base)
                return -EINVAL;
 
+       ret = devm_request_pci_bus_resources(pcie->dev, res);
+       if (ret)
+               return ret;
+
        ret = phy_init(pcie->phy);
        if (ret) {
                dev_err(pcie->dev, "unable to initialize PCIe PHY\n");
index 35092188039b99264f7911a3fdd91005c043d4d0..65db7a2215090ff4508141dc131fb37104b9a34b 100644 (file)
@@ -7,6 +7,8 @@
  *  arch/sh/drivers/pci/ops-sh7786.c
  *  Copyright (C) 2009 - 2011  Paul Mundt
  *
+ * Author: Phil Edworthy <phil.edworthy@renesas.com>
+ *
  * This file is licensed under the terms of the GNU General Public
  * License version 2.  This program is licensed "as is" without any
  * warranty of any kind, whether express or implied.
@@ -18,7 +20,7 @@
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/msi.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
@@ -936,12 +938,6 @@ static const struct of_device_id rcar_pcie_of_match[] = {
        { .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init },
        {},
 };
-MODULE_DEVICE_TABLE(of, rcar_pcie_of_match);
-
-static void rcar_pcie_release_of_pci_ranges(struct rcar_pcie *pci)
-{
-       pci_free_resource_list(&pci->resources);
-}
 
 static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
 {
@@ -955,37 +951,25 @@ static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
        if (err)
                return err;
 
+       err = devm_request_pci_bus_resources(dev, &pci->resources);
+       if (err)
+               goto out_release_res;
+
        resource_list_for_each_entry(win, &pci->resources) {
-               struct resource *parent, *res = win->res;
+               struct resource *res = win->res;
 
-               switch (resource_type(res)) {
-               case IORESOURCE_IO:
-                       parent = &ioport_resource;
+               if (resource_type(res) == IORESOURCE_IO) {
                        err = pci_remap_iospace(res, iobase);
-                       if (err) {
+                       if (err)
                                dev_warn(dev, "error %d: failed to map resource %pR\n",
                                         err, res);
-                               continue;
-                       }
-                       break;
-               case IORESOURCE_MEM:
-                       parent = &iomem_resource;
-                       break;
-
-               case IORESOURCE_BUS:
-               default:
-                       continue;
                }
-
-               err = devm_request_resource(dev, parent, res);
-               if (err)
-                       goto out_release_res;
        }
 
        return 0;
 
 out_release_res:
-       rcar_pcie_release_of_pci_ranges(pci);
+       pci_free_resource_list(&pci->resources);
        return err;
 }
 
@@ -1073,8 +1057,4 @@ static struct platform_driver rcar_pcie_driver = {
        },
        .probe = rcar_pcie_probe,
 };
-module_platform_driver(rcar_pcie_driver);
-
-MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>");
-MODULE_DESCRIPTION("Renesas R-Car PCIe driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(rcar_pcie_driver);
index 3479d30e2be86cbfccd108da69377899e1e1cd92..0b597d9190b4bd4811bc8559c2fec5c247cc8765 100644 (file)
@@ -825,27 +825,33 @@ static int nwl_pcie_probe(struct platform_device *pdev)
 
        err = of_pci_get_host_bridge_resources(node, 0, 0xff, &res, &iobase);
        if (err) {
-               pr_err("Getting bridge resources failed\n");
+               dev_err(pcie->dev, "Getting bridge resources failed\n");
                return err;
        }
 
+       err = devm_request_pci_bus_resources(pcie->dev, &res);
+       if (err)
+               goto error;
+
        err = nwl_pcie_init_irq_domain(pcie);
        if (err) {
                dev_err(pcie->dev, "Failed creating IRQ Domain\n");
-               return err;
+               goto error;
        }
 
        bus = pci_create_root_bus(&pdev->dev, pcie->root_busno,
                                  &nwl_pcie_ops, pcie, &res);
-       if (!bus)
-               return -ENOMEM;
+       if (!bus) {
+               err = -ENOMEM;
+               goto error;
+       }
 
        if (IS_ENABLED(CONFIG_PCI_MSI)) {
                err = nwl_pcie_enable_msi(pcie, bus);
                if (err < 0) {
                        dev_err(&pdev->dev,
                                "failed to enable MSI support: %d\n", err);
-                       return err;
+                       goto error;
                }
        }
        pci_scan_child_bus(bus);
@@ -855,6 +861,10 @@ static int nwl_pcie_probe(struct platform_device *pdev)
        pci_bus_add_devices(bus);
        platform_set_drvdata(pdev, pcie);
        return 0;
+
+error:
+       pci_free_resource_list(&res);
+       return err;
 }
 
 static int nwl_pcie_remove(struct platform_device *pdev)
index 65f0fe0c2eafb67141fd84f84b31285ff038c868..a30e016395575016e7f8230e676217b197345f5d 100644 (file)
@@ -550,7 +550,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
        pcie_intc_node = of_get_next_child(node, NULL);
        if (!pcie_intc_node) {
                dev_err(dev, "No PCIe Intc node found\n");
-               return PTR_ERR(pcie_intc_node);
+               return -ENODEV;
        }
 
        port->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
@@ -558,7 +558,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
                                                 port);
        if (!port->irq_domain) {
                dev_err(dev, "Failed to get a INTx IRQ domain\n");
-               return PTR_ERR(port->irq_domain);
+               return -ENODEV;
        }
 
        /* Setup MSI */
@@ -569,7 +569,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
                                                         &xilinx_pcie_msi_chip);
                if (!port->irq_domain) {
                        dev_err(dev, "Failed to get a MSI IRQ domain\n");
-                       return PTR_ERR(port->irq_domain);
+                       return -ENODEV;
                }
 
                xilinx_pcie_enable_msi(port);
@@ -660,7 +660,6 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
        struct xilinx_pcie_port *port;
        struct device *dev = &pdev->dev;
        struct pci_bus *bus;
-
        int err;
        resource_size_t iobase = 0;
        LIST_HEAD(res);
@@ -694,10 +693,17 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
                dev_err(dev, "Getting bridge resources failed\n");
                return err;
        }
+
+       err = devm_request_pci_bus_resources(dev, &res);
+       if (err)
+               goto error;
+
        bus = pci_create_root_bus(&pdev->dev, 0,
                                  &xilinx_pcie_ops, port, &res);
-       if (!bus)
-               return -ENOMEM;
+       if (!bus) {
+               err = -ENOMEM;
+               goto error;
+       }
 
 #ifdef CONFIG_PCI_MSI
        xilinx_pcie_msi_chip.dev = port->dev;
@@ -712,6 +718,10 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, port);
 
        return 0;
+
+error:
+       pci_free_resource_list(&res);
+       return err;
 }
 
 /**
index fa49f9143b80631108e10ef78e5e45e285c40cf8..6a33ddcfa20b45b7b5d7bbfdc04a2cf4288c8368 100644 (file)
@@ -675,6 +675,8 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
        if (bridge->is_going_away)
                return;
 
+       pm_runtime_get_sync(&bridge->pci_dev->dev);
+
        list_for_each_entry(slot, &bridge->slots, node) {
                struct pci_bus *bus = slot->bus;
                struct pci_dev *dev, *tmp;
@@ -694,6 +696,8 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
                        disable_slot(slot);
                }
        }
+
+       pm_runtime_put(&bridge->pci_dev->dev);
 }
 
 /*
index 5c24e938042fd6bccb4e207d58dcb988d7c62611..08e84d61874e104ac7479f132d830f5fca98861a 100644 (file)
@@ -546,6 +546,10 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
        u8 present;
        bool link;
 
+       /* Interrupts cannot originate from a controller that's asleep */
+       if (pdev->current_state == PCI_D3cold)
+               return IRQ_NONE;
+
        /*
         * In order to guarantee that all interrupt events are
         * serviced, we need to re-inspect Slot Status register after
index a080f4496fe2f30ffe40c0e5835d1d4d49d3430e..a02981efdad570148e39925cfbe4a8579ca7f7ca 100644 (file)
@@ -4,6 +4,7 @@
  *
  * Copyright (C) 2003-2004 Intel
  * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
+ * Copyright (C) 2016 Christoph Hellwig.
  */
 
 #include <linux/err.h>
@@ -207,6 +208,12 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
        desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
 }
 
+static void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
+{
+       return desc->mask_base +
+               desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+}
+
 /*
  * This internal function does not flush PCI writes to the device.
  * All users must ensure that they read from the device before either
@@ -217,8 +224,6 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
 u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
 {
        u32 mask_bits = desc->masked;
-       unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
-                                               PCI_MSIX_ENTRY_VECTOR_CTRL;
 
        if (pci_msi_ignore_mask)
                return 0;
@@ -226,7 +231,7 @@ u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
        mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
        if (flag)
                mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
-       writel(mask_bits, desc->mask_base + offset);
+       writel(mask_bits, pci_msix_desc_addr(desc) + PCI_MSIX_ENTRY_VECTOR_CTRL);
 
        return mask_bits;
 }
@@ -284,8 +289,7 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
        BUG_ON(dev->current_state != PCI_D0);
 
        if (entry->msi_attrib.is_msix) {
-               void __iomem *base = entry->mask_base +
-                       entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+               void __iomem *base = pci_msix_desc_addr(entry);
 
                msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
                msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
@@ -315,9 +319,7 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
        if (dev->current_state != PCI_D0) {
                /* Don't touch the hardware now */
        } else if (entry->msi_attrib.is_msix) {
-               void __iomem *base;
-               base = entry->mask_base +
-                       entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+               void __iomem *base = pci_msix_desc_addr(entry);
 
                writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
                writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
@@ -567,6 +569,7 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec)
        entry->msi_attrib.multi_cap     = (control & PCI_MSI_FLAGS_QMASK) >> 1;
        entry->msi_attrib.multiple      = ilog2(__roundup_pow_of_two(nvec));
        entry->nvec_used                = nvec;
+       entry->affinity                 = dev->irq_affinity;
 
        if (control & PCI_MSI_FLAGS_64BIT)
                entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
@@ -678,10 +681,18 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
 static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
                              struct msix_entry *entries, int nvec)
 {
+       const struct cpumask *mask = NULL;
        struct msi_desc *entry;
-       int i;
+       int cpu = -1, i;
 
        for (i = 0; i < nvec; i++) {
+               if (dev->irq_affinity) {
+                       cpu = cpumask_next(cpu, dev->irq_affinity);
+                       if (cpu >= nr_cpu_ids)
+                               cpu = cpumask_first(dev->irq_affinity);
+                       mask = cpumask_of(cpu);
+               }
+
                entry = alloc_msi_entry(&dev->dev);
                if (!entry) {
                        if (!i)
@@ -694,10 +705,14 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
 
                entry->msi_attrib.is_msix       = 1;
                entry->msi_attrib.is_64         = 1;
-               entry->msi_attrib.entry_nr      = entries[i].entry;
+               if (entries)
+                       entry->msi_attrib.entry_nr = entries[i].entry;
+               else
+                       entry->msi_attrib.entry_nr = i;
                entry->msi_attrib.default_irq   = dev->irq;
                entry->mask_base                = base;
                entry->nvec_used                = 1;
+               entry->affinity                 = mask;
 
                list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
        }
@@ -712,13 +727,11 @@ static void msix_program_entries(struct pci_dev *dev,
        int i = 0;
 
        for_each_pci_msi_entry(entry, dev) {
-               int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE +
-                                               PCI_MSIX_ENTRY_VECTOR_CTRL;
-
-               entries[i].vector = entry->irq;
-               entry->masked = readl(entry->mask_base + offset);
+               if (entries)
+                       entries[i++].vector = entry->irq;
+               entry->masked = readl(pci_msix_desc_addr(entry) +
+                               PCI_MSIX_ENTRY_VECTOR_CTRL);
                msix_mask_irq(entry, 1);
-               i++;
        }
 }
 
@@ -931,7 +944,7 @@ EXPORT_SYMBOL(pci_msix_vec_count);
 /**
  * pci_enable_msix - configure device's MSI-X capability structure
  * @dev: pointer to the pci_dev data structure of MSI-X device function
- * @entries: pointer to an array of MSI-X entries
+ * @entries: pointer to an array of MSI-X entries (optional)
  * @nvec: number of MSI-X irqs requested for allocation by device driver
  *
  * Setup the MSI-X capability structure of device function with the number
@@ -951,22 +964,21 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
        if (!pci_msi_supported(dev, nvec))
                return -EINVAL;
 
-       if (!entries)
-               return -EINVAL;
-
        nr_entries = pci_msix_vec_count(dev);
        if (nr_entries < 0)
                return nr_entries;
        if (nvec > nr_entries)
                return nr_entries;
 
-       /* Check for any invalid entries */
-       for (i = 0; i < nvec; i++) {
-               if (entries[i].entry >= nr_entries)
-                       return -EINVAL;         /* invalid entry */
-               for (j = i + 1; j < nvec; j++) {
-                       if (entries[i].entry == entries[j].entry)
-                               return -EINVAL; /* duplicate entry */
+       if (entries) {
+               /* Check for any invalid entries */
+               for (i = 0; i < nvec; i++) {
+                       if (entries[i].entry >= nr_entries)
+                               return -EINVAL;         /* invalid entry */
+                       for (j = i + 1; j < nvec; j++) {
+                               if (entries[i].entry == entries[j].entry)
+                                       return -EINVAL; /* duplicate entry */
+                       }
                }
        }
        WARN_ON(!!dev->msix_enabled);
@@ -1026,19 +1038,8 @@ int pci_msi_enabled(void)
 }
 EXPORT_SYMBOL(pci_msi_enabled);
 
-/**
- * pci_enable_msi_range - configure device's MSI capability structure
- * @dev: device to configure
- * @minvec: minimal number of interrupts to configure
- * @maxvec: maximum number of interrupts to configure
- *
- * This function tries to allocate a maximum possible number of interrupts in a
- * range between @minvec and @maxvec. It returns a negative errno if an error
- * occurs. If it succeeds, it returns the actual number of interrupts allocated
- * and updates the @dev's irq member to the lowest new interrupt number;
- * the other interrupt numbers allocated to this device are consecutive.
- **/
-int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
+static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
+               unsigned int flags)
 {
        int nvec;
        int rc;
@@ -1061,25 +1062,85 @@ int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
        nvec = pci_msi_vec_count(dev);
        if (nvec < 0)
                return nvec;
-       else if (nvec < minvec)
+       if (nvec < minvec)
                return -EINVAL;
-       else if (nvec > maxvec)
+
+       if (nvec > maxvec)
                nvec = maxvec;
 
-       do {
+       for (;;) {
+               if (!(flags & PCI_IRQ_NOAFFINITY)) {
+                       dev->irq_affinity = irq_create_affinity_mask(&nvec);
+                       if (nvec < minvec)
+                               return -ENOSPC;
+               }
+
                rc = msi_capability_init(dev, nvec);
-               if (rc < 0) {
+               if (rc == 0)
+                       return nvec;
+
+               kfree(dev->irq_affinity);
+               dev->irq_affinity = NULL;
+
+               if (rc < 0)
                        return rc;
-               } else if (rc > 0) {
-                       if (rc < minvec)
+               if (rc < minvec)
+                       return -ENOSPC;
+
+               nvec = rc;
+       }
+}
+
+/**
+ * pci_enable_msi_range - configure device's MSI capability structure
+ * @dev: device to configure
+ * @minvec: minimal number of interrupts to configure
+ * @maxvec: maximum number of interrupts to configure
+ *
+ * This function tries to allocate a maximum possible number of interrupts in a
+ * range between @minvec and @maxvec. It returns a negative errno if an error
+ * occurs. If it succeeds, it returns the actual number of interrupts allocated
+ * and updates the @dev's irq member to the lowest new interrupt number;
+ * the other interrupt numbers allocated to this device are consecutive.
+ **/
+int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
+{
+       return __pci_enable_msi_range(dev, minvec, maxvec, PCI_IRQ_NOAFFINITY);
+}
+EXPORT_SYMBOL(pci_enable_msi_range);
+
+static int __pci_enable_msix_range(struct pci_dev *dev,
+               struct msix_entry *entries, int minvec, int maxvec,
+               unsigned int flags)
+{
+       int nvec = maxvec;
+       int rc;
+
+       if (maxvec < minvec)
+               return -ERANGE;
+
+       for (;;) {
+               if (!(flags & PCI_IRQ_NOAFFINITY)) {
+                       dev->irq_affinity = irq_create_affinity_mask(&nvec);
+                       if (nvec < minvec)
                                return -ENOSPC;
-                       nvec = rc;
                }
-       } while (rc);
 
-       return nvec;
+               rc = pci_enable_msix(dev, entries, nvec);
+               if (rc == 0)
+                       return nvec;
+
+               kfree(dev->irq_affinity);
+               dev->irq_affinity = NULL;
+
+               if (rc < 0)
+                       return rc;
+               if (rc < minvec)
+                       return -ENOSPC;
+
+               nvec = rc;
+       }
 }
-EXPORT_SYMBOL(pci_enable_msi_range);
 
 /**
  * pci_enable_msix_range - configure device's MSI-X capability structure
@@ -1097,28 +1158,101 @@ EXPORT_SYMBOL(pci_enable_msi_range);
  * with new allocated MSI-X interrupts.
  **/
 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
-                              int minvec, int maxvec)
+               int minvec, int maxvec)
 {
-       int nvec = maxvec;
-       int rc;
+       return __pci_enable_msix_range(dev, entries, minvec, maxvec,
+                       PCI_IRQ_NOAFFINITY);
+}
+EXPORT_SYMBOL(pci_enable_msix_range);
 
-       if (maxvec < minvec)
-               return -ERANGE;
+/**
+ * pci_alloc_irq_vectors - allocate multiple IRQs for a device
+ * @dev:               PCI device to operate on
+ * @min_vecs:          minimum number of vectors required (must be >= 1)
+ * @max_vecs:          maximum (desired) number of vectors
+ * @flags:             flags or quirks for the allocation
+ *
+ * Allocate up to @max_vecs interrupt vectors for @dev, using MSI-X or MSI
+ * vectors if available, and fall back to a single legacy vector
+ * if neither is available.  Return the number of vectors allocated,
+ * (which might be smaller than @max_vecs) if successful, or a negative
+ * error code on error. If less than @min_vecs interrupt vectors are
+ * available for @dev the function will fail with -ENOSPC.
+ *
+ * To get the Linux IRQ number used for a vector that can be passed to
+ * request_irq() use the pci_irq_vector() helper.
+ */
+int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+               unsigned int max_vecs, unsigned int flags)
+{
+       int vecs = -ENOSPC;
 
-       do {
-               rc = pci_enable_msix(dev, entries, nvec);
-               if (rc < 0) {
-                       return rc;
-               } else if (rc > 0) {
-                       if (rc < minvec)
-                               return -ENOSPC;
-                       nvec = rc;
+       if (!(flags & PCI_IRQ_NOMSIX)) {
+               vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
+                               flags);
+               if (vecs > 0)
+                       return vecs;
+       }
+
+       if (!(flags & PCI_IRQ_NOMSI)) {
+               vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags);
+               if (vecs > 0)
+                       return vecs;
+       }
+
+       /* use legacy irq if allowed */
+       if (!(flags & PCI_IRQ_NOLEGACY) && min_vecs == 1)
+               return 1;
+       return vecs;
+}
+EXPORT_SYMBOL(pci_alloc_irq_vectors);
+
+/**
+ * pci_free_irq_vectors - free previously allocated IRQs for a device
+ * @dev:               PCI device to operate on
+ *
+ * Undoes the allocations and enabling in pci_alloc_irq_vectors().
+ */
+void pci_free_irq_vectors(struct pci_dev *dev)
+{
+       pci_disable_msix(dev);
+       pci_disable_msi(dev);
+}
+EXPORT_SYMBOL(pci_free_irq_vectors);
+
+/**
+ * pci_irq_vector - return Linux IRQ number of a device vector
+ * @dev: PCI device to operate on
+ * @nr: device-relative interrupt vector index (0-based).
+ */
+int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
+{
+       if (dev->msix_enabled) {
+               struct msi_desc *entry;
+               int i = 0;
+
+               for_each_pci_msi_entry(entry, dev) {
+                       if (i == nr)
+                               return entry->irq;
+                       i++;
                }
-       } while (rc);
+               WARN_ON_ONCE(1);
+               return -EINVAL;
+       }
 
-       return nvec;
+       if (dev->msi_enabled) {
+               struct msi_desc *entry = first_pci_msi_entry(dev);
+
+               if (WARN_ON_ONCE(nr >= entry->nvec_used))
+                       return -EINVAL;
+       } else {
+               if (WARN_ON_ONCE(nr > 0))
+                       return -EINVAL;
+       }
+
+       return dev->irq + nr;
 }
-EXPORT_SYMBOL(pci_enable_msix_range);
+EXPORT_SYMBOL(pci_irq_vector);
 
 struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
 {
index d7ffd66814bb51c14cbdf849fcd6a0a33f30dba7..e39a67c8ef397e7bd7511b266a533ab0cff817f8 100644 (file)
@@ -777,7 +777,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
 
        if (!pci_dev->state_saved) {
                pci_save_state(pci_dev);
-               if (!pci_has_subordinate(pci_dev))
+               if (pci_power_manageable(pci_dev))
                        pci_prepare_to_sleep(pci_dev);
        }
 
@@ -1144,7 +1144,6 @@ static int pci_pm_runtime_suspend(struct device *dev)
                return -ENOSYS;
 
        pci_dev->state_saved = false;
-       pci_dev->no_d3cold = false;
        error = pm->runtime_suspend(dev);
        if (error) {
                /*
@@ -1161,8 +1160,6 @@ static int pci_pm_runtime_suspend(struct device *dev)
 
                return error;
        }
-       if (!pci_dev->d3cold_allowed)
-               pci_dev->no_d3cold = true;
 
        pci_fixup_device(pci_fixup_suspend, pci_dev);
 
index d319a9ca9b7bf9a330620313ec1e5c89d819142c..bcd10c795284cff70a58e73aac8ce2810da6831e 100644 (file)
@@ -406,6 +406,11 @@ static ssize_t d3cold_allowed_store(struct device *dev,
                return -EINVAL;
 
        pdev->d3cold_allowed = !!val;
+       if (pdev->d3cold_allowed)
+               pci_d3cold_enable(pdev);
+       else
+               pci_d3cold_disable(pdev);
+
        pm_runtime_resume(dev);
 
        return count;
index badbddc683f006fc0510c6e609a01772fc0ba4f8..aab9d5115a5f65ab4e945e5a29c00c91745a13e3 100644 (file)
@@ -7,8 +7,10 @@
  *     Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
  */
 
+#include <linux/acpi.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
+#include <linux/dmi.h>
 #include <linux/init.h>
 #include <linux/of.h>
 #include <linux/of_pci.h>
@@ -25,7 +27,9 @@
 #include <linux/device.h>
 #include <linux/pm_runtime.h>
 #include <linux/pci_hotplug.h>
+#include <linux/vmalloc.h>
 #include <asm/setup.h>
+#include <asm/dma.h>
 #include <linux/aer.h>
 #include "pci.h"
 
@@ -81,6 +85,9 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
 unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
 unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
 
+#define DEFAULT_HOTPLUG_BUS_SIZE       1
+unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
+
 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
 
 /*
@@ -101,6 +108,21 @@ unsigned int pcibios_max_latency = 255;
 /* If set, the PCIe ARI capability will not be used. */
 static bool pcie_ari_disabled;
 
+/* Disable bridge_d3 for all PCIe ports */
+static bool pci_bridge_d3_disable;
+/* Force bridge_d3 for all PCIe ports */
+static bool pci_bridge_d3_force;
+
+static int __init pcie_port_pm_setup(char *str)
+{
+       if (!strcmp(str, "off"))
+               pci_bridge_d3_disable = true;
+       else if (!strcmp(str, "force"))
+               pci_bridge_d3_force = true;
+       return 1;
+}
+__setup("pcie_port_pm=", pcie_port_pm_setup);
+
 /**
  * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
  * @bus: pointer to PCI bus structure to search
@@ -2155,6 +2177,164 @@ void pci_config_pm_runtime_put(struct pci_dev *pdev)
                pm_runtime_put_sync(parent);
 }
 
+/**
+ * pci_bridge_d3_possible - Is it possible to put the bridge into D3
+ * @bridge: Bridge to check
+ *
+ * This function checks if it is possible to move the bridge to D3.
+ * Currently we only allow D3 for recent enough PCIe ports.
+ */
+static bool pci_bridge_d3_possible(struct pci_dev *bridge)
+{
+       unsigned int year;
+
+       if (!pci_is_pcie(bridge))
+               return false;
+
+       switch (pci_pcie_type(bridge)) {
+       case PCI_EXP_TYPE_ROOT_PORT:
+       case PCI_EXP_TYPE_UPSTREAM:
+       case PCI_EXP_TYPE_DOWNSTREAM:
+               if (pci_bridge_d3_disable)
+                       return false;
+               if (pci_bridge_d3_force)
+                       return true;
+
+               /*
+                * It should be safe to put PCIe ports from 2015 or newer
+                * to D3.
+                */
+               if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) &&
+                   year >= 2015) {
+                       return true;
+               }
+               break;
+       }
+
+       return false;
+}
+
+static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
+{
+       bool *d3cold_ok = data;
+       bool no_d3cold;
+
+       /*
+        * The device needs to be allowed to go D3cold and if it is wake
+        * capable to do so from D3cold.
+        */
+       no_d3cold = dev->no_d3cold || !dev->d3cold_allowed ||
+               (device_may_wakeup(&dev->dev) && !pci_pme_capable(dev, PCI_D3cold)) ||
+               !pci_power_manageable(dev);
+
+       *d3cold_ok = !no_d3cold;
+
+       return no_d3cold;
+}
+
+/*
+ * pci_bridge_d3_update - Update bridge D3 capabilities
+ * @dev: PCI device which is changed
+ * @remove: Is the device being removed
+ *
+ * Update upstream bridge PM capabilities accordingly depending on if the
+ * device PM configuration was changed or the device is being removed.  The
+ * change is also propagated upstream.
+ */
+static void pci_bridge_d3_update(struct pci_dev *dev, bool remove)
+{
+       struct pci_dev *bridge;
+       bool d3cold_ok = true;
+
+       bridge = pci_upstream_bridge(dev);
+       if (!bridge || !pci_bridge_d3_possible(bridge))
+               return;
+
+       pci_dev_get(bridge);
+       /*
+        * If the device is removed we do not care about its D3cold
+        * capabilities.
+        */
+       if (!remove)
+               pci_dev_check_d3cold(dev, &d3cold_ok);
+
+       if (d3cold_ok) {
+               /*
+                * We need to go through all children to find out if all of
+                * them can still go to D3cold.
+                */
+               pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
+                            &d3cold_ok);
+       }
+
+       if (bridge->bridge_d3 != d3cold_ok) {
+               bridge->bridge_d3 = d3cold_ok;
+               /* Propagate change to upstream bridges */
+               pci_bridge_d3_update(bridge, false);
+       }
+
+       pci_dev_put(bridge);
+}
+
+/**
+ * pci_bridge_d3_device_changed - Update bridge D3 capabilities on change
+ * @dev: PCI device that was changed
+ *
+ * If a device is added or its PM configuration, such as is it allowed to
+ * enter D3cold, is changed this function updates upstream bridge PM
+ * capabilities accordingly.
+ */
+void pci_bridge_d3_device_changed(struct pci_dev *dev)
+{
+       pci_bridge_d3_update(dev, false);
+}
+
+/**
+ * pci_bridge_d3_device_removed - Update bridge D3 capabilities on remove
+ * @dev: PCI device being removed
+ *
+ * Function updates upstream bridge PM capabilities based on other devices
+ * still left on the bus.
+ */
+void pci_bridge_d3_device_removed(struct pci_dev *dev)
+{
+       pci_bridge_d3_update(dev, true);
+}
+
+/**
+ * pci_d3cold_enable - Enable D3cold for device
+ * @dev: PCI device to handle
+ *
+ * This function can be used in drivers to enable D3cold from the device
+ * they handle.  It also updates upstream PCI bridge PM capabilities
+ * accordingly.
+ */
+void pci_d3cold_enable(struct pci_dev *dev)
+{
+       if (dev->no_d3cold) {
+               dev->no_d3cold = false;
+               pci_bridge_d3_device_changed(dev);
+       }
+}
+EXPORT_SYMBOL_GPL(pci_d3cold_enable);
+
+/**
+ * pci_d3cold_disable - Disable D3cold for device
+ * @dev: PCI device to handle
+ *
+ * This function can be used in drivers to disable D3cold from the device
+ * they handle.  It also updates upstream PCI bridge PM capabilities
+ * accordingly.
+ */
+void pci_d3cold_disable(struct pci_dev *dev)
+{
+       if (!dev->no_d3cold) {
+               dev->no_d3cold = true;
+               pci_bridge_d3_device_changed(dev);
+       }
+}
+EXPORT_SYMBOL_GPL(pci_d3cold_disable);
+
 /**
  * pci_pm_init - Initialize PM functions of given PCI device
  * @dev: PCI device to handle.
@@ -2189,6 +2369,7 @@ void pci_pm_init(struct pci_dev *dev)
        dev->pm_cap = pm;
        dev->d3_delay = PCI_PM_D3_WAIT;
        dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
+       dev->bridge_d3 = pci_bridge_d3_possible(dev);
        dev->d3cold_allowed = true;
 
        dev->d1_support = false;
@@ -3165,6 +3346,23 @@ int __weak pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
 #endif
 }
 
+/**
+ *     pci_unmap_iospace - Unmap the memory mapped I/O space
+ *     @res: resource to be unmapped
+ *
+ *     Unmap the CPU virtual address @res from virtual address space.
+ *     Only architectures that have memory mapped IO functions defined
+ *     (and the PCI_IOBASE value defined) should call this function.
+ */
+void pci_unmap_iospace(struct resource *res)
+{
+#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
+       unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
+
+       unmap_kernel_range(vaddr, resource_size(res));
+#endif
+}
+
 static void __pci_set_master(struct pci_dev *dev, bool enable)
 {
        u16 old_cmd, cmd;
@@ -4755,6 +4953,7 @@ static DEFINE_SPINLOCK(resource_alignment_lock);
 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
 {
        int seg, bus, slot, func, align_order, count;
+       unsigned short vendor, device, subsystem_vendor, subsystem_device;
        resource_size_t align = 0;
        char *p;
 
@@ -4768,28 +4967,55 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
                } else {
                        align_order = -1;
                }
-               if (sscanf(p, "%x:%x:%x.%x%n",
-                       &seg, &bus, &slot, &func, &count) != 4) {
-                       seg = 0;
-                       if (sscanf(p, "%x:%x.%x%n",
-                                       &bus, &slot, &func, &count) != 3) {
-                               /* Invalid format */
-                               printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
-                                       p);
+               if (strncmp(p, "pci:", 4) == 0) {
+                       /* PCI vendor/device (subvendor/subdevice) ids are specified */
+                       p += 4;
+                       if (sscanf(p, "%hx:%hx:%hx:%hx%n",
+                               &vendor, &device, &subsystem_vendor, &subsystem_device, &count) != 4) {
+                               if (sscanf(p, "%hx:%hx%n", &vendor, &device, &count) != 2) {
+                                       printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: pci:%s\n",
+                                               p);
+                                       break;
+                               }
+                               subsystem_vendor = subsystem_device = 0;
+                       }
+                       p += count;
+                       if ((!vendor || (vendor == dev->vendor)) &&
+                               (!device || (device == dev->device)) &&
+                               (!subsystem_vendor || (subsystem_vendor == dev->subsystem_vendor)) &&
+                               (!subsystem_device || (subsystem_device == dev->subsystem_device))) {
+                               if (align_order == -1)
+                                       align = PAGE_SIZE;
+                               else
+                                       align = 1 << align_order;
+                               /* Found */
                                break;
                        }
                }
-               p += count;
-               if (seg == pci_domain_nr(dev->bus) &&
-                       bus == dev->bus->number &&
-                       slot == PCI_SLOT(dev->devfn) &&
-                       func == PCI_FUNC(dev->devfn)) {
-                       if (align_order == -1)
-                               align = PAGE_SIZE;
-                       else
-                               align = 1 << align_order;
-                       /* Found */
-                       break;
+               else {
+                       if (sscanf(p, "%x:%x:%x.%x%n",
+                               &seg, &bus, &slot, &func, &count) != 4) {
+                               seg = 0;
+                               if (sscanf(p, "%x:%x.%x%n",
+                                               &bus, &slot, &func, &count) != 3) {
+                                       /* Invalid format */
+                                       printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
+                                               p);
+                                       break;
+                               }
+                       }
+                       p += count;
+                       if (seg == pci_domain_nr(dev->bus) &&
+                               bus == dev->bus->number &&
+                               slot == PCI_SLOT(dev->devfn) &&
+                               func == PCI_FUNC(dev->devfn)) {
+                               if (align_order == -1)
+                                       align = PAGE_SIZE;
+                               else
+                                       align = 1 << align_order;
+                               /* Found */
+                               break;
+                       }
                }
                if (*p != ';' && *p != ',') {
                        /* End of param or invalid format */
@@ -4897,7 +5123,7 @@ static ssize_t pci_resource_alignment_store(struct bus_type *bus,
        return pci_set_resource_alignment_param(buf, count);
 }
 
-BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
+static BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
                                        pci_resource_alignment_store);
 
 static int __init pci_resource_alignment_sysfs_init(void)
@@ -4923,7 +5149,7 @@ int pci_get_new_domain_nr(void)
 }
 
 #ifdef CONFIG_PCI_DOMAINS_GENERIC
-void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
+static int of_pci_bus_find_domain_nr(struct device *parent)
 {
        static int use_dt_domains = -1;
        int domain = -1;
@@ -4967,7 +5193,13 @@ void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
                domain = -1;
        }
 
-       bus->domain_nr = domain;
+       return domain;
+}
+
+int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
+{
+       return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
+                              acpi_pci_bus_find_domain_nr(bus);
 }
 #endif
 #endif
@@ -5021,6 +5253,11 @@ static int __init pci_setup(char *str)
                                pci_hotplug_io_size = memparse(str + 9, &str);
                        } else if (!strncmp(str, "hpmemsize=", 10)) {
                                pci_hotplug_mem_size = memparse(str + 10, &str);
+                       } else if (!strncmp(str, "hpbussize=", 10)) {
+                               pci_hotplug_bus_size =
+                                       simple_strtoul(str + 10, &str, 0);
+                               if (pci_hotplug_bus_size > 0xff)
+                                       pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
                        } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
                                pcie_bus_config = PCIE_BUS_TUNE_OFF;
                        } else if (!strncmp(str, "pcie_bus_safe", 13)) {
index a814bbb80fcb3d1ddbf35508b7dcf5808f3d0e5d..9730c474b0163ccab1d9aa8027cf1c00d6ca823a 100644 (file)
@@ -82,6 +82,8 @@ void pci_pm_init(struct pci_dev *dev);
 void pci_ea_init(struct pci_dev *dev);
 void pci_allocate_cap_save_buffers(struct pci_dev *dev);
 void pci_free_cap_save_buffers(struct pci_dev *dev);
+void pci_bridge_d3_device_changed(struct pci_dev *dev);
+void pci_bridge_d3_device_removed(struct pci_dev *dev);
 
 static inline void pci_wakeup_event(struct pci_dev *dev)
 {
@@ -94,6 +96,15 @@ static inline bool pci_has_subordinate(struct pci_dev *pci_dev)
        return !!(pci_dev->subordinate);
 }
 
+static inline bool pci_power_manageable(struct pci_dev *pci_dev)
+{
+       /*
+        * Currently we allow normal PCI devices and PCI bridges transition
+        * into D3 if their bridge_d3 is set.
+        */
+       return !pci_has_subordinate(pci_dev) || pci_dev->bridge_d3;
+}
+
 struct pci_vpd_ops {
        ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
        ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
index 22ca6412bd15c5e36d8965783f79531c58907cf6..7fcea75afa4cf9a3b702c2773ead13c75a65de80 100644 (file)
@@ -83,7 +83,7 @@ config PCIE_PME
        depends on PCIEPORTBUS && PM
 
 config PCIE_DPC
-       tristate "PCIe Downstream Port Containment support"
+       bool "PCIe Downstream Port Containment support"
        depends on PCIEPORTBUS
        default n
        help
@@ -92,6 +92,3 @@ config PCIE_DPC
          will be handled by the DPC driver.  If your system doesn't
          have this capability or you do not want to use this feature,
          it is safe to answer N.
-
-         To compile this driver as a module, choose M here: the module
-         will be called pcie-dpc.
index 2dfe7fdb77e7fe95d6b22d1cc925fe24a06c685b..0ec649d961d7a6ee515747c3458a8ce1df108099 100644 (file)
@@ -139,7 +139,7 @@ static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
 static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
 {
        /* Don't enable Clock PM if the link is not Clock PM capable */
-       if (!link->clkpm_capable && enable)
+       if (!link->clkpm_capable)
                enable = 0;
        /* Need nothing if the specified equals to current state */
        if (link->clkpm_enabled == enable)
index ab552f1bc08fae0c113085f739fc01ebc1cb2b77..250f878617863e1452f95d57e5055f8599a1e2b5 100644 (file)
@@ -15,8 +15,8 @@
 
 struct dpc_dev {
        struct pcie_device      *dev;
-       struct work_struct      work;
-       int                     cap_pos;
+       struct work_struct      work;
+       int                     cap_pos;
 };
 
 static void dpc_wait_link_inactive(struct pci_dev *pdev)
@@ -89,7 +89,7 @@ static int dpc_probe(struct pcie_device *dev)
        int status;
        u16 ctl, cap;
 
-       dpc = kzalloc(sizeof(*dpc), GFP_KERNEL);
+       dpc = devm_kzalloc(&dev->device, sizeof(*dpc), GFP_KERNEL);
        if (!dpc)
                return -ENOMEM;
 
@@ -98,11 +98,12 @@ static int dpc_probe(struct pcie_device *dev)
        INIT_WORK(&dpc->work, interrupt_event_handler);
        set_service_data(dev, dpc);
 
-       status = request_irq(dev->irq, dpc_irq, IRQF_SHARED, "pcie-dpc", dpc);
+       status = devm_request_irq(&dev->device, dev->irq, dpc_irq, IRQF_SHARED,
+                                 "pcie-dpc", dpc);
        if (status) {
                dev_warn(&dev->device, "request IRQ%d failed: %d\n", dev->irq,
                         status);
-               goto out;
+               return status;
        }
 
        pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CAP, &cap);
@@ -117,9 +118,6 @@ static int dpc_probe(struct pcie_device *dev)
                FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), (cap >> 8) & 0xf,
                FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE));
        return status;
- out:
-       kfree(dpc);
-       return status;
 }
 
 static void dpc_remove(struct pcie_device *dev)
@@ -131,14 +129,11 @@ static void dpc_remove(struct pcie_device *dev)
        pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl);
        ctl &= ~(PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN);
        pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
-
-       free_irq(dev->irq, dpc);
-       kfree(dpc);
 }
 
 static struct pcie_port_service_driver dpcdriver = {
        .name           = "dpc",
-       .port_type      = PCI_EXP_TYPE_ROOT_PORT | PCI_EXP_TYPE_DOWNSTREAM,
+       .port_type      = PCIE_ANY_PORT,
        .service        = PCIE_PORT_SERVICE_DPC,
        .probe          = dpc_probe,
        .remove         = dpc_remove,
index 32d4d0a3d20e5f2b5b2e78a90adc77081ad451be..e9270b4026f3c05ff43959c8945efef3e63ae1de 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/pm.h>
+#include <linux/pm_runtime.h>
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/pcieport_if.h>
@@ -342,6 +343,8 @@ static int pcie_device_init(struct pci_dev *pdev, int service, int irq)
                return retval;
        }
 
+       pm_runtime_no_callbacks(device);
+
        return 0;
 }
 
index be35da2e105e0b39449244416f8ba8934446f58a..70d7ad8c6d17d95ba07ae6cbc7458b3b3efacac0 100644 (file)
@@ -93,6 +93,26 @@ static int pcie_port_resume_noirq(struct device *dev)
        return 0;
 }
 
+static int pcie_port_runtime_suspend(struct device *dev)
+{
+       return to_pci_dev(dev)->bridge_d3 ? 0 : -EBUSY;
+}
+
+static int pcie_port_runtime_resume(struct device *dev)
+{
+       return 0;
+}
+
+static int pcie_port_runtime_idle(struct device *dev)
+{
+       /*
+        * Assume the PCI core has set bridge_d3 whenever it thinks the port
+        * should be good to go to D3.  Everything else, including moving
+        * the port to D3, is handled by the PCI core.
+        */
+       return to_pci_dev(dev)->bridge_d3 ? 0 : -EBUSY;
+}
+
 static const struct dev_pm_ops pcie_portdrv_pm_ops = {
        .suspend        = pcie_port_device_suspend,
        .resume         = pcie_port_device_resume,
@@ -101,6 +121,9 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
        .poweroff       = pcie_port_device_suspend,
        .restore        = pcie_port_device_resume,
        .resume_noirq   = pcie_port_resume_noirq,
+       .runtime_suspend = pcie_port_runtime_suspend,
+       .runtime_resume = pcie_port_runtime_resume,
+       .runtime_idle   = pcie_port_runtime_idle,
 };
 
 #define PCIE_PORTDRV_PM_OPS    (&pcie_portdrv_pm_ops)
@@ -134,16 +157,39 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
                return status;
 
        pci_save_state(dev);
+
        /*
-        * D3cold may not work properly on some PCIe port, so disable
-        * it by default.
+        * Prevent runtime PM if the port is advertising support for PCIe
+        * hotplug.  Otherwise the BIOS hotplug SMI code might not be able
+        * to enumerate devices behind this port properly (the port is
+        * powered down preventing all config space accesses to the
+        * subordinate devices).  We can't be sure for native PCIe hotplug
+        * either so prevent that as well.
         */
-       dev->d3cold_allowed = false;
+       if (!dev->is_hotplug_bridge) {
+               /*
+                * Keep the port resumed 100ms to make sure things like
+                * config space accesses from userspace (lspci) will not
+                * cause the port to repeatedly suspend and resume.
+                */
+               pm_runtime_set_autosuspend_delay(&dev->dev, 100);
+               pm_runtime_use_autosuspend(&dev->dev);
+               pm_runtime_mark_last_busy(&dev->dev);
+               pm_runtime_put_autosuspend(&dev->dev);
+               pm_runtime_allow(&dev->dev);
+       }
+
        return 0;
 }
 
 static void pcie_portdrv_remove(struct pci_dev *dev)
 {
+       if (!dev->is_hotplug_bridge) {
+               pm_runtime_forbid(&dev->dev);
+               pm_runtime_get_noresume(&dev->dev);
+               pm_runtime_dont_use_autosuspend(&dev->dev);
+       }
+
        pcie_port_device_remove(dev);
 }
 
index 8e3ef720997dfba2858c4dae77b2a0651f678c2d..93f280df342824fd89c5206f99ff2733fd7e7044 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/aer.h>
 #include <linux/acpi.h>
 #include <linux/irqdomain.h>
+#include <linux/pm_runtime.h>
 #include "pci.h"
 
 #define CARDBUS_LATENCY_TIMER  176     /* secondary latency timer */
@@ -832,6 +833,12 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
        u8 primary, secondary, subordinate;
        int broken = 0;
 
+       /*
+        * Make sure the bridge is powered on to be able to access config
+        * space of devices below it.
+        */
+       pm_runtime_get_sync(&dev->dev);
+
        pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
        primary = buses & 0xFF;
        secondary = (buses >> 8) & 0xFF;
@@ -1012,6 +1019,8 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
 out:
        pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
 
+       pm_runtime_put(&dev->dev);
+
        return max;
 }
 EXPORT_SYMBOL(pci_scan_bridge);
@@ -2076,6 +2085,15 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus)
                                max = pci_scan_bridge(bus, dev, max, pass);
                }
 
+       /*
+        * Make sure a hotplug bridge has at least the minimum requested
+        * number of buses.
+        */
+       if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) {
+               if (max - bus->busn_res.start < pci_hotplug_bus_size - 1)
+                       max = bus->busn_res.start + pci_hotplug_bus_size - 1;
+       }
+
        /*
         * We've scanned the bus and so we know all about what's on
         * the other side of any bridges that may be on this bus plus
@@ -2127,7 +2145,9 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
        b->sysdata = sysdata;
        b->ops = ops;
        b->number = b->busn_res.start = bus;
-       pci_bus_assign_domain_nr(b, parent);
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+       b->domain_nr = pci_bus_find_domain_nr(b, parent);
+#endif
        b2 = pci_find_bus(pci_domain_nr(b), bus);
        if (b2) {
                /* If we already got to this bus through a different bridge, ignore it */
index 3f155e78513fd4171dc4b57c5b7b3b41e7afea20..2408abe4ee8c68919d8a79465e62ab16f3c8660e 100644 (file)
@@ -231,7 +231,7 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct pci_dev *dev = PDE_DATA(file_inode(file));
        struct pci_filp_private *fpriv = file->private_data;
-       int i, ret;
+       int i, ret, write_combine;
 
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;
@@ -245,9 +245,12 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
        if (i >= PCI_ROM_RESOURCE)
                return -ENODEV;
 
+       if (fpriv->mmap_state == pci_mmap_mem)
+               write_combine = fpriv->write_combine;
+       else
+               write_combine = 0;
        ret = pci_mmap_page_range(dev, vma,
-                                 fpriv->mmap_state,
-                                 fpriv->write_combine);
+                                 fpriv->mmap_state, write_combine);
        if (ret < 0)
                return ret;
 
index ee72ebe18f4b5a6510d043da9abcee50cbada987..37ff0158e45f1a3aa453e5da757d21a11e1c3267 100644 (file)
@@ -3189,13 +3189,15 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
 }
 
 /*
- * Atheros AR93xx chips do not behave after a bus reset.  The device will
- * throw a Link Down error on AER-capable systems and regardless of AER,
- * config space of the device is never accessible again and typically
- * causes the system to hang or reset when access is attempted.
+ * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
+ * The device will throw a Link Down error on AER-capable systems and
+ * regardless of AER, config space of the device is never accessible again
+ * and typically causes the system to hang or reset when access is attempted.
  * http://www.spinics.net/lists/linux-pci/msg34797.html
  */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
 
 static void quirk_no_pm_reset(struct pci_dev *dev)
 {
@@ -3711,6 +3713,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c59 */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a,
                         quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c78 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182,
+                        quirk_dma_func1_alias);
 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
                         quirk_dma_func1_alias);
@@ -3747,6 +3752,9 @@ static const struct pci_device_id fixed_dma_alias_tbl[] = {
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
                         PCI_VENDOR_ID_ADAPTEC2, 0x02bb), /* Adaptec 3405 */
          .driver_data = PCI_DEVFN(1, 0) },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
+                        PCI_VENDOR_ID_ADAPTEC2, 0x02bc), /* Adaptec 3805 */
+         .driver_data = PCI_DEVFN(1, 0) },
        { 0 }
 };
 
@@ -4087,6 +4095,7 @@ static const struct pci_dev_acs_enabled {
        { PCI_VENDOR_ID_AMD, 0x7809, pci_quirk_amd_sb_acs },
        { PCI_VENDOR_ID_SOLARFLARE, 0x0903, pci_quirk_mf_endpoint_acs },
        { PCI_VENDOR_ID_SOLARFLARE, 0x0923, pci_quirk_mf_endpoint_acs },
+       { PCI_VENDOR_ID_SOLARFLARE, 0x0A03, pci_quirk_mf_endpoint_acs },
        { PCI_VENDOR_ID_INTEL, 0x10C6, pci_quirk_mf_endpoint_acs },
        { PCI_VENDOR_ID_INTEL, 0x10DB, pci_quirk_mf_endpoint_acs },
        { PCI_VENDOR_ID_INTEL, 0x10DD, pci_quirk_mf_endpoint_acs },
index 8982026637d5598a27c9a36d3b265c6b74cb4a0d..d1ef7acf69307f1377782544bde6011af4e54df2 100644 (file)
@@ -96,6 +96,8 @@ static void pci_remove_bus_device(struct pci_dev *dev)
                dev->subordinate = NULL;
        }
 
+       pci_bridge_d3_device_removed(dev);
+
        pci_destroy_dev(dev);
 }
 
index d678c46e5f03309a8b8babba4b9b1e7684a26f43..c74059e10a6de9d1afe89cea272c4146bc183e68 100644 (file)
@@ -1428,6 +1428,74 @@ void pci_bus_assign_resources(const struct pci_bus *bus)
 }
 EXPORT_SYMBOL(pci_bus_assign_resources);
 
+static void pci_claim_device_resources(struct pci_dev *dev)
+{
+       int i;
+
+       for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
+               struct resource *r = &dev->resource[i];
+
+               if (!r->flags || r->parent)
+                       continue;
+
+               pci_claim_resource(dev, i);
+       }
+}
+
+static void pci_claim_bridge_resources(struct pci_dev *dev)
+{
+       int i;
+
+       for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
+               struct resource *r = &dev->resource[i];
+
+               if (!r->flags || r->parent)
+                       continue;
+
+               pci_claim_bridge_resource(dev, i);
+       }
+}
+
+static void pci_bus_allocate_dev_resources(struct pci_bus *b)
+{
+       struct pci_dev *dev;
+       struct pci_bus *child;
+
+       list_for_each_entry(dev, &b->devices, bus_list) {
+               pci_claim_device_resources(dev);
+
+               child = dev->subordinate;
+               if (child)
+                       pci_bus_allocate_dev_resources(child);
+       }
+}
+
+static void pci_bus_allocate_resources(struct pci_bus *b)
+{
+       struct pci_bus *child;
+
+       /*
+        * Carry out a depth-first search on the PCI bus
+        * tree to allocate bridge apertures. Read the
+        * programmed bridge bases and recursively claim
+        * the respective bridge resources.
+        */
+       if (b->self) {
+               pci_read_bridge_bases(b);
+               pci_claim_bridge_resources(b->self);
+       }
+
+       list_for_each_entry(child, &b->children, node)
+               pci_bus_allocate_resources(child);
+}
+
+void pci_bus_claim_resources(struct pci_bus *b)
+{
+       pci_bus_allocate_resources(b);
+       pci_bus_allocate_dev_resources(b);
+}
+EXPORT_SYMBOL(pci_bus_claim_resources);
+
 static void __pci_bridge_assign_resources(const struct pci_dev *bridge,
                                          struct list_head *add_head,
                                          struct list_head *fail_head)
index adf61b43eb70716f5b00d2c019fec6e4099376e1..734a0428ef0efb90aaef478a957ae9f34a5db30e 100644 (file)
@@ -4854,20 +4854,17 @@ static int
 lpfc_enable_pci_dev(struct lpfc_hba *phba)
 {
        struct pci_dev *pdev;
-       int bars = 0;
 
        /* Obtain PCI device reference */
        if (!phba->pcidev)
                goto out_error;
        else
                pdev = phba->pcidev;
-       /* Select PCI BARs */
-       bars = pci_select_bars(pdev, IORESOURCE_MEM);
        /* Enable PCI device */
        if (pci_enable_device_mem(pdev))
                goto out_error;
        /* Request PCI resource for the device */
-       if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
+       if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
                goto out_disable_device;
        /* Set up device as PCI master and save state for EEH */
        pci_set_master(pdev);
@@ -4884,7 +4881,7 @@ out_disable_device:
        pci_disable_device(pdev);
 out_error:
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                       "1401 Failed to enable pci device, bars:x%x\n", bars);
+                       "1401 Failed to enable pci device\n");
        return -ENODEV;
 }
 
@@ -4899,17 +4896,14 @@ static void
 lpfc_disable_pci_dev(struct lpfc_hba *phba)
 {
        struct pci_dev *pdev;
-       int bars;
 
        /* Obtain PCI device reference */
        if (!phba->pcidev)
                return;
        else
                pdev = phba->pcidev;
-       /* Select PCI BARs */
-       bars = pci_select_bars(pdev, IORESOURCE_MEM);
        /* Release PCI resource and disable PCI device */
-       pci_release_selected_regions(pdev, bars);
+       pci_release_mem_regions(pdev);
        pci_disable_device(pdev);
 
        return;
@@ -9811,7 +9805,6 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
        struct lpfc_vport **vports;
        struct lpfc_hba   *phba = vport->phba;
        int i;
-       int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 
        spin_lock_irq(&phba->hbalock);
        vport->load_flag |= FC_UNLOADING;
@@ -9886,7 +9879,7 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
 
        lpfc_hba_free(phba);
 
-       pci_release_selected_regions(pdev, bars);
+       pci_release_mem_regions(pdev);
        pci_disable_device(pdev);
 }
 
index c10972fcc8e489fe07598c9cc036574c926a4a4b..4fd041bec332c154604d6e1c262a70f791604fae 100644 (file)
@@ -387,7 +387,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
         * need to have the registers polled during D3, so avoid D3cold.
         */
        if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
-               pdev->no_d3cold = true;
+               pci_d3cold_disable(pdev);
 
        if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
                xhci_pme_quirk(hcd);
index fbe8e164a4ee93db99fab879c7284721546204e8..8dd6e01f45c0eb56ce4c068633700b7a386b6c52 100644 (file)
@@ -783,6 +783,7 @@ static inline void nand_set_controller_data(struct nand_chip *chip, void *priv)
  * NAND Flash Manufacturer ID Codes
  */
 #define NAND_MFR_TOSHIBA       0x98
+#define NAND_MFR_ESMT          0xc8
 #define NAND_MFR_SAMSUNG       0xec
 #define NAND_MFR_FUJITSU       0x04
 #define NAND_MFR_NATIONAL      0x8f
index 7f041bd88b8244f8fa243c71d508e55dd08b4160..c425c7b4c2a09af2cbb38ea25b780463dbe3230f 100644 (file)
@@ -173,10 +173,10 @@ struct spi_nor {
        int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
        int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
 
-       int (*read)(struct spi_nor *nor, loff_t from,
-                       size_t len, size_t *retlen, u_char *read_buf);
-       void (*write)(struct spi_nor *nor, loff_t to,
-                       size_t len, size_t *retlen, const u_char *write_buf);
+       ssize_t (*read)(struct spi_nor *nor, loff_t from,
+                       size_t len, u_char *read_buf);
+       ssize_t (*write)(struct spi_nor *nor, loff_t to,
+                       size_t len, const u_char *write_buf);
        int (*erase)(struct spi_nor *nor, loff_t offs);
 
        int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
index 89ab0572dbc63cfe4375cdae9528809b3b0bdd62..7d63a66e8ed43a7b696f0e061a0d9a8d051b98fc 100644 (file)
@@ -24,6 +24,8 @@ static inline acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev)
 }
 extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle);
 
+extern phys_addr_t pci_mcfg_lookup(u16 domain, struct resource *bus_res);
+
 static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
 {
        struct pci_bus *pbus = pdev->bus;
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
new file mode 100644 (file)
index 0000000..7adad20
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+#ifndef DRIVERS_PCI_ECAM_H
+#define DRIVERS_PCI_ECAM_H
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+/*
+ * struct to hold pci ops and bus shift of the config window
+ * for a PCI controller.
+ */
+struct pci_config_window;
+struct pci_ecam_ops {
+       unsigned int                    bus_shift;
+       struct pci_ops                  pci_ops;
+       int                             (*init)(struct pci_config_window *);
+};
+
+/*
+ * struct to hold the mappings of a config space window. This
+ * is expected to be used as sysdata for PCI controllers that
+ * use ECAM.
+ */
+struct pci_config_window {
+       struct resource                 res;
+       struct resource                 busr;
+       void                            *priv;
+       struct pci_ecam_ops             *ops;
+       union {
+               void __iomem            *win;   /* 64-bit single mapping */
+               void __iomem            **winp; /* 32-bit per-bus mapping */
+       };
+       struct device                   *parent;/* ECAM res was from this dev */
+};
+
+/* create and free pci_config_window */
+struct pci_config_window *pci_ecam_create(struct device *dev,
+               struct resource *cfgres, struct resource *busr,
+               struct pci_ecam_ops *ops);
+void pci_ecam_free(struct pci_config_window *cfg);
+
+/* map_bus when ->sysdata is an instance of pci_config_window */
+void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn,
+                              int where);
+/* default ECAM ops */
+extern struct pci_ecam_ops pci_generic_ecam_ops;
+
+#ifdef CONFIG_PCI_HOST_GENERIC
+/* for DT-based PCI controllers that support ECAM */
+int pci_host_common_probe(struct platform_device *pdev,
+                         struct pci_ecam_ops *ops);
+#endif
+#endif
index c40ac910cce4a9b51c81f0c21a613044d28c0309..2599a980340f44f2550bacded406cb34f38808dc 100644 (file)
@@ -101,6 +101,10 @@ enum {
        DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
 };
 
+/*
+ * pci_power_t values must match the bits in the Capabilities PME_Support
+ * and Control/Status PowerState fields in the Power Management capability.
+ */
 typedef int __bitwise pci_power_t;
 
 #define PCI_D0         ((pci_power_t __force) 0)
@@ -116,7 +120,7 @@ extern const char *pci_power_names[];
 
 static inline const char *pci_power_name(pci_power_t state)
 {
-       return pci_power_names[1 + (int) state];
+       return pci_power_names[1 + (__force int) state];
 }
 
 #define PCI_PM_D2_DELAY                200
@@ -294,6 +298,7 @@ struct pci_dev {
        unsigned int    d2_support:1;   /* Low power state D2 is supported */
        unsigned int    no_d1d2:1;      /* D1 and D2 are forbidden */
        unsigned int    no_d3cold:1;    /* D3cold is forbidden */
+       unsigned int    bridge_d3:1;    /* Allow D3 for bridge */
        unsigned int    d3cold_allowed:1;       /* D3cold is allowed by user */
        unsigned int    mmio_always_on:1;       /* disallow turning off io/mem
                                                   decoding during bar sizing */
@@ -320,6 +325,7 @@ struct pci_dev {
         * directly, use the values stored here. They might be different!
         */
        unsigned int    irq;
+       struct cpumask  *irq_affinity;
        struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
 
        bool match_driver;              /* Skip attaching driver */
@@ -1084,6 +1090,8 @@ int pci_back_from_sleep(struct pci_dev *dev);
 bool pci_dev_run_wake(struct pci_dev *dev);
 bool pci_check_pme_status(struct pci_dev *dev);
 void pci_pme_wakeup_bus(struct pci_bus *bus);
+void pci_d3cold_enable(struct pci_dev *dev);
+void pci_d3cold_disable(struct pci_dev *dev);
 
 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
                                  bool enable)
@@ -1115,6 +1123,7 @@ int pci_set_vpd_size(struct pci_dev *dev, size_t len);
 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
 void pci_bus_assign_resources(const struct pci_bus *bus);
+void pci_bus_claim_resources(struct pci_bus *bus);
 void pci_bus_size_bridges(struct pci_bus *bus);
 int pci_claim_resource(struct pci_dev *, int);
 int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
@@ -1144,9 +1153,12 @@ void pci_add_resource(struct list_head *resources, struct resource *res);
 void pci_add_resource_offset(struct list_head *resources, struct resource *res,
                             resource_size_t offset);
 void pci_free_resource_list(struct list_head *resources);
-void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, unsigned int flags);
+void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
+                         unsigned int flags);
 struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
 void pci_bus_remove_resources(struct pci_bus *bus);
+int devm_request_pci_bus_resources(struct device *dev,
+                                  struct list_head *resources);
 
 #define pci_bus_for_each_resource(bus, res, i)                         \
        for (i = 0;                                                     \
@@ -1168,6 +1180,7 @@ int pci_register_io_range(phys_addr_t addr, resource_size_t size);
 unsigned long pci_address_to_pio(phys_addr_t addr);
 phys_addr_t pci_pio_to_address(unsigned long pio);
 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
+void pci_unmap_iospace(struct resource *res);
 
 static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
 {
@@ -1238,6 +1251,11 @@ resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
 int pci_set_vga_state(struct pci_dev *pdev, bool decode,
                      unsigned int command_bits, u32 flags);
 
+#define PCI_IRQ_NOLEGACY       (1 << 0) /* don't use legacy interrupts */
+#define PCI_IRQ_NOMSI          (1 << 1) /* don't use MSI interrupts */
+#define PCI_IRQ_NOMSIX         (1 << 2) /* don't use MSI-X interrupts */
+#define PCI_IRQ_NOAFFINITY     (1 << 3) /* don't auto-assign affinity */
+
 /* kmem_cache style wrapper around pci_alloc_consistent() */
 
 #include <linux/pci-dma.h>
@@ -1285,6 +1303,11 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev,
                return rc;
        return 0;
 }
+int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+               unsigned int max_vecs, unsigned int flags);
+void pci_free_irq_vectors(struct pci_dev *dev);
+int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
+
 #else
 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
 static inline void pci_msi_shutdown(struct pci_dev *dev) { }
@@ -1308,6 +1331,24 @@ static inline int pci_enable_msix_range(struct pci_dev *dev,
 static inline int pci_enable_msix_exact(struct pci_dev *dev,
                      struct msix_entry *entries, int nvec)
 { return -ENOSYS; }
+static inline int pci_alloc_irq_vectors(struct pci_dev *dev,
+               unsigned int min_vecs, unsigned int max_vecs,
+               unsigned int flags)
+{
+       if (min_vecs > 1)
+               return -EINVAL;
+       return 1;
+}
+static inline void pci_free_irq_vectors(struct pci_dev *dev)
+{
+}
+
+static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
+{
+       if (WARN_ON_ONCE(nr > 0))
+               return -EINVAL;
+       return dev->irq;
+}
 #endif
 
 #ifdef CONFIG_PCIEPORTBUS
@@ -1390,12 +1431,13 @@ static inline int pci_domain_nr(struct pci_bus *bus)
 {
        return bus->domain_nr;
 }
-void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent);
+#ifdef CONFIG_ACPI
+int acpi_pci_bus_find_domain_nr(struct pci_bus *bus);
 #else
-static inline void pci_bus_assign_domain_nr(struct pci_bus *bus,
-                                       struct device *parent)
-{
-}
+static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
+{ return 0; }
+#endif
+int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
 #endif
 
 /* some architectures require additional setup to direct VGA traffic */
@@ -1403,6 +1445,34 @@ typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
                      unsigned int command_bits, u32 flags);
 void pci_register_set_vga_state(arch_set_vga_state_t func);
 
+static inline int
+pci_request_io_regions(struct pci_dev *pdev, const char *name)
+{
+       return pci_request_selected_regions(pdev,
+                           pci_select_bars(pdev, IORESOURCE_IO), name);
+}
+
+static inline void
+pci_release_io_regions(struct pci_dev *pdev)
+{
+       return pci_release_selected_regions(pdev,
+                           pci_select_bars(pdev, IORESOURCE_IO));
+}
+
+static inline int
+pci_request_mem_regions(struct pci_dev *pdev, const char *name)
+{
+       return pci_request_selected_regions(pdev,
+                           pci_select_bars(pdev, IORESOURCE_MEM), name);
+}
+
+static inline void
+pci_release_mem_regions(struct pci_dev *pdev)
+{
+       return pci_release_selected_regions(pdev,
+                           pci_select_bars(pdev, IORESOURCE_MEM));
+}
+
 #else /* CONFIG_PCI is not enabled */
 
 static inline void pci_set_flags(int flags) { }
@@ -1555,7 +1625,11 @@ static inline const char *pci_name(const struct pci_dev *pdev)
 /* Some archs don't want to expose struct resource to userland as-is
  * in sysfs and /proc
  */
-#ifndef HAVE_ARCH_PCI_RESOURCE_TO_USER
+#ifdef HAVE_ARCH_PCI_RESOURCE_TO_USER
+void pci_resource_to_user(const struct pci_dev *dev, int bar,
+                         const struct resource *rsrc,
+                         resource_size_t *start, resource_size_t *end);
+#else
 static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
                const struct resource *rsrc, resource_size_t *start,
                resource_size_t *end)
@@ -1707,6 +1781,7 @@ extern u8 pci_cache_line_size;
 
 extern unsigned long pci_hotplug_io_size;
 extern unsigned long pci_hotplug_mem_size;
+extern unsigned long pci_hotplug_bus_size;
 
 /* Architecture-specific versions may override these (weak) */
 void pcibios_disable_device(struct pci_dev *dev);
@@ -1723,7 +1798,7 @@ void pcibios_free_irq(struct pci_dev *dev);
 extern struct dev_pm_ops pcibios_pm_ops;
 #endif
 
-#ifdef CONFIG_PCI_MMCONFIG
+#if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
 void __init pci_mmcfg_early_init(void);
 void __init pci_mmcfg_late_init(void);
 #else
This page took 0.214837 seconds and 5 git commands to generate.