Diffstat (limited to 'drivers')
-rw-r--r--  drivers/clk/Kconfig | 8
-rw-r--r--  drivers/clk/Makefile | 1
-rw-r--r--  drivers/clk/clk-bcm2835.c | 2
-rw-r--r--  drivers/clk/clk-divider.c | 8
-rw-r--r--  drivers/clk/clk-fixed-factor.c | 2
-rw-r--r--  drivers/clk/clk-fixed-rate.c | 1
-rw-r--r--  drivers/clk/clk-gate.c | 7
-rw-r--r--  drivers/clk/clk-mux.c | 19
-rw-r--r--  drivers/clk/clk-nomadik.c | 4
-rw-r--r--  drivers/clk/clk-prima2.c | 2
-rw-r--r--  drivers/clk/clk-s2mps11.c | 273
-rw-r--r--  drivers/clk/clk-u300.c | 4
-rw-r--r--  drivers/clk/clk-wm831x.c | 16
-rw-r--r--  drivers/clk/clk.c | 450
-rw-r--r--  drivers/clk/mmp/clk-mmp2.c | 39
-rw-r--r--  drivers/clk/mmp/clk-pxa168.c | 40
-rw-r--r--  drivers/clk/mmp/clk-pxa910.c | 31
-rw-r--r--  drivers/clk/mvebu/armada-370.c | 14
-rw-r--r--  drivers/clk/mvebu/armada-xp.c | 12
-rw-r--r--  drivers/clk/mvebu/clk-cpu.c | 4
-rw-r--r--  drivers/clk/mvebu/common.c | 18
-rw-r--r--  drivers/clk/mvebu/dove.c | 12
-rw-r--r--  drivers/clk/mvebu/kirkwood.c | 14
-rw-r--r--  drivers/clk/mxs/clk-imx23.c | 1
-rw-r--r--  drivers/clk/mxs/clk.h | 4
-rw-r--r--  drivers/clk/samsung/Makefile | 3
-rw-r--r--  drivers/clk/samsung/clk-exynos-audss.c | 8
-rw-r--r--  drivers/clk/samsung/clk-exynos4.c | 605
-rw-r--r--  drivers/clk/samsung/clk-exynos5250.c | 129
-rw-r--r--  drivers/clk/samsung/clk-exynos5420.c | 123
-rw-r--r--  drivers/clk/samsung/clk-exynos5440.c | 18
-rw-r--r--  drivers/clk/samsung/clk-pll.c | 701
-rw-r--r--  drivers/clk/samsung/clk-pll.h | 85
-rw-r--r--  drivers/clk/samsung/clk-s3c64xx.c | 473
-rw-r--r--  drivers/clk/samsung/clk.c | 10
-rw-r--r--  drivers/clk/samsung/clk.h | 55
-rw-r--r--  drivers/clk/spear/spear1310_clock.c | 179
-rw-r--r--  drivers/clk/spear/spear1340_clock.c | 97
-rw-r--r--  drivers/clk/spear/spear3xx_clock.c | 57
-rw-r--r--  drivers/clk/spear/spear6xx_clock.c | 35
-rw-r--r--  drivers/clk/sunxi/clk-sunxi.c | 270
-rw-r--r--  drivers/clk/tegra/clk-tegra114.c | 38
-rw-r--r--  drivers/clk/tegra/clk-tegra20.c | 8
-rw-r--r--  drivers/clk/tegra/clk-tegra30.c | 37
-rw-r--r--  drivers/clk/versatile/clk-vexpress.c | 4
-rw-r--r--  drivers/clk/zynq/clkc.c | 82
-rw-r--r--  drivers/clk/zynq/pll.c | 19
-rw-r--r--  drivers/clocksource/samsung_pwm_timer.c | 12
-rw-r--r--  drivers/cpuidle/Kconfig.arm | 10
-rw-r--r--  drivers/cpuidle/Makefile | 1
-rw-r--r--  drivers/cpuidle/cpuidle-big_little.c | 209
-rw-r--r--  drivers/dma/Kconfig | 9
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/acpi-dma.c | 4
-rw-r--r--  drivers/dma/amba-pl08x.c | 524
-rw-r--r--  drivers/dma/dmaengine.c | 83
-rw-r--r--  drivers/dma/dmatest.c | 182
-rw-r--r--  drivers/dma/dw/core.c | 39
-rw-r--r--  drivers/dma/dw/platform.c | 1
-rw-r--r--  drivers/dma/edma.c | 164
-rw-r--r--  drivers/dma/ep93xx_dma.c | 10
-rw-r--r--  drivers/dma/fsldma.c | 10
-rw-r--r--  drivers/dma/imx-dma.c | 6
-rw-r--r--  drivers/dma/imx-sdma.c | 179
-rw-r--r--  drivers/dma/ioat/dma_v3.c | 26
-rw-r--r--  drivers/dma/iop-adma.c | 6
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c | 6
-rw-r--r--  drivers/dma/k3dma.c | 837
-rw-r--r--  drivers/dma/mmp_pdma.c | 320
-rw-r--r--  drivers/dma/mmp_tdma.c | 6
-rw-r--r--  drivers/dma/mpc512x_dma.c | 10
-rw-r--r--  drivers/dma/mv_xor.c | 57
-rw-r--r--  drivers/dma/mv_xor.h | 28
-rw-r--r--  drivers/dma/mxs-dma.c | 27
-rw-r--r--  drivers/dma/of-dma.c | 3
-rw-r--r--  drivers/dma/pch_dma.c | 10
-rw-r--r--  drivers/dma/pl330.c | 179
-rw-r--r--  drivers/dma/sh/Kconfig | 10
-rw-r--r--  drivers/dma/sh/Makefile | 6
-rw-r--r--  drivers/dma/sh/rcar-hpbdma.c | 655
-rw-r--r--  drivers/dma/sh/shdma-arm.h | 51
-rw-r--r--  drivers/dma/sh/shdma-base.c | 26
-rw-r--r--  drivers/dma/sh/shdma-of.c | 5
-rw-r--r--  drivers/dma/sh/shdma-r8a73a4.c | 77
-rw-r--r--  drivers/dma/sh/shdma.h | 16
-rw-r--r--  drivers/dma/sh/shdmac.c (renamed from drivers/dma/sh/shdma.c) | 160
-rw-r--r--  drivers/dma/sh/sudmac.c | 22
-rw-r--r--  drivers/dma/sirf-dma.c | 134
-rw-r--r--  drivers/dma/ste_dma40.c | 23
-rw-r--r--  drivers/dma/tegra20-apb-dma.c | 8
-rw-r--r--  drivers/dma/timb_dma.c | 2
-rw-r--r--  drivers/dma/txx9dmac.c | 22
-rw-r--r--  drivers/gpio/gpiolib-of.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 164
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 48
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 41
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 23
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 83
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 99
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 9
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 51
-rw-r--r--  drivers/hwmon/hwmon-vid.c | 2
-rw-r--r--  drivers/hwmon/ina2xx.c | 3
-rw-r--r--  drivers/irqchip/Makefile | 1
-rw-r--r--  drivers/irqchip/irq-gic.c | 6
-rw-r--r--  drivers/irqchip/irq-mmp.c | 495
-rw-r--r--  drivers/md/Makefile | 2
-rw-r--r--  drivers/md/dm-cache-target.c | 59
-rw-r--r--  drivers/md/dm-crypt.c | 10
-rw-r--r--  drivers/md/dm-ioctl.c | 60
-rw-r--r--  drivers/md/dm-kcopyd.c | 3
-rw-r--r--  drivers/md/dm-raid1.c | 3
-rw-r--r--  drivers/md/dm-stats.c | 969
-rw-r--r--  drivers/md/dm-stats.h | 40
-rw-r--r--  drivers/md/dm-stripe.c | 1
-rw-r--r--  drivers/md/dm-table.c | 20
-rw-r--r--  drivers/md/dm-target.c | 9
-rw-r--r--  drivers/md/dm-thin.c | 122
-rw-r--r--  drivers/md/dm.c | 70
-rw-r--r--  drivers/md/dm.h | 27
-rw-r--r--  drivers/md/md.c | 54
-rw-r--r--  drivers/md/md.h | 8
-rw-r--r--  drivers/md/persistent-data/dm-block-manager.c | 5
-rw-r--r--  drivers/md/persistent-data/dm-block-manager.h | 5
-rw-r--r--  drivers/md/persistent-data/dm-btree.c | 28
-rw-r--r--  drivers/md/persistent-data/dm-space-map-common.c | 77
-rw-r--r--  drivers/md/raid5.c | 362
-rw-r--r--  drivers/md/raid5.h | 22
-rw-r--r--  drivers/mmc/card/block.c | 47
-rw-r--r--  drivers/mmc/card/mmc_test.c | 14
-rw-r--r--  drivers/mmc/core/core.c | 44
-rw-r--r--  drivers/mmc/core/host.c | 2
-rw-r--r--  drivers/mmc/core/mmc_ops.c | 1
-rw-r--r--  drivers/mmc/core/sd.c | 13
-rw-r--r--  drivers/mmc/core/slot-gpio.c | 14
-rw-r--r--  drivers/mmc/host/Kconfig | 10
-rw-r--r--  drivers/mmc/host/Makefile | 2
-rw-r--r--  drivers/mmc/host/atmel-mci.c | 34
-rw-r--r--  drivers/mmc/host/dw_mmc-exynos.c | 9
-rw-r--r--  drivers/mmc/host/dw_mmc-pci.c | 4
-rw-r--r--  drivers/mmc/host/dw_mmc-pltfm.c | 1
-rw-r--r--  drivers/mmc/host/dw_mmc.c | 21
-rw-r--r--  drivers/mmc/host/jz4740_mmc.c | 7
-rw-r--r--  drivers/mmc/host/mmc_spi.c | 49
-rw-r--r--  drivers/mmc/host/mvsdio.c | 3
-rw-r--r--  drivers/mmc/host/mxs-mmc.c | 22
-rw-r--r--  drivers/mmc/host/of_mmc_spi.c | 46
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 3
-rw-r--r--  drivers/mmc/host/sdhci-bcm2835.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-imx.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-of-esdhc.c | 1
-rw-r--r--  drivers/mmc/host/sdhci-pxav3.c | 3
-rw-r--r--  drivers/mmc/host/sdhci-s3c.c | 8
-rw-r--r--  drivers/mmc/host/sdhci-sirf.c | 2
-rw-r--r--  drivers/mmc/host/sdhci.c | 7
-rw-r--r--  drivers/mmc/host/sh_mmcif.c | 63
-rw-r--r--  drivers/mmc/host/sh_mobile_sdhi.c | 23
-rw-r--r--  drivers/mmc/host/tmio_mmc_dma.c | 4
-rw-r--r--  drivers/mmc/host/tmio_mmc_pio.c | 24
-rw-r--r--  drivers/mmc/host/vub300.c | 2
-rw-r--r--  drivers/of/base.c | 200
-rw-r--r--  drivers/of/fdt.c | 98
-rw-r--r--  drivers/of/irq.c | 2
-rw-r--r--  drivers/of/of_net.c | 2
-rw-r--r--  drivers/of/platform.c | 21
-rw-r--r--  drivers/power/Kconfig | 15
-rw-r--r--  drivers/power/Makefile | 2
-rw-r--r--  drivers/power/ab8500_charger.c | 1
-rw-r--r--  drivers/power/bq24190_charger.c | 1549
-rw-r--r--  drivers/power/collie_battery.c | 2
-rw-r--r--  drivers/power/max8925_power.c | 1
-rw-r--r--  drivers/power/power_supply_core.c | 38
-rw-r--r--  drivers/power/power_supply_sysfs.c | 2
-rw-r--r--  drivers/power/reset/Kconfig | 15
-rw-r--r--  drivers/power/reset/Makefile | 2
-rw-r--r--  drivers/power/reset/msm-poweroff.c | 73
-rw-r--r--  drivers/power/reset/xgene-reboot.c | 103
-rw-r--r--  drivers/power/rx51_battery.c | 14
-rw-r--r--  drivers/power/tosa_battery.c | 2
-rw-r--r--  drivers/power/twl4030_charger.c | 7
-rw-r--r--  drivers/power/twl4030_madc_battery.c | 245
-rw-r--r--  drivers/s390/block/dasd_diag.c | 4
-rw-r--r--  drivers/s390/char/fs3270.c | 6
-rw-r--r--  drivers/s390/char/sclp.c | 6
-rw-r--r--  drivers/s390/char/tty3270.c | 6
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 2
-rw-r--r--  drivers/s390/kvm/kvm_virtio.c | 2
-rw-r--r--  drivers/video/ps3fb.c | 2
206 files changed, 11471 insertions, 2814 deletions
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 51380d6..279407a 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -27,7 +27,7 @@ config COMMON_CLK_DEBUG
bool "DebugFS representation of clock tree"
select DEBUG_FS
---help---
- Creates a directory hierchy in debugfs for visualizing the clk
+ Creates a directory hierarchy in debugfs for visualizing the clk
tree structure. Each directory contains read-only members
that export information specific to that clk node: clk_rate,
clk_flags, clk_prepare_count, clk_enable_count &
@@ -64,6 +64,12 @@ config COMMON_CLK_SI5351
This driver supports Silicon Labs 5351A/B/C programmable clock
generators.
+config COMMON_CLK_S2MPS11
+ tristate "Clock driver for S2MPS11 MFD"
+ depends on MFD_SEC_CORE
+ ---help---
+ This driver supports S2MPS11 crystal oscillator clock.
+
config CLK_TWL6040
tristate "External McPDM functional clock from twl6040"
depends on TWL6040_CORE
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 4038c2b..7b11106 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -40,5 +40,6 @@ obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN) += clk-axi-clkgen.o
obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o
+obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o
obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
obj-$(CONFIG_CLK_PPC_CORENET) += clk-ppc-corenet.o
diff --git a/drivers/clk/clk-bcm2835.c b/drivers/clk/clk-bcm2835.c
index 792bc57..5fb4ff5 100644
--- a/drivers/clk/clk-bcm2835.c
+++ b/drivers/clk/clk-bcm2835.c
@@ -23,7 +23,7 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
-static const __initconst struct of_device_id clk_match[] = {
+static const struct of_device_id clk_match[] __initconst = {
{ .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
{ }
};
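
The hunk above is purely about annotation placement: __initconst should follow the array declarator rather than sit between const and the struct tag, so the section attribute unambiguously binds to the object being placed in the init section. A minimal sketch of the preferred form, with an invented compatible string and setup callback:

	/* illustrative only: the __initconst annotation goes after the declarator */
	static const struct of_device_id example_match[] __initconst = {
		{ .compatible = "vendor,example", .data = example_clk_setup, },
		{ /* sentinel */ }
	};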
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 6d55eb2..8d3009e 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -104,7 +104,7 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
struct clk_divider *divider = to_clk_divider(hw);
unsigned int div, val;
- val = readl(divider->reg) >> divider->shift;
+ val = clk_readl(divider->reg) >> divider->shift;
val &= div_mask(divider);
div = _get_div(divider, val);
@@ -230,11 +230,11 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
val = div_mask(divider) << (divider->shift + 16);
} else {
- val = readl(divider->reg);
+ val = clk_readl(divider->reg);
val &= ~(div_mask(divider) << divider->shift);
}
val |= value << divider->shift;
- writel(val, divider->reg);
+ clk_writel(val, divider->reg);
if (divider->lock)
spin_unlock_irqrestore(divider->lock, flags);
@@ -317,6 +317,7 @@ struct clk *clk_register_divider(struct device *dev, const char *name,
return _register_divider(dev, name, parent_name, flags, reg, shift,
width, clk_divider_flags, NULL, lock);
}
+EXPORT_SYMBOL_GPL(clk_register_divider);
/**
* clk_register_divider_table - register a table based divider clock with
@@ -341,3 +342,4 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
return _register_divider(dev, name, parent_name, flags, reg, shift,
width, clk_divider_flags, table, lock);
}
+EXPORT_SYMBOL_GPL(clk_register_divider_table);
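
The clk_readl()/clk_writel() calls introduced above are thin wrappers from linux/clk-provider.h that give the basic clock types a single, overridable MMIO accessor instead of open-coded readl()/writel(). A paraphrased sketch of the generic fallback (see clk-provider.h for the real definitions; platforms with special requirements can provide their own):

	/* paraphrased from include/linux/clk-provider.h: generic fallback */
	static inline u32 clk_readl(u32 __iomem *reg)
	{
		return readl(reg);
	}

	static inline void clk_writel(u32 val, u32 __iomem *reg)
	{
		writel(val, reg);
	}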
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 9ff7d51..0e1d89b 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -97,6 +97,8 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
return clk;
}
+EXPORT_SYMBOL_GPL(clk_register_fixed_factor);
+
#ifdef CONFIG_OF
/**
* of_fixed_factor_clk_setup() - Setup function for simple fixed factor clock
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index dc58fbd..1ed591a 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -80,6 +80,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
return clk;
}
+EXPORT_SYMBOL_GPL(clk_register_fixed_rate);
#ifdef CONFIG_OF
/**
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 790306e..4a58c55 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -58,7 +58,7 @@ static void clk_gate_endisable(struct clk_hw *hw, int enable)
if (set)
reg |= BIT(gate->bit_idx);
} else {
- reg = readl(gate->reg);
+ reg = clk_readl(gate->reg);
if (set)
reg |= BIT(gate->bit_idx);
@@ -66,7 +66,7 @@ static void clk_gate_endisable(struct clk_hw *hw, int enable)
reg &= ~BIT(gate->bit_idx);
}
- writel(reg, gate->reg);
+ clk_writel(reg, gate->reg);
if (gate->lock)
spin_unlock_irqrestore(gate->lock, flags);
@@ -89,7 +89,7 @@ static int clk_gate_is_enabled(struct clk_hw *hw)
u32 reg;
struct clk_gate *gate = to_clk_gate(hw);
- reg = readl(gate->reg);
+ reg = clk_readl(gate->reg);
/* if a set bit disables this clk, flip it before masking */
if (gate->flags & CLK_GATE_SET_TO_DISABLE)
@@ -161,3 +161,4 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
return clk;
}
+EXPORT_SYMBOL_GPL(clk_register_gate);
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 614444c..4f96ff3 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -42,7 +42,7 @@ static u8 clk_mux_get_parent(struct clk_hw *hw)
* OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so
* val = 0x4 really means "bit 2, index starts at bit 0"
*/
- val = readl(mux->reg) >> mux->shift;
+ val = clk_readl(mux->reg) >> mux->shift;
val &= mux->mask;
if (mux->table) {
@@ -89,11 +89,11 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
if (mux->flags & CLK_MUX_HIWORD_MASK) {
val = mux->mask << (mux->shift + 16);
} else {
- val = readl(mux->reg);
+ val = clk_readl(mux->reg);
val &= ~(mux->mask << mux->shift);
}
val |= index << mux->shift;
- writel(val, mux->reg);
+ clk_writel(val, mux->reg);
if (mux->lock)
spin_unlock_irqrestore(mux->lock, flags);
@@ -104,9 +104,15 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
const struct clk_ops clk_mux_ops = {
.get_parent = clk_mux_get_parent,
.set_parent = clk_mux_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_mux_ops);
+const struct clk_ops clk_mux_ro_ops = {
+ .get_parent = clk_mux_get_parent,
+};
+EXPORT_SYMBOL_GPL(clk_mux_ro_ops);
+
struct clk *clk_register_mux_table(struct device *dev, const char *name,
const char **parent_names, u8 num_parents, unsigned long flags,
void __iomem *reg, u8 shift, u32 mask,
@@ -133,7 +139,10 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
}
init.name = name;
- init.ops = &clk_mux_ops;
+ if (clk_mux_flags & CLK_MUX_READ_ONLY)
+ init.ops = &clk_mux_ro_ops;
+ else
+ init.ops = &clk_mux_ops;
init.flags = flags | CLK_IS_BASIC;
init.parent_names = parent_names;
init.num_parents = num_parents;
@@ -154,6 +163,7 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
return clk;
}
+EXPORT_SYMBOL_GPL(clk_register_mux_table);
struct clk *clk_register_mux(struct device *dev, const char *name,
const char **parent_names, u8 num_parents, unsigned long flags,
@@ -166,3 +176,4 @@ struct clk *clk_register_mux(struct device *dev, const char *name,
flags, reg, shift, mask, clk_mux_flags,
NULL, lock);
}
+EXPORT_SYMBOL_GPL(clk_register_mux);
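
With the CLK_MUX_READ_ONLY handling added above, a mux whose parent selection is fixed by firmware or strapping can be registered so the framework never writes the select field; clk_mux_ro_ops deliberately omits .set_parent, so clk_set_parent() fails instead of touching the register. A minimal registration sketch (the base address, register offset, shift/width values, and lock are illustrative, not from the commit):

	/* read-only mux: report the parent chosen by firmware, never change it */
	clk = clk_register_mux(NULL, "fw_mux", parent_names,
			       ARRAY_SIZE(parent_names), 0,
			       base + MUX_OFFSET, /* shift */ 4, /* width */ 2,
			       CLK_MUX_READ_ONLY, &mux_lock);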
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 6d819a3..51410c2 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -479,12 +479,12 @@ static void __init of_nomadik_src_clk_setup(struct device_node *np)
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
-static const __initconst struct of_device_id nomadik_src_match[] = {
+static const struct of_device_id nomadik_src_match[] __initconst = {
{ .compatible = "stericsson,nomadik-src" },
{ /* sentinel */ }
};
-static const __initconst struct of_device_id nomadik_src_clk_match[] = {
+static const struct of_device_id nomadik_src_clk_match[] __initconst = {
{
.compatible = "fixed-clock",
.data = of_fixed_clk_setup,
diff --git a/drivers/clk/clk-prima2.c b/drivers/clk/clk-prima2.c
index 643ca65..5ab95f1 100644
--- a/drivers/clk/clk-prima2.c
+++ b/drivers/clk/clk-prima2.c
@@ -1034,7 +1034,7 @@ enum prima2_clk_index {
usb0, usb1, maxclk,
};
-static __initdata struct clk_hw* prima2_clk_hw_array[maxclk] = {
+static struct clk_hw *prima2_clk_hw_array[maxclk] __initdata = {
NULL, /* dummy */
NULL,
&clk_pll1.hw,
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
new file mode 100644
index 0000000..7be41e6
--- /dev/null
+++ b/drivers/clk/clk-s2mps11.c
@@ -0,0 +1,273 @@
+/*
+ * clk-s2mps11.c - Clock driver for S2MPS11.
+ *
+ * Copyright (C) 2013 Samsung Electornics
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/clkdev.h>
+#include <linux/regmap.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/samsung/s2mps11.h>
+#include <linux/mfd/samsung/core.h>
+
+#define s2mps11_name(a) (a->hw.init->name)
+
+static struct clk **clk_table;
+static struct clk_onecell_data clk_data;
+
+enum {
+ S2MPS11_CLK_AP = 0,
+ S2MPS11_CLK_CP,
+ S2MPS11_CLK_BT,
+ S2MPS11_CLKS_NUM,
+};
+
+struct s2mps11_clk {
+ struct sec_pmic_dev *iodev;
+ struct clk_hw hw;
+ struct clk *clk;
+ struct clk_lookup *lookup;
+ u32 mask;
+ bool enabled;
+};
+
+static struct s2mps11_clk *to_s2mps11_clk(struct clk_hw *hw)
+{
+ return container_of(hw, struct s2mps11_clk, hw);
+}
+
+static int s2mps11_clk_prepare(struct clk_hw *hw)
+{
+ struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
+ int ret;
+
+ ret = regmap_update_bits(s2mps11->iodev->regmap,
+ S2MPS11_REG_RTC_CTRL,
+ s2mps11->mask, s2mps11->mask);
+ if (!ret)
+ s2mps11->enabled = true;
+
+ return ret;
+}
+
+static void s2mps11_clk_unprepare(struct clk_hw *hw)
+{
+ struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
+ int ret;
+
+ ret = regmap_update_bits(s2mps11->iodev->regmap, S2MPS11_REG_RTC_CTRL,
+ s2mps11->mask, ~s2mps11->mask);
+
+ if (!ret)
+ s2mps11->enabled = false;
+}
+
+static int s2mps11_clk_is_enabled(struct clk_hw *hw)
+{
+ struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
+
+ return s2mps11->enabled;
+}
+
+static unsigned long s2mps11_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
+ if (s2mps11->enabled)
+ return 32768;
+ else
+ return 0;
+}
+
+static struct clk_ops s2mps11_clk_ops = {
+ .prepare = s2mps11_clk_prepare,
+ .unprepare = s2mps11_clk_unprepare,
+ .is_enabled = s2mps11_clk_is_enabled,
+ .recalc_rate = s2mps11_clk_recalc_rate,
+};
+
+static struct clk_init_data s2mps11_clks_init[S2MPS11_CLKS_NUM] = {
+ [S2MPS11_CLK_AP] = {
+ .name = "s2mps11_ap",
+ .ops = &s2mps11_clk_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ [S2MPS11_CLK_CP] = {
+ .name = "s2mps11_cp",
+ .ops = &s2mps11_clk_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ [S2MPS11_CLK_BT] = {
+ .name = "s2mps11_bt",
+ .ops = &s2mps11_clk_ops,
+ .flags = CLK_IS_ROOT,
+ },
+};
+
+static struct device_node *s2mps11_clk_parse_dt(struct platform_device *pdev)
+{
+ struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct device_node *clk_np;
+ int i;
+
+ if (!iodev->dev->of_node)
+ return NULL;
+
+ clk_np = of_find_node_by_name(iodev->dev->of_node, "clocks");
+ if (!clk_np) {
+ dev_err(&pdev->dev, "could not find clock sub-node\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ clk_table = devm_kzalloc(&pdev->dev, sizeof(struct clk *) *
+ S2MPS11_CLKS_NUM, GFP_KERNEL);
+ if (!clk_table)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < S2MPS11_CLKS_NUM; i++)
+ of_property_read_string_index(clk_np, "clock-output-names", i,
+ &s2mps11_clks_init[i].name);
+
+ return clk_np;
+}
+
+static int s2mps11_clk_probe(struct platform_device *pdev)
+{
+ struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct s2mps11_clk *s2mps11_clks, *s2mps11_clk;
+ struct device_node *clk_np = NULL;
+ int i, ret = 0;
+ u32 val;
+
+ s2mps11_clks = devm_kzalloc(&pdev->dev, sizeof(*s2mps11_clk) *
+ S2MPS11_CLKS_NUM, GFP_KERNEL);
+ if (!s2mps11_clks)
+ return -ENOMEM;
+
+ s2mps11_clk = s2mps11_clks;
+
+ clk_np = s2mps11_clk_parse_dt(pdev);
+ if (IS_ERR(clk_np))
+ return PTR_ERR(clk_np);
+
+ for (i = 0; i < S2MPS11_CLKS_NUM; i++, s2mps11_clk++) {
+ s2mps11_clk->iodev = iodev;
+ s2mps11_clk->hw.init = &s2mps11_clks_init[i];
+ s2mps11_clk->mask = 1 << i;
+
+ ret = regmap_read(s2mps11_clk->iodev->regmap,
+ S2MPS11_REG_RTC_CTRL, &val);
+ if (ret < 0)
+ goto err_reg;
+
+ s2mps11_clk->enabled = val & s2mps11_clk->mask;
+
+ s2mps11_clk->clk = devm_clk_register(&pdev->dev,
+ &s2mps11_clk->hw);
+ if (IS_ERR(s2mps11_clk->clk)) {
+ dev_err(&pdev->dev, "Fail to register : %s\n",
+ s2mps11_name(s2mps11_clk));
+ ret = PTR_ERR(s2mps11_clk->clk);
+ goto err_reg;
+ }
+
+ s2mps11_clk->lookup = devm_kzalloc(&pdev->dev,
+ sizeof(struct clk_lookup), GFP_KERNEL);
+ if (!s2mps11_clk->lookup) {
+ ret = -ENOMEM;
+ goto err_lup;
+ }
+
+ s2mps11_clk->lookup->con_id = s2mps11_name(s2mps11_clk);
+ s2mps11_clk->lookup->clk = s2mps11_clk->clk;
+
+ clkdev_add(s2mps11_clk->lookup);
+ }
+
+ if (clk_table) {
+ for (i = 0; i < S2MPS11_CLKS_NUM; i++)
+ clk_table[i] = s2mps11_clks[i].clk;
+
+ clk_data.clks = clk_table;
+ clk_data.clk_num = S2MPS11_CLKS_NUM;
+ of_clk_add_provider(clk_np, of_clk_src_onecell_get, &clk_data);
+ }
+
+ platform_set_drvdata(pdev, s2mps11_clks);
+
+ return ret;
+err_lup:
+ devm_clk_unregister(&pdev->dev, s2mps11_clk->clk);
+err_reg:
+ while (s2mps11_clk > s2mps11_clks) {
+ if (s2mps11_clk->lookup) {
+ clkdev_drop(s2mps11_clk->lookup);
+ devm_clk_unregister(&pdev->dev, s2mps11_clk->clk);
+ }
+ s2mps11_clk--;
+ }
+
+ return ret;
+}
+
+static int s2mps11_clk_remove(struct platform_device *pdev)
+{
+ struct s2mps11_clk *s2mps11_clks = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < S2MPS11_CLKS_NUM; i++)
+ clkdev_drop(s2mps11_clks[i].lookup);
+
+ return 0;
+}
+
+static const struct platform_device_id s2mps11_clk_id[] = {
+ { "s2mps11-clk", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
+
+static struct platform_driver s2mps11_clk_driver = {
+ .driver = {
+ .name = "s2mps11-clk",
+ .owner = THIS_MODULE,
+ },
+ .probe = s2mps11_clk_probe,
+ .remove = s2mps11_clk_remove,
+ .id_table = s2mps11_clk_id,
+};
+
+static int __init s2mps11_clk_init(void)
+{
+ return platform_driver_register(&s2mps11_clk_driver);
+}
+subsys_initcall(s2mps11_clk_init);
+
+static void __init s2mps11_clk_cleanup(void)
+{
+ platform_driver_unregister(&s2mps11_clk_driver);
+}
+module_exit(s2mps11_clk_cleanup);
+
+MODULE_DESCRIPTION("S2MPS11 Clock Driver");
+MODULE_AUTHOR("Yadwinder Singh Brar <yadi.brar@samsung.com>");
+MODULE_LICENSE("GPL");
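
Since the probe routine above registers each clock both as a clkdev lookup (con_id set to the clock's name) and, on DT systems, as an of_clk provider, consumers can obtain the 32.768 kHz outputs by name. A hedged usage sketch with a hypothetical consumer, assuming the default names are not overridden via clock-output-names:

	struct clk *ap_clk;

	ap_clk = clk_get(NULL, "s2mps11_ap");	/* matches the clkdev con_id */
	if (!IS_ERR(ap_clk)) {
		clk_prepare(ap_clk);	/* sets the enable bit in S2MPS11_REG_RTC_CTRL */
		/* ... consume the 32768 Hz clock ... */
		clk_unprepare(ap_clk);
		clk_put(ap_clk);
	}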
diff --git a/drivers/clk/clk-u300.c b/drivers/clk/clk-u300.c
index 8774e05..3efbdd0 100644
--- a/drivers/clk/clk-u300.c
+++ b/drivers/clk/clk-u300.c
@@ -746,7 +746,7 @@ struct u300_clock {
u16 clk_val;
};
-struct u300_clock const __initconst u300_clk_lookup[] = {
+static struct u300_clock const u300_clk_lookup[] __initconst = {
{
.type = U300_CLK_TYPE_REST,
.id = 3,
@@ -1151,7 +1151,7 @@ static void __init of_u300_syscon_mclk_init(struct device_node *np)
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
-static const __initconst struct of_device_id u300_clk_match[] = {
+static const struct of_device_id u300_clk_match[] __initconst = {
{
.compatible = "fixed-clock",
.data = of_fixed_clk_setup,
diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
index 1b3f8c9..805b4c3 100644
--- a/drivers/clk/clk-wm831x.c
+++ b/drivers/clk/clk-wm831x.c
@@ -31,7 +31,7 @@ struct wm831x_clk {
bool xtal_ena;
};
-static int wm831x_xtal_is_enabled(struct clk_hw *hw)
+static int wm831x_xtal_is_prepared(struct clk_hw *hw)
{
struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
xtal_hw);
@@ -52,7 +52,7 @@ static unsigned long wm831x_xtal_recalc_rate(struct clk_hw *hw,
}
static const struct clk_ops wm831x_xtal_ops = {
- .is_enabled = wm831x_xtal_is_enabled,
+ .is_prepared = wm831x_xtal_is_prepared,
.recalc_rate = wm831x_xtal_recalc_rate,
};
@@ -73,7 +73,7 @@ static const unsigned long wm831x_fll_auto_rates[] = {
24576000,
};
-static int wm831x_fll_is_enabled(struct clk_hw *hw)
+static int wm831x_fll_is_prepared(struct clk_hw *hw)
{
struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
fll_hw);
@@ -170,7 +170,7 @@ static int wm831x_fll_set_rate(struct clk_hw *hw, unsigned long rate,
if (i == ARRAY_SIZE(wm831x_fll_auto_rates))
return -EINVAL;
- if (wm831x_fll_is_enabled(hw))
+ if (wm831x_fll_is_prepared(hw))
return -EPERM;
return wm831x_set_bits(wm831x, WM831X_CLOCK_CONTROL_2,
@@ -220,7 +220,7 @@ static u8 wm831x_fll_get_parent(struct clk_hw *hw)
}
static const struct clk_ops wm831x_fll_ops = {
- .is_enabled = wm831x_fll_is_enabled,
+ .is_prepared = wm831x_fll_is_prepared,
.prepare = wm831x_fll_prepare,
.unprepare = wm831x_fll_unprepare,
.round_rate = wm831x_fll_round_rate,
@@ -237,7 +237,7 @@ static struct clk_init_data wm831x_fll_init = {
.flags = CLK_SET_RATE_GATE,
};
-static int wm831x_clkout_is_enabled(struct clk_hw *hw)
+static int wm831x_clkout_is_prepared(struct clk_hw *hw)
{
struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
clkout_hw);
@@ -335,7 +335,7 @@ static int wm831x_clkout_set_parent(struct clk_hw *hw, u8 parent)
}
static const struct clk_ops wm831x_clkout_ops = {
- .is_enabled = wm831x_clkout_is_enabled,
+ .is_prepared = wm831x_clkout_is_prepared,
.prepare = wm831x_clkout_prepare,
.unprepare = wm831x_clkout_unprepare,
.get_parent = wm831x_clkout_get_parent,
@@ -360,6 +360,8 @@ static int wm831x_clk_probe(struct platform_device *pdev)
if (!clkdata)
return -ENOMEM;
+ clkdata->wm831x = wm831x;
+
/* XTAL_ENA can only be set via OTP/InstantConfig so just read once */
ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2);
if (ret < 0) {
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 54a191c..a004769 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -458,7 +458,6 @@ static void clk_unprepare_unused_subtree(struct clk *clk)
clk->ops->unprepare(clk->hw);
}
}
-EXPORT_SYMBOL_GPL(__clk_get_flags);
/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
@@ -559,6 +558,19 @@ struct clk *__clk_get_parent(struct clk *clk)
return !clk ? NULL : clk->parent;
}
+struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
+{
+ if (!clk || index >= clk->num_parents)
+ return NULL;
+ else if (!clk->parents)
+ return __clk_lookup(clk->parent_names[index]);
+ else if (!clk->parents[index])
+ return clk->parents[index] =
+ __clk_lookup(clk->parent_names[index]);
+ else
+ return clk->parents[index];
+}
+
unsigned int __clk_get_enable_count(struct clk *clk)
{
return !clk ? 0 : clk->enable_count;
@@ -594,6 +606,7 @@ unsigned long __clk_get_flags(struct clk *clk)
{
return !clk ? 0 : clk->flags;
}
+EXPORT_SYMBOL_GPL(__clk_get_flags);
bool __clk_is_prepared(struct clk *clk)
{
@@ -679,6 +692,55 @@ struct clk *__clk_lookup(const char *name)
return NULL;
}
+/*
+ * Helper for finding best parent to provide a given frequency. This can be used
+ * directly as a determine_rate callback (e.g. for a mux), or from a more
+ * complex clock that may combine a mux with other operations.
+ */
+long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *best_parent_rate,
+ struct clk **best_parent_p)
+{
+ struct clk *clk = hw->clk, *parent, *best_parent = NULL;
+ int i, num_parents;
+ unsigned long parent_rate, best = 0;
+
+ /* if NO_REPARENT flag set, pass through to current parent */
+ if (clk->flags & CLK_SET_RATE_NO_REPARENT) {
+ parent = clk->parent;
+ if (clk->flags & CLK_SET_RATE_PARENT)
+ best = __clk_round_rate(parent, rate);
+ else if (parent)
+ best = __clk_get_rate(parent);
+ else
+ best = __clk_get_rate(clk);
+ goto out;
+ }
+
+ /* find the parent that can provide the fastest rate <= rate */
+ num_parents = clk->num_parents;
+ for (i = 0; i < num_parents; i++) {
+ parent = clk_get_parent_by_index(clk, i);
+ if (!parent)
+ continue;
+ if (clk->flags & CLK_SET_RATE_PARENT)
+ parent_rate = __clk_round_rate(parent, rate);
+ else
+ parent_rate = __clk_get_rate(parent);
+ if (parent_rate <= rate && parent_rate > best) {
+ best_parent = parent;
+ best = parent_rate;
+ }
+ }
+
+out:
+ if (best_parent)
+ *best_parent_p = best_parent;
+ *best_parent_rate = best;
+
+ return best;
+}
+
/*** clk api ***/
void __clk_unprepare(struct clk *clk)
@@ -702,7 +764,7 @@ void __clk_unprepare(struct clk *clk)
/**
* clk_unprepare - undo preparation of a clock source
- * @clk: the clk being unprepare
+ * @clk: the clk being unprepared
*
* clk_unprepare may sleep, which differentiates it from clk_disable. In a
* simple case, clk_unprepare can be used instead of clk_disable to gate a clk
@@ -869,27 +931,31 @@ EXPORT_SYMBOL_GPL(clk_enable);
/**
* __clk_round_rate - round the given rate for a clk
* @clk: round the rate of this clock
+ * @rate: the rate which is to be rounded
*
* Caller must hold prepare_lock. Useful for clk_ops such as .set_rate
*/
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
unsigned long parent_rate = 0;
+ struct clk *parent;
if (!clk)
return 0;
- if (!clk->ops->round_rate) {
- if (clk->flags & CLK_SET_RATE_PARENT)
- return __clk_round_rate(clk->parent, rate);
- else
- return clk->rate;
- }
-
- if (clk->parent)
- parent_rate = clk->parent->rate;
-
- return clk->ops->round_rate(clk->hw, rate, &parent_rate);
+ parent = clk->parent;
+ if (parent)
+ parent_rate = parent->rate;
+
+ if (clk->ops->determine_rate)
+ return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
+ &parent);
+ else if (clk->ops->round_rate)
+ return clk->ops->round_rate(clk->hw, rate, &parent_rate);
+ else if (clk->flags & CLK_SET_RATE_PARENT)
+ return __clk_round_rate(clk->parent, rate);
+ else
+ return clk->rate;
}
/**
@@ -956,7 +1022,7 @@ static int __clk_notify(struct clk *clk, unsigned long msg,
*
* Walks the subtree of clks starting with clk and recalculates rates as it
* goes. Note that if a clk does not implement the .recalc_rate callback then
- * it is assumed that the clock will take on the rate of it's parent.
+ * it is assumed that the clock will take on the rate of its parent.
*
* clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
* if necessary.
@@ -1014,6 +1080,115 @@ unsigned long clk_get_rate(struct clk *clk)
}
EXPORT_SYMBOL_GPL(clk_get_rate);
+static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent)
+{
+ u8 i;
+
+ if (!clk->parents)
+ clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
+ GFP_KERNEL);
+
+ /*
+ * find index of new parent clock using cached parent ptrs,
+ * or if not yet cached, use string name comparison and cache
+ * them now to avoid future calls to __clk_lookup.
+ */
+ for (i = 0; i < clk->num_parents; i++) {
+ if (clk->parents && clk->parents[i] == parent)
+ break;
+ else if (!strcmp(clk->parent_names[i], parent->name)) {
+ if (clk->parents)
+ clk->parents[i] = __clk_lookup(parent->name);
+ break;
+ }
+ }
+
+ return i;
+}
+
+static void clk_reparent(struct clk *clk, struct clk *new_parent)
+{
+ hlist_del(&clk->child_node);
+
+ if (new_parent) {
+ /* avoid duplicate POST_RATE_CHANGE notifications */
+ if (new_parent->new_child == clk)
+ new_parent->new_child = NULL;
+
+ hlist_add_head(&clk->child_node, &new_parent->children);
+ } else {
+ hlist_add_head(&clk->child_node, &clk_orphan_list);
+ }
+
+ clk->parent = new_parent;
+}
+
+static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
+{
+ unsigned long flags;
+ int ret = 0;
+ struct clk *old_parent = clk->parent;
+
+ /*
+ * Migrate prepare state between parents and prevent race with
+ * clk_enable().
+ *
+ * If the clock is not prepared, then a race with
+ * clk_enable/disable() is impossible since we already have the
+ * prepare lock (future calls to clk_enable() need to be preceded by
+ * a clk_prepare()).
+ *
+ * If the clock is prepared, migrate the prepared state to the new
+ * parent and also protect against a race with clk_enable() by
+ * forcing the clock and the new parent on. This ensures that all
+ * future calls to clk_enable() are practically NOPs with respect to
+ * hardware and software states.
+ *
+ * See also: Comment for clk_set_parent() below.
+ */
+ if (clk->prepare_count) {
+ __clk_prepare(parent);
+ clk_enable(parent);
+ clk_enable(clk);
+ }
+
+ /* update the clk tree topology */
+ flags = clk_enable_lock();
+ clk_reparent(clk, parent);
+ clk_enable_unlock(flags);
+
+ /* change clock input source */
+ if (parent && clk->ops->set_parent)
+ ret = clk->ops->set_parent(clk->hw, p_index);
+
+ if (ret) {
+ flags = clk_enable_lock();
+ clk_reparent(clk, old_parent);
+ clk_enable_unlock(flags);
+
+ if (clk->prepare_count) {
+ clk_disable(clk);
+ clk_disable(parent);
+ __clk_unprepare(parent);
+ }
+ return ret;
+ }
+
+ /*
+ * Finish the migration of prepare state and undo the changes done
+ * for preventing a race with clk_enable().
+ */
+ if (clk->prepare_count) {
+ clk_disable(clk);
+ clk_disable(old_parent);
+ __clk_unprepare(old_parent);
+ }
+
+ /* update debugfs with new clk tree topology */
+ clk_debug_reparent(clk, parent);
+ return 0;
+}
+
/**
* __clk_speculate_rates
* @clk: first clk in the subtree
@@ -1026,7 +1201,7 @@ EXPORT_SYMBOL_GPL(clk_get_rate);
* pre-rate change notifications and returns early if no clks in the
* subtree have subscribed to the notifications. Note that if a clk does not
* implement the .recalc_rate callback then it is assumed that the clock will
- * take on the rate of it's parent.
+ * take on the rate of its parent.
*
* Caller must hold prepare_lock.
*/
@@ -1058,18 +1233,25 @@ out:
return ret;
}
-static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
+static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
+ struct clk *new_parent, u8 p_index)
{
struct clk *child;
clk->new_rate = new_rate;
+ clk->new_parent = new_parent;
+ clk->new_parent_index = p_index;
+ /* include clk in new parent's PRE_RATE_CHANGE notifications */
+ clk->new_child = NULL;
+ if (new_parent && new_parent != clk->parent)
+ new_parent->new_child = clk;
hlist_for_each_entry(child, &clk->children, child_node) {
if (child->ops->recalc_rate)
child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
else
child->new_rate = new_rate;
- clk_calc_subtree(child, child->new_rate);
+ clk_calc_subtree(child, child->new_rate, NULL, 0);
}
}
@@ -1080,50 +1262,63 @@ static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
struct clk *top = clk;
+ struct clk *old_parent, *parent;
unsigned long best_parent_rate = 0;
unsigned long new_rate;
+ u8 p_index = 0;
/* sanity */
if (IS_ERR_OR_NULL(clk))
return NULL;
/* save parent rate, if it exists */
- if (clk->parent)
- best_parent_rate = clk->parent->rate;
-
- /* never propagate up to the parent */
- if (!(clk->flags & CLK_SET_RATE_PARENT)) {
- if (!clk->ops->round_rate) {
- clk->new_rate = clk->rate;
- return NULL;
- }
- new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
+ parent = old_parent = clk->parent;
+ if (parent)
+ best_parent_rate = parent->rate;
+
+ /* find the closest rate and parent clk/rate */
+ if (clk->ops->determine_rate) {
+ new_rate = clk->ops->determine_rate(clk->hw, rate,
+ &best_parent_rate,
+ &parent);
+ } else if (clk->ops->round_rate) {
+ new_rate = clk->ops->round_rate(clk->hw, rate,
+ &best_parent_rate);
+ } else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
+ /* pass-through clock without adjustable parent */
+ clk->new_rate = clk->rate;
+ return NULL;
+ } else {
+ /* pass-through clock with adjustable parent */
+ top = clk_calc_new_rates(parent, rate);
+ new_rate = parent->new_rate;
goto out;
}
- /* need clk->parent from here on out */
- if (!clk->parent) {
- pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
+ /* some clocks must be gated to change parent */
+ if (parent != old_parent &&
+ (clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
+ pr_debug("%s: %s not gated but wants to reparent\n",
+ __func__, clk->name);
return NULL;
}
- if (!clk->ops->round_rate) {
- top = clk_calc_new_rates(clk->parent, rate);
- new_rate = clk->parent->new_rate;
-
- goto out;
+ /* try finding the new parent index */
+ if (parent) {
+ p_index = clk_fetch_parent_index(clk, parent);
+ if (p_index == clk->num_parents) {
+ pr_debug("%s: clk %s can not be parent of clk %s\n",
+ __func__, parent->name, clk->name);
+ return NULL;
+ }
}
- new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
-
- if (best_parent_rate != clk->parent->rate) {
- top = clk_calc_new_rates(clk->parent, best_parent_rate);
-
- goto out;
- }
+ if ((clk->flags & CLK_SET_RATE_PARENT) && parent &&
+ best_parent_rate != parent->rate)
+ top = clk_calc_new_rates(parent, best_parent_rate);
out:
- clk_calc_subtree(clk, new_rate);
+ clk_calc_subtree(clk, new_rate, parent, p_index);
return top;
}
@@ -1135,7 +1330,7 @@ out:
*/
static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
{
- struct clk *child, *fail_clk = NULL;
+ struct clk *child, *tmp_clk, *fail_clk = NULL;
int ret = NOTIFY_DONE;
if (clk->rate == clk->new_rate)
@@ -1148,9 +1343,19 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
}
hlist_for_each_entry(child, &clk->children, child_node) {
- clk = clk_propagate_rate_change(child, event);
- if (clk)
- fail_clk = clk;
+ /* Skip children who will be reparented to another clock */
+ if (child->new_parent && child->new_parent != clk)
+ continue;
+ tmp_clk = clk_propagate_rate_change(child, event);
+ if (tmp_clk)
+ fail_clk = tmp_clk;
+ }
+
+ /* handle the new child who might not be in clk->children yet */
+ if (clk->new_child) {
+ tmp_clk = clk_propagate_rate_change(clk->new_child, event);
+ if (tmp_clk)
+ fail_clk = tmp_clk;
}
return fail_clk;
@@ -1168,6 +1373,10 @@ static void clk_change_rate(struct clk *clk)
old_rate = clk->rate;
+ /* set parent */
+ if (clk->new_parent && clk->new_parent != clk->parent)
+ __clk_set_parent(clk, clk->new_parent, clk->new_parent_index);
+
if (clk->parent)
best_parent_rate = clk->parent->rate;
@@ -1182,8 +1391,16 @@ static void clk_change_rate(struct clk *clk)
if (clk->notifier_count && old_rate != clk->rate)
__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
- hlist_for_each_entry(child, &clk->children, child_node)
+ hlist_for_each_entry(child, &clk->children, child_node) {
+ /* Skip children who will be reparented to another clock */
+ if (child->new_parent && child->new_parent != clk)
+ continue;
clk_change_rate(child);
+ }
+
+ /* handle the new child who might not be in clk->children yet */
+ if (clk->new_child)
+ clk_change_rate(clk->new_child);
}
/**
@@ -1198,7 +1415,7 @@ static void clk_change_rate(struct clk *clk)
* outcome of clk's .round_rate implementation. If *parent_rate is unchanged
* after calling .round_rate then upstream parent propagation is ignored. If
* *parent_rate comes back with a new rate for clk's parent then we propagate
- * up to clk's parent and set it's rate. Upward propagation will continue
+ * up to clk's parent and set its rate. Upward propagation will continue
* until either a clk does not support the CLK_SET_RATE_PARENT flag or
* .round_rate stops requesting changes to clk's parent_rate.
*
@@ -1212,6 +1429,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
struct clk *top, *fail_clk;
int ret = 0;
+ if (!clk)
+ return 0;
+
/* prevent racing with updates to the clock topology */
clk_prepare_lock();
@@ -1315,30 +1535,12 @@ static struct clk *__clk_init_parent(struct clk *clk)
kzalloc((sizeof(struct clk*) * clk->num_parents),
GFP_KERNEL);
- if (!clk->parents)
- ret = __clk_lookup(clk->parent_names[index]);
- else if (!clk->parents[index])
- ret = clk->parents[index] =
- __clk_lookup(clk->parent_names[index]);
- else
- ret = clk->parents[index];
+ ret = clk_get_parent_by_index(clk, index);
out:
return ret;
}
-static void clk_reparent(struct clk *clk, struct clk *new_parent)
-{
- hlist_del(&clk->child_node);
-
- if (new_parent)
- hlist_add_head(&clk->child_node, &new_parent->children);
- else
- hlist_add_head(&clk->child_node, &clk_orphan_list);
-
- clk->parent = new_parent;
-}
-
void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
clk_reparent(clk, new_parent);
@@ -1346,98 +1548,6 @@ void __clk_reparent(struct clk *clk, struct clk *new_parent)
__clk_recalc_rates(clk, POST_RATE_CHANGE);
}
-static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent)
-{
- u8 i;
-
- if (!clk->parents)
- clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
- GFP_KERNEL);
-
- /*
- * find index of new parent clock using cached parent ptrs,
- * or if not yet cached, use string name comparison and cache
- * them now to avoid future calls to __clk_lookup.
- */
- for (i = 0; i < clk->num_parents; i++) {
- if (clk->parents && clk->parents[i] == parent)
- break;
- else if (!strcmp(clk->parent_names[i], parent->name)) {
- if (clk->parents)
- clk->parents[i] = __clk_lookup(parent->name);
- break;
- }
- }
-
- return i;
-}
-
-static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
-{
- unsigned long flags;
- int ret = 0;
- struct clk *old_parent = clk->parent;
-
- /*
- * Migrate prepare state between parents and prevent race with
- * clk_enable().
- *
- * If the clock is not prepared, then a race with
- * clk_enable/disable() is impossible since we already have the
- * prepare lock (future calls to clk_enable() need to be preceded by
- * a clk_prepare()).
- *
- * If the clock is prepared, migrate the prepared state to the new
- * parent and also protect against a race with clk_enable() by
- * forcing the clock and the new parent on. This ensures that all
- * future calls to clk_enable() are practically NOPs with respect to
- * hardware and software states.
- *
- * See also: Comment for clk_set_parent() below.
- */
- if (clk->prepare_count) {
- __clk_prepare(parent);
- clk_enable(parent);
- clk_enable(clk);
- }
-
- /* update the clk tree topology */
- flags = clk_enable_lock();
- clk_reparent(clk, parent);
- clk_enable_unlock(flags);
-
- /* change clock input source */
- if (parent && clk->ops->set_parent)
- ret = clk->ops->set_parent(clk->hw, p_index);
-
- if (ret) {
- flags = clk_enable_lock();
- clk_reparent(clk, old_parent);
- clk_enable_unlock(flags);
-
- if (clk->prepare_count) {
- clk_disable(clk);
- clk_disable(parent);
- __clk_unprepare(parent);
- }
- return ret;
- }
-
- /*
- * Finish the migration of prepare state and undo the changes done
- * for preventing a race with clk_enable().
- */
- if (clk->prepare_count) {
- clk_disable(clk);
- clk_disable(old_parent);
- __clk_unprepare(old_parent);
- }
-
- /* update debugfs with new clk tree topology */
- clk_debug_reparent(clk, parent);
- return 0;
-}
-
/**
* clk_set_parent - switch the parent of a mux clk
* @clk: the mux clk whose input we are switching
@@ -1461,7 +1571,10 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
u8 p_index = 0;
unsigned long p_rate = 0;
- if (!clk || !clk->ops)
+ if (!clk)
+ return 0;
+
+ if (!clk->ops)
return -EINVAL;
/* verify ops for for multi-parent clks */
@@ -1544,8 +1657,9 @@ int __clk_init(struct device *dev, struct clk *clk)
/* check that clk_ops are sane. See Documentation/clk.txt */
if (clk->ops->set_rate &&
- !(clk->ops->round_rate && clk->ops->recalc_rate)) {
- pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
+ !((clk->ops->round_rate || clk->ops->determine_rate) &&
+ clk->ops->recalc_rate)) {
+ pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
__func__, clk->name);
ret = -EINVAL;
goto out;
@@ -1628,7 +1742,7 @@ int __clk_init(struct device *dev, struct clk *clk)
* this clock
*/
hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
- if (orphan->ops->get_parent) {
+ if (orphan->num_parents && orphan->ops->get_parent) {
i = orphan->ops->get_parent(orphan->hw);
if (!strcmp(clk->name, orphan->parent_names[i]))
__clk_reparent(orphan, clk);
@@ -1648,7 +1762,7 @@ int __clk_init(struct device *dev, struct clk *clk)
* The .init callback is not used by any of the basic clock types, but
* exists for weird hardware that must perform initialization magic.
* Please consider other ways of solving initialization problems before
- * using this callback, as it's use is discouraged.
+ * using this callback, as its use is discouraged.
*/
if (clk->ops->init)
clk->ops->init(clk->hw);
@@ -1675,7 +1789,7 @@ out:
* very large numbers of clocks that need to be statically initialized. It is
* a layering violation to include clk-private.h from any code which implements
* a clock's .ops; as such any statically initialized clock data MUST be in a
- * separate C file from the logic that implements it's operations. Returns 0
+ * separate C file from the logic that implements its operations. Returns 0
* on success, otherwise an error code.
*/
struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
@@ -2115,13 +2229,13 @@ EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
*/
void __init of_clk_init(const struct of_device_id *matches)
{
+ const struct of_device_id *match;
struct device_node *np;
if (!matches)
matches = __clk_of_table;
- for_each_matching_node(np, matches) {
- const struct of_device_id *match = of_match_node(matches, np);
+ for_each_matching_node_and_match(np, matches, &match) {
of_clk_init_cb_t clk_init_cb = match->data;
clk_init_cb(np);
}
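
The core changes above introduce the .determine_rate callback, which lets a clock propose both a target rate and the parent best able to provide it; __clk_round_rate() and clk_calc_new_rates() now prefer it over .round_rate, and clk_mux_ops wires it to the generic __clk_mux_determine_rate helper. A provider with mux-like hardware could reuse the helper the same way; a minimal sketch with placeholder my_* callbacks:

	static const struct clk_ops my_mux_like_ops = {
		.get_parent	= my_get_parent,
		.set_parent	= my_set_parent,
		.recalc_rate	= my_recalc_rate,
		/* let the core pick the best parent/rate pair */
		.determine_rate	= __clk_mux_determine_rate,
	};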
diff --git a/drivers/clk/mmp/clk-mmp2.c b/drivers/clk/mmp/clk-mmp2.c
index d1f1a19..b2721ca 100644
--- a/drivers/clk/mmp/clk-mmp2.c
+++ b/drivers/clk/mmp/clk-mmp2.c
@@ -248,7 +248,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, NULL, "mmp2-pwm.3");
clk = clk_register_mux(NULL, "uart0_mux", uart_parent,
- ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_UART0, 4, 3, 0, &clk_lock);
clk_set_parent(clk, vctcxo);
clk_register_clkdev(clk, "uart_mux.0", NULL);
@@ -258,7 +259,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa2xx-uart.0");
clk = clk_register_mux(NULL, "uart1_mux", uart_parent,
- ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_UART1, 4, 3, 0, &clk_lock);
clk_set_parent(clk, vctcxo);
clk_register_clkdev(clk, "uart_mux.1", NULL);
@@ -268,7 +270,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa2xx-uart.1");
clk = clk_register_mux(NULL, "uart2_mux", uart_parent,
- ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_UART2, 4, 3, 0, &clk_lock);
clk_set_parent(clk, vctcxo);
clk_register_clkdev(clk, "uart_mux.2", NULL);
@@ -278,7 +281,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa2xx-uart.2");
clk = clk_register_mux(NULL, "uart3_mux", uart_parent,
- ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_UART3, 4, 3, 0, &clk_lock);
clk_set_parent(clk, vctcxo);
clk_register_clkdev(clk, "uart_mux.3", NULL);
@@ -288,7 +292,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa2xx-uart.3");
clk = clk_register_mux(NULL, "ssp0_mux", ssp_parent,
- ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ssp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_SSP0, 4, 3, 0, &clk_lock);
clk_register_clkdev(clk, "uart_mux.0", NULL);
@@ -297,7 +302,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, NULL, "mmp-ssp.0");
clk = clk_register_mux(NULL, "ssp1_mux", ssp_parent,
- ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ssp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_SSP1, 4, 3, 0, &clk_lock);
clk_register_clkdev(clk, "ssp_mux.1", NULL);
@@ -306,7 +312,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, NULL, "mmp-ssp.1");
clk = clk_register_mux(NULL, "ssp2_mux", ssp_parent,
- ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ssp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_SSP2, 4, 3, 0, &clk_lock);
clk_register_clkdev(clk, "ssp_mux.2", NULL);
@@ -315,7 +322,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, NULL, "mmp-ssp.2");
clk = clk_register_mux(NULL, "ssp3_mux", ssp_parent,
- ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ssp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_SSP3, 4, 3, 0, &clk_lock);
clk_register_clkdev(clk, "ssp_mux.3", NULL);
@@ -324,7 +332,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, NULL, "mmp-ssp.3");
clk = clk_register_mux(NULL, "sdh_mux", sdh_parent,
- ARRAY_SIZE(sdh_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(sdh_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_SDH0, 8, 2, 0, &clk_lock);
clk_register_clkdev(clk, "sdh_mux", NULL);
@@ -354,7 +363,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, "usb_clk", NULL);
clk = clk_register_mux(NULL, "disp0_mux", disp_parent,
- ARRAY_SIZE(disp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(disp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_DISP0, 6, 2, 0, &clk_lock);
clk_register_clkdev(clk, "disp_mux.0", NULL);
@@ -376,7 +386,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, "disp_sphy.0", NULL);
clk = clk_register_mux(NULL, "disp1_mux", disp_parent,
- ARRAY_SIZE(disp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(disp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_DISP1, 6, 2, 0, &clk_lock);
clk_register_clkdev(clk, "disp_mux.1", NULL);
@@ -394,7 +405,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, "ccic_arbiter", NULL);
clk = clk_register_mux(NULL, "ccic0_mux", ccic_parent,
- ARRAY_SIZE(ccic_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ccic_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_CCIC0, 6, 2, 0, &clk_lock);
clk_register_clkdev(clk, "ccic_mux.0", NULL);
@@ -421,7 +433,8 @@ void __init mmp2_clk_init(void)
clk_register_clkdev(clk, "sphyclk", "mmp-ccic.0");
clk = clk_register_mux(NULL, "ccic1_mux", ccic_parent,
- ARRAY_SIZE(ccic_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ccic_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_CCIC1, 6, 2, 0, &clk_lock);
clk_register_clkdev(clk, "ccic_mux.1", NULL);
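
The MMP muxes above pick their parent explicitly with clk_set_parent() at init time; adding CLK_SET_RATE_NO_REPARENT preserves that choice now that clk_mux_ops gained a .determine_rate implementation, which would otherwise be free to re-select a parent during clk_set_rate(). The pxa168 and pxa910 conversions below follow the same pattern, condensed here using the uart0 mux from this file:

	clk = clk_register_mux(NULL, "uart0_mux", uart_parent,
			       ARRAY_SIZE(uart_parent),
			       CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
			       apbc_base + APBC_UART0, 4, 3, 0, &clk_lock);
	clk_set_parent(clk, vctcxo);	/* explicit choice, kept stable afterwards */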
diff --git a/drivers/clk/mmp/clk-pxa168.c b/drivers/clk/mmp/clk-pxa168.c
index 28b3b51..014396b 100644
--- a/drivers/clk/mmp/clk-pxa168.c
+++ b/drivers/clk/mmp/clk-pxa168.c
@@ -199,7 +199,8 @@ void __init pxa168_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa168-pwm.3");
clk = clk_register_mux(NULL, "uart0_mux", uart_parent,
- ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_UART0, 4, 3, 0, &clk_lock);
clk_set_parent(clk, uart_pll);
clk_register_clkdev(clk, "uart_mux.0", NULL);
@@ -209,7 +210,8 @@ void __init pxa168_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa2xx-uart.0");
clk = clk_register_mux(NULL, "uart1_mux", uart_parent,
- ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_UART1, 4, 3, 0, &clk_lock);
clk_set_parent(clk, uart_pll);
clk_register_clkdev(clk, "uart_mux.1", NULL);
@@ -219,7 +221,8 @@ void __init pxa168_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa2xx-uart.1");
clk = clk_register_mux(NULL, "uart2_mux", uart_parent,
- ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_UART2, 4, 3, 0, &clk_lock);
clk_set_parent(clk, uart_pll);
clk_register_clkdev(clk, "uart_mux.2", NULL);
@@ -229,7 +232,8 @@ void __init pxa168_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa2xx-uart.2");
clk = clk_register_mux(NULL, "ssp0_mux", ssp_parent,
- ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ssp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_SSP0, 4, 3, 0, &clk_lock);
clk_register_clkdev(clk, "uart_mux.0", NULL);
@@ -238,7 +242,8 @@ void __init pxa168_clk_init(void)
clk_register_clkdev(clk, NULL, "mmp-ssp.0");
clk = clk_register_mux(NULL, "ssp1_mux", ssp_parent,
- ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ssp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_SSP1, 4, 3, 0, &clk_lock);
clk_register_clkdev(clk, "ssp_mux.1", NULL);
@@ -247,7 +252,8 @@ void __init pxa168_clk_init(void)
clk_register_clkdev(clk, NULL, "mmp-ssp.1");
clk = clk_register_mux(NULL, "ssp2_mux", ssp_parent,
- ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ssp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_SSP2, 4, 3, 0, &clk_lock);
clk_register_clkdev(clk, "ssp_mux.2", NULL);
@@ -256,7 +262,8 @@ void __init pxa168_clk_init(void)
clk_register_clkdev(clk, NULL, "mmp-ssp.2");
clk = clk_register_mux(NULL, "ssp3_mux", ssp_parent,
- ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ssp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_SSP3, 4, 3, 0, &clk_lock);
clk_register_clkdev(clk, "ssp_mux.3", NULL);
@@ -265,7 +272,8 @@ void __init pxa168_clk_init(void)
clk_register_clkdev(clk, NULL, "mmp-ssp.3");
clk = clk_register_mux(NULL, "ssp4_mux", ssp_parent,
- ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ssp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_SSP4, 4, 3, 0, &clk_lock);
clk_register_clkdev(clk, "ssp_mux.4", NULL);
@@ -278,7 +286,8 @@ void __init pxa168_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa3xx-nand.0");
clk = clk_register_mux(NULL, "sdh0_mux", sdh_parent,
- ARRAY_SIZE(sdh_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(sdh_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_SDH0, 6, 1, 0, &clk_lock);
clk_register_clkdev(clk, "sdh0_mux", NULL);
@@ -287,7 +296,8 @@ void __init pxa168_clk_init(void)
clk_register_clkdev(clk, NULL, "sdhci-pxa.0");
clk = clk_register_mux(NULL, "sdh1_mux", sdh_parent,
- ARRAY_SIZE(sdh_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(sdh_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_SDH1, 6, 1, 0, &clk_lock);
clk_register_clkdev(clk, "sdh1_mux", NULL);
@@ -304,7 +314,8 @@ void __init pxa168_clk_init(void)
clk_register_clkdev(clk, "sph_clk", NULL);
clk = clk_register_mux(NULL, "disp0_mux", disp_parent,
- ARRAY_SIZE(disp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(disp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_DISP0, 6, 1, 0, &clk_lock);
clk_register_clkdev(clk, "disp_mux.0", NULL);
@@ -317,7 +328,8 @@ void __init pxa168_clk_init(void)
clk_register_clkdev(clk, "hclk", "mmp-disp.0");
clk = clk_register_mux(NULL, "ccic0_mux", ccic_parent,
- ARRAY_SIZE(ccic_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ccic_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_CCIC0, 6, 1, 0, &clk_lock);
clk_register_clkdev(clk, "ccic_mux.0", NULL);
@@ -327,8 +339,8 @@ void __init pxa168_clk_init(void)
clk = clk_register_mux(NULL, "ccic0_phy_mux", ccic_phy_parent,
ARRAY_SIZE(ccic_phy_parent),
- CLK_SET_RATE_PARENT, apmu_base + APMU_CCIC0,
- 7, 1, 0, &clk_lock);
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ apmu_base + APMU_CCIC0, 7, 1, 0, &clk_lock);
clk_register_clkdev(clk, "ccic_phy_mux.0", NULL);
clk = mmp_clk_register_apmu("ccic0_phy", "ccic0_phy_mux",
diff --git a/drivers/clk/mmp/clk-pxa910.c b/drivers/clk/mmp/clk-pxa910.c
index 6ec0569..9efc6a4 100644
--- a/drivers/clk/mmp/clk-pxa910.c
+++ b/drivers/clk/mmp/clk-pxa910.c
@@ -204,7 +204,8 @@ void __init pxa910_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa910-pwm.3");
clk = clk_register_mux(NULL, "uart0_mux", uart_parent,
- ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_UART0, 4, 3, 0, &clk_lock);
clk_set_parent(clk, uart_pll);
clk_register_clkdev(clk, "uart_mux.0", NULL);
@@ -214,7 +215,8 @@ void __init pxa910_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa2xx-uart.0");
clk = clk_register_mux(NULL, "uart1_mux", uart_parent,
- ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_UART1, 4, 3, 0, &clk_lock);
clk_set_parent(clk, uart_pll);
clk_register_clkdev(clk, "uart_mux.1", NULL);
@@ -224,7 +226,8 @@ void __init pxa910_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa2xx-uart.1");
clk = clk_register_mux(NULL, "uart2_mux", uart_parent,
- ARRAY_SIZE(uart_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbcp_base + APBCP_UART2, 4, 3, 0, &clk_lock);
clk_set_parent(clk, uart_pll);
clk_register_clkdev(clk, "uart_mux.2", NULL);
@@ -234,7 +237,8 @@ void __init pxa910_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa2xx-uart.2");
clk = clk_register_mux(NULL, "ssp0_mux", ssp_parent,
- ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ssp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_SSP0, 4, 3, 0, &clk_lock);
clk_register_clkdev(clk, "uart_mux.0", NULL);
@@ -243,7 +247,8 @@ void __init pxa910_clk_init(void)
clk_register_clkdev(clk, NULL, "mmp-ssp.0");
clk = clk_register_mux(NULL, "ssp1_mux", ssp_parent,
- ARRAY_SIZE(ssp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ssp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apbc_base + APBC_SSP1, 4, 3, 0, &clk_lock);
clk_register_clkdev(clk, "ssp_mux.1", NULL);
@@ -256,7 +261,8 @@ void __init pxa910_clk_init(void)
clk_register_clkdev(clk, NULL, "pxa3xx-nand.0");
clk = clk_register_mux(NULL, "sdh0_mux", sdh_parent,
- ARRAY_SIZE(sdh_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(sdh_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_SDH0, 6, 1, 0, &clk_lock);
clk_register_clkdev(clk, "sdh0_mux", NULL);
@@ -265,7 +271,8 @@ void __init pxa910_clk_init(void)
clk_register_clkdev(clk, NULL, "sdhci-pxa.0");
clk = clk_register_mux(NULL, "sdh1_mux", sdh_parent,
- ARRAY_SIZE(sdh_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(sdh_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_SDH1, 6, 1, 0, &clk_lock);
clk_register_clkdev(clk, "sdh1_mux", NULL);
@@ -282,7 +289,8 @@ void __init pxa910_clk_init(void)
clk_register_clkdev(clk, "sph_clk", NULL);
clk = clk_register_mux(NULL, "disp0_mux", disp_parent,
- ARRAY_SIZE(disp_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(disp_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_DISP0, 6, 1, 0, &clk_lock);
clk_register_clkdev(clk, "disp_mux.0", NULL);
@@ -291,7 +299,8 @@ void __init pxa910_clk_init(void)
clk_register_clkdev(clk, NULL, "mmp-disp.0");
clk = clk_register_mux(NULL, "ccic0_mux", ccic_parent,
- ARRAY_SIZE(ccic_parent), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(ccic_parent),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
apmu_base + APMU_CCIC0, 6, 1, 0, &clk_lock);
clk_register_clkdev(clk, "ccic_mux.0", NULL);
@@ -301,8 +310,8 @@ void __init pxa910_clk_init(void)
clk = clk_register_mux(NULL, "ccic0_phy_mux", ccic_phy_parent,
ARRAY_SIZE(ccic_phy_parent),
- CLK_SET_RATE_PARENT, apmu_base + APMU_CCIC0,
- 7, 1, 0, &clk_lock);
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ apmu_base + APMU_CCIC0, 7, 1, 0, &clk_lock);
clk_register_clkdev(clk, "ccic_phy_mux.0", NULL);
clk = mmp_clk_register_apmu("ccic0_phy", "ccic0_phy_mux",
diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c
index 079960e..fc777bd 100644
--- a/drivers/clk/mvebu/armada-370.c
+++ b/drivers/clk/mvebu/armada-370.c
@@ -32,13 +32,13 @@
enum { A370_CPU_TO_NBCLK, A370_CPU_TO_HCLK, A370_CPU_TO_DRAMCLK };
-static const struct coreclk_ratio __initconst a370_coreclk_ratios[] = {
+static const struct coreclk_ratio a370_coreclk_ratios[] __initconst = {
{ .id = A370_CPU_TO_NBCLK, .name = "nbclk" },
{ .id = A370_CPU_TO_HCLK, .name = "hclk" },
{ .id = A370_CPU_TO_DRAMCLK, .name = "dramclk" },
};
-static const u32 __initconst a370_tclk_freqs[] = {
+static const u32 a370_tclk_freqs[] __initconst = {
16600000,
20000000,
};
@@ -52,7 +52,7 @@ static u32 __init a370_get_tclk_freq(void __iomem *sar)
return a370_tclk_freqs[tclk_freq_select];
}
-static const u32 __initconst a370_cpu_freqs[] = {
+static const u32 a370_cpu_freqs[] __initconst = {
400000000,
533000000,
667000000,
@@ -78,7 +78,7 @@ static u32 __init a370_get_cpu_freq(void __iomem *sar)
return cpu_freq;
}
-static const int __initconst a370_nbclk_ratios[32][2] = {
+static const int a370_nbclk_ratios[32][2] __initconst = {
{0, 1}, {1, 2}, {2, 2}, {2, 2},
{1, 2}, {1, 2}, {1, 1}, {2, 3},
{0, 1}, {1, 2}, {2, 4}, {0, 1},
@@ -89,7 +89,7 @@ static const int __initconst a370_nbclk_ratios[32][2] = {
{0, 1}, {0, 1}, {0, 1}, {0, 1},
};
-static const int __initconst a370_hclk_ratios[32][2] = {
+static const int a370_hclk_ratios[32][2] __initconst = {
{0, 1}, {1, 2}, {2, 6}, {2, 3},
{1, 3}, {1, 4}, {1, 2}, {2, 6},
{0, 1}, {1, 6}, {2, 10}, {0, 1},
@@ -100,7 +100,7 @@ static const int __initconst a370_hclk_ratios[32][2] = {
{0, 1}, {0, 1}, {0, 1}, {0, 1},
};
-static const int __initconst a370_dramclk_ratios[32][2] = {
+static const int a370_dramclk_ratios[32][2] __initconst = {
{0, 1}, {1, 2}, {2, 3}, {2, 3},
{1, 3}, {1, 2}, {1, 2}, {2, 6},
{0, 1}, {1, 3}, {2, 5}, {0, 1},
@@ -152,7 +152,7 @@ CLK_OF_DECLARE(a370_core_clk, "marvell,armada-370-core-clock",
* Clock Gating Control
*/
-static const struct clk_gating_soc_desc __initconst a370_gating_desc[] = {
+static const struct clk_gating_soc_desc a370_gating_desc[] __initconst = {
{ "audio", NULL, 0, 0 },
{ "pex0_en", NULL, 1, 0 },
{ "pex1_en", NULL, 2, 0 },
diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c
index 13b62ce..9922c44 100644
--- a/drivers/clk/mvebu/armada-xp.c
+++ b/drivers/clk/mvebu/armada-xp.c
@@ -40,7 +40,7 @@
enum { AXP_CPU_TO_NBCLK, AXP_CPU_TO_HCLK, AXP_CPU_TO_DRAMCLK };
-static const struct coreclk_ratio __initconst axp_coreclk_ratios[] = {
+static const struct coreclk_ratio axp_coreclk_ratios[] __initconst = {
{ .id = AXP_CPU_TO_NBCLK, .name = "nbclk" },
{ .id = AXP_CPU_TO_HCLK, .name = "hclk" },
{ .id = AXP_CPU_TO_DRAMCLK, .name = "dramclk" },
@@ -52,7 +52,7 @@ static u32 __init axp_get_tclk_freq(void __iomem *sar)
return 250000000;
}
-static const u32 __initconst axp_cpu_freqs[] = {
+static const u32 axp_cpu_freqs[] __initconst = {
1000000000,
1066000000,
1200000000,
@@ -89,7 +89,7 @@ static u32 __init axp_get_cpu_freq(void __iomem *sar)
return cpu_freq;
}
-static const int __initconst axp_nbclk_ratios[32][2] = {
+static const int axp_nbclk_ratios[32][2] __initconst = {
{0, 1}, {1, 2}, {2, 2}, {2, 2},
{1, 2}, {1, 2}, {1, 1}, {2, 3},
{0, 1}, {1, 2}, {2, 4}, {0, 1},
@@ -100,7 +100,7 @@ static const int __initconst axp_nbclk_ratios[32][2] = {
{0, 1}, {0, 1}, {0, 1}, {0, 1},
};
-static const int __initconst axp_hclk_ratios[32][2] = {
+static const int axp_hclk_ratios[32][2] __initconst = {
{0, 1}, {1, 2}, {2, 6}, {2, 3},
{1, 3}, {1, 4}, {1, 2}, {2, 6},
{0, 1}, {1, 6}, {2, 10}, {0, 1},
@@ -111,7 +111,7 @@ static const int __initconst axp_hclk_ratios[32][2] = {
{0, 1}, {0, 1}, {0, 1}, {0, 1},
};
-static const int __initconst axp_dramclk_ratios[32][2] = {
+static const int axp_dramclk_ratios[32][2] __initconst = {
{0, 1}, {1, 2}, {2, 3}, {2, 3},
{1, 3}, {1, 2}, {1, 2}, {2, 6},
{0, 1}, {1, 3}, {2, 5}, {0, 1},
@@ -169,7 +169,7 @@ CLK_OF_DECLARE(axp_core_clk, "marvell,armada-xp-core-clock",
* Clock Gating Control
*/
-static const struct clk_gating_soc_desc __initconst axp_gating_desc[] = {
+static const struct clk_gating_soc_desc axp_gating_desc[] __initconst = {
{ "audio", NULL, 0, 0 },
{ "ge3", NULL, 1, 0 },
{ "ge2", NULL, 2, 0 },
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
index b0fbc07..1466865 100644
--- a/drivers/clk/mvebu/clk-cpu.c
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -119,7 +119,7 @@ void __init of_cpu_clk_setup(struct device_node *node)
cpuclk = kzalloc(ncpus * sizeof(*cpuclk), GFP_KERNEL);
if (WARN_ON(!cpuclk))
- return;
+ goto cpuclk_out;
clks = kzalloc(ncpus * sizeof(*clks), GFP_KERNEL);
if (WARN_ON(!clks))
@@ -170,6 +170,8 @@ bail_out:
kfree(cpuclk[ncpus].clk_name);
clks_out:
kfree(cpuclk);
+cpuclk_out:
+ iounmap(clock_complex_base);
}
CLK_OF_DECLARE(armada_xp_cpu_clock, "marvell,armada-xp-cpu-clock",
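
For context, the setup functions being fixed here are wired to the device tree through CLK_OF_DECLARE(), which binds an __init routine to a compatible string so of_clk_init() calls it during early boot. A bare-bones sketch of that hookup, with a placeholder compatible string and body:

#include <linux/clk-provider.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>

static void __init example_clk_setup(struct device_node *np)
{
        void __iomem *base = of_iomap(np, 0);

        if (WARN_ON(!base))
                return;
        /* ... register clocks backed by 'base' here ... */
}
CLK_OF_DECLARE(example_clk, "vendor,example-clock", example_clk_setup);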
diff --git a/drivers/clk/mvebu/common.c b/drivers/clk/mvebu/common.c
index adaa4a1..25ceccf 100644
--- a/drivers/clk/mvebu/common.c
+++ b/drivers/clk/mvebu/common.c
@@ -45,8 +45,10 @@ void __init mvebu_coreclk_setup(struct device_node *np,
clk_data.clk_num = 2 + desc->num_ratios;
clk_data.clks = kzalloc(clk_data.clk_num * sizeof(struct clk *),
GFP_KERNEL);
- if (WARN_ON(!clk_data.clks))
+ if (WARN_ON(!clk_data.clks)) {
+ iounmap(base);
return;
+ }
/* Register TCLK */
of_property_read_string_index(np, "clock-output-names", 0,
@@ -134,7 +136,7 @@ void __init mvebu_clk_gating_setup(struct device_node *np,
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
if (WARN_ON(!ctrl))
- return;
+ goto ctrl_out;
spin_lock_init(&ctrl->lock);
@@ -145,10 +147,8 @@ void __init mvebu_clk_gating_setup(struct device_node *np,
ctrl->num_gates = n;
ctrl->gates = kzalloc(ctrl->num_gates * sizeof(struct clk *),
GFP_KERNEL);
- if (WARN_ON(!ctrl->gates)) {
- kfree(ctrl);
- return;
- }
+ if (WARN_ON(!ctrl->gates))
+ goto gates_out;
for (n = 0; n < ctrl->num_gates; n++) {
const char *parent =
@@ -160,4 +160,10 @@ void __init mvebu_clk_gating_setup(struct device_node *np,
}
of_clk_add_provider(np, clk_gating_get_src, ctrl);
+
+ return;
+gates_out:
+ kfree(ctrl);
+ctrl_out:
+ iounmap(base);
}
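
The common.c hunks above convert ad-hoc error handling into a layered goto unwind: each failed allocation jumps to a label that releases only what was set up before it, and the register mapping is now unmapped on every error path instead of leaking. A simplified sketch of the resulting shape, with stand-in structures and without the real gate registration:

#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_ctrl {
        spinlock_t lock;
        struct clk **gates;
        int num_gates;
};

static void __init example_gating_setup(struct device_node *np, int ngates)
{
        struct example_ctrl *ctrl;
        void __iomem *base = of_iomap(np, 0);

        if (WARN_ON(!base))
                return;

        ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
        if (WARN_ON(!ctrl))
                goto ctrl_out;

        spin_lock_init(&ctrl->lock);
        ctrl->num_gates = ngates;
        ctrl->gates = kzalloc(ngates * sizeof(*ctrl->gates), GFP_KERNEL);
        if (WARN_ON(!ctrl->gates))
                goto gates_out;

        /* ... register the gates and the clock provider here ... */
        return;

gates_out:
        kfree(ctrl);
ctrl_out:
        iounmap(base);
}

The label order mirrors the allocation order in reverse, so adding a later allocation only requires one new label above the existing ones.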
diff --git a/drivers/clk/mvebu/dove.c b/drivers/clk/mvebu/dove.c
index 79d7aed..38aee1e 100644
--- a/drivers/clk/mvebu/dove.c
+++ b/drivers/clk/mvebu/dove.c
@@ -74,12 +74,12 @@
enum { DOVE_CPU_TO_L2, DOVE_CPU_TO_DDR };
-static const struct coreclk_ratio __initconst dove_coreclk_ratios[] = {
+static const struct coreclk_ratio dove_coreclk_ratios[] __initconst = {
{ .id = DOVE_CPU_TO_L2, .name = "l2clk", },
{ .id = DOVE_CPU_TO_DDR, .name = "ddrclk", }
};
-static const u32 __initconst dove_tclk_freqs[] = {
+static const u32 dove_tclk_freqs[] __initconst = {
166666667,
125000000,
0, 0
@@ -92,7 +92,7 @@ static u32 __init dove_get_tclk_freq(void __iomem *sar)
return dove_tclk_freqs[opt];
}
-static const u32 __initconst dove_cpu_freqs[] = {
+static const u32 dove_cpu_freqs[] __initconst = {
0, 0, 0, 0, 0,
1000000000,
933333333, 933333333,
@@ -111,12 +111,12 @@ static u32 __init dove_get_cpu_freq(void __iomem *sar)
return dove_cpu_freqs[opt];
}
-static const int __initconst dove_cpu_l2_ratios[8][2] = {
+static const int dove_cpu_l2_ratios[8][2] __initconst = {
{ 1, 1 }, { 0, 1 }, { 1, 2 }, { 0, 1 },
{ 1, 3 }, { 0, 1 }, { 1, 4 }, { 0, 1 }
};
-static const int __initconst dove_cpu_ddr_ratios[16][2] = {
+static const int dove_cpu_ddr_ratios[16][2] __initconst = {
{ 1, 1 }, { 0, 1 }, { 1, 2 }, { 2, 5 },
{ 1, 3 }, { 0, 1 }, { 1, 4 }, { 0, 1 },
{ 1, 5 }, { 0, 1 }, { 1, 6 }, { 0, 1 },
@@ -164,7 +164,7 @@ CLK_OF_DECLARE(dove_core_clk, "marvell,dove-core-clock", dove_coreclk_init);
* Clock Gating Control
*/
-static const struct clk_gating_soc_desc __initconst dove_gating_desc[] = {
+static const struct clk_gating_soc_desc dove_gating_desc[] __initconst = {
{ "usb0", NULL, 0, 0 },
{ "usb1", NULL, 1, 0 },
{ "ge", "gephy", 2, 0 },
diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c
index 71d2461..2636a55 100644
--- a/drivers/clk/mvebu/kirkwood.c
+++ b/drivers/clk/mvebu/kirkwood.c
@@ -78,7 +78,7 @@
enum { KIRKWOOD_CPU_TO_L2, KIRKWOOD_CPU_TO_DDR };
-static const struct coreclk_ratio __initconst kirkwood_coreclk_ratios[] = {
+static const struct coreclk_ratio kirkwood_coreclk_ratios[] __initconst = {
{ .id = KIRKWOOD_CPU_TO_L2, .name = "l2clk", },
{ .id = KIRKWOOD_CPU_TO_DDR, .name = "ddrclk", }
};
@@ -90,7 +90,7 @@ static u32 __init kirkwood_get_tclk_freq(void __iomem *sar)
return (opt) ? 166666667 : 200000000;
}
-static const u32 __initconst kirkwood_cpu_freqs[] = {
+static const u32 kirkwood_cpu_freqs[] __initconst = {
0, 0, 0, 0,
600000000,
0,
@@ -111,12 +111,12 @@ static u32 __init kirkwood_get_cpu_freq(void __iomem *sar)
return kirkwood_cpu_freqs[opt];
}
-static const int __initconst kirkwood_cpu_l2_ratios[8][2] = {
+static const int kirkwood_cpu_l2_ratios[8][2] __initconst = {
{ 0, 1 }, { 1, 2 }, { 0, 1 }, { 1, 3 },
{ 0, 1 }, { 1, 4 }, { 0, 1 }, { 0, 1 }
};
-static const int __initconst kirkwood_cpu_ddr_ratios[16][2] = {
+static const int kirkwood_cpu_ddr_ratios[16][2] __initconst = {
{ 0, 1 }, { 0, 1 }, { 1, 2 }, { 0, 1 },
{ 1, 3 }, { 0, 1 }, { 1, 4 }, { 2, 9 },
{ 1, 5 }, { 1, 6 }, { 0, 1 }, { 0, 1 },
@@ -145,7 +145,7 @@ static void __init kirkwood_get_clk_ratio(
}
}
-static const u32 __initconst mv88f6180_cpu_freqs[] = {
+static const u32 mv88f6180_cpu_freqs[] __initconst = {
0, 0, 0, 0, 0,
600000000,
800000000,
@@ -158,7 +158,7 @@ static u32 __init mv88f6180_get_cpu_freq(void __iomem *sar)
return mv88f6180_cpu_freqs[opt];
}
-static const int __initconst mv88f6180_cpu_ddr_ratios[8][2] = {
+static const int mv88f6180_cpu_ddr_ratios[8][2] __initconst = {
{ 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 },
{ 0, 1 }, { 1, 3 }, { 1, 4 }, { 1, 5 }
};
@@ -219,7 +219,7 @@ CLK_OF_DECLARE(mv88f6180_core_clk, "marvell,mv88f6180-core-clock",
* Clock Gating Control
*/
-static const struct clk_gating_soc_desc __initconst kirkwood_gating_desc[] = {
+static const struct clk_gating_soc_desc kirkwood_gating_desc[] __initconst = {
{ "ge0", NULL, 0, 0 },
{ "pex0", NULL, 2, 0 },
{ "usb0", NULL, 3, 0 },
diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c
index f6a7487..c396fe3 100644
--- a/drivers/clk/mxs/clk-imx23.c
+++ b/drivers/clk/mxs/clk-imx23.c
@@ -10,6 +10,7 @@
*/
#include <linux/clk.h>
+#include <linux/clk/mxs.h>
#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/clk/mxs/clk.h b/drivers/clk/mxs/clk.h
index 81421e2..ef10ad9 100644
--- a/drivers/clk/mxs/clk.h
+++ b/drivers/clk/mxs/clk.h
@@ -52,8 +52,8 @@ static inline struct clk *mxs_clk_mux(const char *name, void __iomem *reg,
u8 shift, u8 width, const char **parent_names, int num_parents)
{
return clk_register_mux(NULL, name, parent_names, num_parents,
- CLK_SET_RATE_PARENT, reg, shift, width,
- 0, &mxs_lock);
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ reg, shift, width, 0, &mxs_lock);
}
static inline struct clk *mxs_clk_fixed_factor(const char *name,
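
The mxs header illustrates a useful idiom that the one-line fix above preserves: a static inline wrapper bakes the SoC-wide defaults (flags and the shared spinlock) into a single call, so every mux in the driver is registered with identical semantics and a flag change lands in exactly one place. A hypothetical wrapper for another driver might look like this:

#include <linux/clk-provider.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(soc_clk_lock);

static inline struct clk *soc_clk_mux(const char *name, void __iomem *reg,
                u8 shift, u8 width, const char **parents, int num_parents)
{
        return clk_register_mux(NULL, name, parents, num_parents,
                        CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
                        reg, shift, width, 0, &soc_clk_lock);
}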
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 5d4d432..3413380 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -8,3 +8,6 @@ obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o
obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5420.o
obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o
obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-audss.o
+ifdef CONFIG_COMMON_CLK
+obj-$(CONFIG_ARCH_S3C64XX) += clk-s3c64xx.o
+endif
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 9b1bbd5..39b40aa 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -62,7 +62,7 @@ static struct syscore_ops exynos_audss_clk_syscore_ops = {
#endif /* CONFIG_PM_SLEEP */
/* register exynos_audss clocks */
-void __init exynos_audss_clk_init(struct device_node *np)
+static void __init exynos_audss_clk_init(struct device_node *np)
{
reg_base = of_iomap(np, 0);
if (!reg_base) {
@@ -82,11 +82,13 @@ void __init exynos_audss_clk_init(struct device_node *np)
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
clk_table[EXYNOS_MOUT_AUDSS] = clk_register_mux(NULL, "mout_audss",
- mout_audss_p, ARRAY_SIZE(mout_audss_p), 0,
+ mout_audss_p, ARRAY_SIZE(mout_audss_p),
+ CLK_SET_RATE_NO_REPARENT,
reg_base + ASS_CLK_SRC, 0, 1, 0, &lock);
clk_table[EXYNOS_MOUT_I2S] = clk_register_mux(NULL, "mout_i2s",
- mout_i2s_p, ARRAY_SIZE(mout_i2s_p), 0,
+ mout_i2s_p, ARRAY_SIZE(mout_i2s_p),
+ CLK_SET_RATE_NO_REPARENT,
reg_base + ASS_CLK_SRC, 2, 2, 0, &lock);
clk_table[EXYNOS_DOUT_SRP] = clk_register_divider(NULL, "dout_srp",
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 4e57397..ad5ff50 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -17,7 +17,6 @@
#include <linux/of_address.h>
#include "clk.h"
-#include "clk-pll.h"
/* Exynos4 clock controller register offsets */
#define SRC_LEFTBUS 0x4200
@@ -97,12 +96,15 @@
#define GATE_IP_PERIL 0xc950
#define E4210_GATE_IP_PERIR 0xc960
#define GATE_BLOCK 0xc970
+#define E4X12_MPLL_LOCK 0x10008
#define E4X12_MPLL_CON0 0x10108
#define SRC_DMC 0x10200
#define SRC_MASK_DMC 0x10300
#define DIV_DMC0 0x10500
#define DIV_DMC1 0x10504
#define GATE_IP_DMC 0x10900
+#define APLL_LOCK 0x14000
+#define E4210_MPLL_LOCK 0x14008
#define APLL_CON0 0x14100
#define E4210_MPLL_CON0 0x14108
#define SRC_CPU 0x14200
@@ -121,6 +123,12 @@ enum exynos4_soc {
EXYNOS4X12,
};
+/* list of PLLs to be registered */
+enum exynos4_plls {
+ apll, mpll, epll, vpll,
+ nr_plls /* number of PLLs */
+};
+
/*
* Let each supported clock get a unique id. This id is used to lookup the clock
* for device tree based platforms. The clocks are categorized into three
@@ -169,7 +177,7 @@ enum exynos4_clks {
gicisp, smmu_isp, smmu_drc, smmu_fd, smmu_lite0, smmu_lite1, mcuctl_isp,
mpwm_isp, i2c0_isp, i2c1_isp, mtcadc_isp, pwm_isp, wdt_isp, uart_isp,
asyncaxim, smmu_ispcx, spi0_isp, spi1_isp, pwm_isp_sclk, spi0_isp_sclk,
- spi1_isp_sclk, uart_isp_sclk,
+ spi1_isp_sclk, uart_isp_sclk, tmu_apbif,
/* mux clocks */
mout_fimc0 = 384, mout_fimc1, mout_fimc2, mout_fimc3, mout_cam0,
@@ -187,7 +195,7 @@ enum exynos4_clks {
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
*/
-static __initdata unsigned long exynos4210_clk_save[] = {
+static unsigned long exynos4210_clk_save[] __initdata = {
E4210_SRC_IMAGE,
E4210_SRC_LCD1,
E4210_SRC_MASK_LCD1,
@@ -198,7 +206,7 @@ static __initdata unsigned long exynos4210_clk_save[] = {
E4210_MPLL_CON0,
};
-static __initdata unsigned long exynos4x12_clk_save[] = {
+static unsigned long exynos4x12_clk_save[] __initdata = {
E4X12_GATE_IP_IMAGE,
E4X12_GATE_IP_PERIR,
E4X12_SRC_CAM1,
@@ -207,7 +215,7 @@ static __initdata unsigned long exynos4x12_clk_save[] = {
E4X12_MPLL_CON0,
};
-static __initdata unsigned long exynos4_clk_regs[] = {
+static unsigned long exynos4_clk_regs[] __initdata = {
SRC_LEFTBUS,
DIV_LEFTBUS,
GATE_IP_LEFTBUS,
@@ -338,24 +346,24 @@ PNAME(mout_user_aclk200_p4x12) = {"fin_pll", "div_aclk200", };
PNAME(mout_user_aclk266_gps_p4x12) = {"fin_pll", "div_aclk266_gps", };
/* fixed rate clocks generated outside the soc */
-struct samsung_fixed_rate_clock exynos4_fixed_rate_ext_clks[] __initdata = {
+static struct samsung_fixed_rate_clock exynos4_fixed_rate_ext_clks[] __initdata = {
FRATE(xxti, "xxti", NULL, CLK_IS_ROOT, 0),
FRATE(xusbxti, "xusbxti", NULL, CLK_IS_ROOT, 0),
};
/* fixed rate clocks generated inside the soc */
-struct samsung_fixed_rate_clock exynos4_fixed_rate_clks[] __initdata = {
+static struct samsung_fixed_rate_clock exynos4_fixed_rate_clks[] __initdata = {
FRATE(none, "sclk_hdmi24m", NULL, CLK_IS_ROOT, 24000000),
FRATE(none, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000),
FRATE(none, "sclk_usbphy0", NULL, CLK_IS_ROOT, 48000000),
};
-struct samsung_fixed_rate_clock exynos4210_fixed_rate_clks[] __initdata = {
+static struct samsung_fixed_rate_clock exynos4210_fixed_rate_clks[] __initdata = {
FRATE(none, "sclk_usbphy1", NULL, CLK_IS_ROOT, 48000000),
};
/* list of mux clocks supported in all exynos4 soc's */
-struct samsung_mux_clock exynos4_mux_clks[] __initdata = {
+static struct samsung_mux_clock exynos4_mux_clks[] __initdata = {
MUX_FA(mout_apll, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
CLK_SET_RATE_PARENT, 0, "mout_apll"),
MUX(none, "mout_hdmi", mout_hdmi_p, SRC_TV, 0, 1),
@@ -367,17 +375,20 @@ struct samsung_mux_clock exynos4_mux_clks[] __initdata = {
CLK_SET_RATE_PARENT, 0),
MUX(none, "mout_spdif", mout_spdif_p, SRC_PERIL1, 8, 2),
MUX(none, "mout_onenand1", mout_onenand1_p, SRC_TOP0, 0, 1),
- MUX_A(sclk_epll, "sclk_epll", mout_epll_p, SRC_TOP0, 4, 1, "sclk_epll"),
+ MUX(sclk_epll, "sclk_epll", mout_epll_p, SRC_TOP0, 4, 1),
MUX(none, "mout_onenand", mout_onenand_p, SRC_TOP0, 28, 1),
};
/* list of mux clocks supported in exynos4210 soc */
-struct samsung_mux_clock exynos4210_mux_clks[] __initdata = {
+static struct samsung_mux_clock exynos4210_mux_early[] __initdata = {
+ MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP1, 0, 1),
+};
+
+static struct samsung_mux_clock exynos4210_mux_clks[] __initdata = {
MUX(none, "mout_aclk200", sclk_ampll_p4210, SRC_TOP0, 12, 1),
MUX(none, "mout_aclk100", sclk_ampll_p4210, SRC_TOP0, 16, 1),
MUX(none, "mout_aclk160", sclk_ampll_p4210, SRC_TOP0, 20, 1),
MUX(none, "mout_aclk133", sclk_ampll_p4210, SRC_TOP0, 24, 1),
- MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP1, 0, 1),
MUX(none, "mout_mixer", mout_mixer_p4210, SRC_TV, 4, 1),
MUX(none, "mout_dac", mout_dac_p4210, SRC_TV, 8, 1),
MUX(none, "mout_g2d0", sclk_ampll_p4210, E4210_SRC_IMAGE, 0, 1),
@@ -385,11 +396,9 @@ struct samsung_mux_clock exynos4210_mux_clks[] __initdata = {
MUX(none, "mout_g2d", mout_g2d_p, E4210_SRC_IMAGE, 8, 1),
MUX(none, "mout_fimd1", group1_p4210, E4210_SRC_LCD1, 0, 4),
MUX(none, "mout_mipi1", group1_p4210, E4210_SRC_LCD1, 12, 4),
- MUX_A(sclk_mpll, "sclk_mpll", mout_mpll_p, SRC_CPU, 8, 1, "mout_mpll"),
- MUX_A(mout_core, "mout_core", mout_core_p4210,
- SRC_CPU, 16, 1, "moutcore"),
- MUX_A(sclk_vpll, "sclk_vpll", sclk_vpll_p4210,
- SRC_TOP0, 8, 1, "sclk_vpll"),
+ MUX(sclk_mpll, "sclk_mpll", mout_mpll_p, SRC_CPU, 8, 1),
+ MUX(mout_core, "mout_core", mout_core_p4210, SRC_CPU, 16, 1),
+ MUX(sclk_vpll, "sclk_vpll", sclk_vpll_p4210, SRC_TOP0, 8, 1),
MUX(mout_fimc0, "mout_fimc0", group1_p4210, SRC_CAM, 0, 4),
MUX(mout_fimc1, "mout_fimc1", group1_p4210, SRC_CAM, 4, 4),
MUX(mout_fimc2, "mout_fimc2", group1_p4210, SRC_CAM, 8, 4),
@@ -423,9 +432,9 @@ struct samsung_mux_clock exynos4210_mux_clks[] __initdata = {
};
/* list of mux clocks supported in exynos4x12 soc */
-struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = {
- MUX_A(mout_mpll_user_c, "mout_mpll_user_c", mout_mpll_user_p4x12,
- SRC_CPU, 24, 1, "mout_mpll"),
+static struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = {
+ MUX(mout_mpll_user_c, "mout_mpll_user_c", mout_mpll_user_p4x12,
+ SRC_CPU, 24, 1),
MUX(none, "mout_aclk266_gps", aclk_p4412, SRC_TOP1, 4, 1),
MUX(none, "mout_aclk400_mcuisp", aclk_p4412, SRC_TOP1, 8, 1),
MUX(mout_mpll_user_t, "mout_mpll_user_t", mout_mpll_user_p4x12,
@@ -445,12 +454,9 @@ struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = {
MUX(none, "mout_jpeg0", sclk_ampll_p4x12, E4X12_SRC_CAM1, 0, 1),
MUX(none, "mout_jpeg1", sclk_evpll_p, E4X12_SRC_CAM1, 4, 1),
MUX(none, "mout_jpeg", mout_jpeg_p, E4X12_SRC_CAM1, 8, 1),
- MUX_A(sclk_mpll, "sclk_mpll", mout_mpll_p,
- SRC_DMC, 12, 1, "sclk_mpll"),
- MUX_A(sclk_vpll, "sclk_vpll", mout_vpll_p,
- SRC_TOP0, 8, 1, "sclk_vpll"),
- MUX_A(mout_core, "mout_core", mout_core_p4x12,
- SRC_CPU, 16, 1, "moutcore"),
+ MUX(sclk_mpll, "sclk_mpll", mout_mpll_p, SRC_DMC, 12, 1),
+ MUX(sclk_vpll, "sclk_vpll", mout_vpll_p, SRC_TOP0, 8, 1),
+ MUX(mout_core, "mout_core", mout_core_p4x12, SRC_CPU, 16, 1),
MUX(mout_fimc0, "mout_fimc0", group1_p4x12, SRC_CAM, 0, 4),
MUX(mout_fimc1, "mout_fimc1", group1_p4x12, SRC_CAM, 4, 4),
MUX(mout_fimc2, "mout_fimc2", group1_p4x12, SRC_CAM, 8, 4),
@@ -491,7 +497,7 @@ struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = {
};
/* list of divider clocks supported in all exynos4 soc's */
-struct samsung_div_clock exynos4_div_clks[] __initdata = {
+static struct samsung_div_clock exynos4_div_clks[] __initdata = {
DIV(none, "div_core", "mout_core", DIV_CPU0, 0, 3),
DIV(none, "div_core2", "div_core", DIV_CPU0, 28, 3),
DIV(none, "div_fimc0", "mout_fimc0", DIV_CAM, 0, 4),
@@ -538,9 +544,8 @@ struct samsung_div_clock exynos4_div_clks[] __initdata = {
DIV(none, "div_spi_pre2", "div_spi2", DIV_PERIL2, 8, 8),
DIV(none, "div_audio1", "mout_audio1", DIV_PERIL4, 0, 4),
DIV(none, "div_audio2", "mout_audio2", DIV_PERIL4, 16, 4),
- DIV_A(arm_clk, "arm_clk", "div_core2", DIV_CPU0, 28, 3, "armclk"),
- DIV_A(sclk_apll, "sclk_apll", "mout_apll",
- DIV_CPU0, 24, 3, "sclk_apll"),
+ DIV(arm_clk, "arm_clk", "div_core2", DIV_CPU0, 28, 3),
+ DIV(sclk_apll, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3),
DIV_F(none, "div_mipi_pre0", "div_mipi0", DIV_LCD0, 20, 4,
CLK_SET_RATE_PARENT, 0),
DIV_F(none, "div_mmc_pre0", "div_mmc0", DIV_FSYS1, 8, 8,
@@ -554,7 +559,7 @@ struct samsung_div_clock exynos4_div_clks[] __initdata = {
};
/* list of divider clocks supported in exynos4210 soc */
-struct samsung_div_clock exynos4210_div_clks[] __initdata = {
+static struct samsung_div_clock exynos4210_div_clks[] __initdata = {
DIV(aclk200, "aclk200", "mout_aclk200", DIV_TOP, 0, 3),
DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_IMAGE, 0, 4),
DIV(none, "div_fimd1", "mout_fimd1", E4210_DIV_LCD1, 0, 4),
@@ -565,7 +570,7 @@ struct samsung_div_clock exynos4210_div_clks[] __initdata = {
};
/* list of divider clocks supported in exynos4x12 soc */
-struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
+static struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
DIV(none, "div_mdnie0", "mout_mdnie0", DIV_LCD0, 4, 4),
DIV(none, "div_mdnie_pwm0", "mout_mdnie_pwm0", DIV_LCD0, 8, 4),
DIV(none, "div_mdnie_pwm_pre0", "div_mdnie_pwm0", DIV_LCD0, 12, 4),
@@ -594,7 +599,7 @@ struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
};
/* list of gate clocks supported in all exynos4 soc's */
-struct samsung_gate_clock exynos4_gate_clks[] __initdata = {
+static struct samsung_gate_clock exynos4_gate_clks[] __initdata = {
/*
* After all Exynos4 based platforms are migrated to use device tree,
* the device name and clock alias names specified below for some
@@ -629,164 +634,151 @@ struct samsung_gate_clock exynos4_gate_clks[] __initdata = {
CLK_SET_RATE_PARENT, 0),
GATE(sclk_audio1, "sclk_audio1", "div_audio1", SRC_MASK_PERIL1, 0,
CLK_SET_RATE_PARENT, 0),
- GATE_D(vp, "s5p-mixer", "vp", "aclk160", GATE_IP_TV, 0, 0, 0),
- GATE_D(mixer, "s5p-mixer", "mixer", "aclk160", GATE_IP_TV, 1, 0, 0),
- GATE_D(hdmi, "exynos4-hdmi", "hdmi", "aclk160", GATE_IP_TV, 3, 0, 0),
- GATE_A(pwm, "pwm", "aclk100", GATE_IP_PERIL, 24, 0, 0, "timers"),
- GATE_A(sdmmc4, "sdmmc4", "aclk133", GATE_IP_FSYS, 9, 0, 0, "biu"),
- GATE_A(usb_host, "usb_host", "aclk133",
- GATE_IP_FSYS, 12, 0, 0, "usbhost"),
- GATE_DA(sclk_fimc0, "exynos4-fimc.0", "sclk_fimc0", "div_fimc0",
- SRC_MASK_CAM, 0, CLK_SET_RATE_PARENT, 0, "sclk_fimc"),
- GATE_DA(sclk_fimc1, "exynos4-fimc.1", "sclk_fimc1", "div_fimc1",
- SRC_MASK_CAM, 4, CLK_SET_RATE_PARENT, 0, "sclk_fimc"),
- GATE_DA(sclk_fimc2, "exynos4-fimc.2", "sclk_fimc2", "div_fimc2",
- SRC_MASK_CAM, 8, CLK_SET_RATE_PARENT, 0, "sclk_fimc"),
- GATE_DA(sclk_fimc3, "exynos4-fimc.3", "sclk_fimc3", "div_fimc3",
- SRC_MASK_CAM, 12, CLK_SET_RATE_PARENT, 0, "sclk_fimc"),
- GATE_DA(sclk_csis0, "s5p-mipi-csis.0", "sclk_csis0", "div_csis0",
- SRC_MASK_CAM, 24, CLK_SET_RATE_PARENT, 0, "sclk_csis"),
- GATE_DA(sclk_csis1, "s5p-mipi-csis.1", "sclk_csis1", "div_csis1",
- SRC_MASK_CAM, 28, CLK_SET_RATE_PARENT, 0, "sclk_csis"),
- GATE_DA(sclk_fimd0, "exynos4-fb.0", "sclk_fimd0", "div_fimd0",
- SRC_MASK_LCD0, 0, CLK_SET_RATE_PARENT, 0, "sclk_fimd"),
- GATE_DA(sclk_mmc0, "exynos4-sdhci.0", "sclk_mmc0", "div_mmc_pre0",
- SRC_MASK_FSYS, 0, CLK_SET_RATE_PARENT, 0,
- "mmc_busclk.2"),
- GATE_DA(sclk_mmc1, "exynos4-sdhci.1", "sclk_mmc1", "div_mmc_pre1",
- SRC_MASK_FSYS, 4, CLK_SET_RATE_PARENT, 0,
- "mmc_busclk.2"),
- GATE_DA(sclk_mmc2, "exynos4-sdhci.2", "sclk_mmc2", "div_mmc_pre2",
- SRC_MASK_FSYS, 8, CLK_SET_RATE_PARENT, 0,
- "mmc_busclk.2"),
- GATE_DA(sclk_mmc3, "exynos4-sdhci.3", "sclk_mmc3", "div_mmc_pre3",
- SRC_MASK_FSYS, 12, CLK_SET_RATE_PARENT, 0,
- "mmc_busclk.2"),
- GATE_DA(sclk_mmc4, NULL, "sclk_mmc4", "div_mmc_pre4",
- SRC_MASK_FSYS, 16, CLK_SET_RATE_PARENT, 0, "ciu"),
- GATE_DA(sclk_uart0, "exynos4210-uart.0", "uclk0", "div_uart0",
- SRC_MASK_PERIL0, 0, CLK_SET_RATE_PARENT,
- 0, "clk_uart_baud0"),
- GATE_DA(sclk_uart1, "exynos4210-uart.1", "uclk1", "div_uart1",
- SRC_MASK_PERIL0, 4, CLK_SET_RATE_PARENT,
- 0, "clk_uart_baud0"),
- GATE_DA(sclk_uart2, "exynos4210-uart.2", "uclk2", "div_uart2",
- SRC_MASK_PERIL0, 8, CLK_SET_RATE_PARENT,
- 0, "clk_uart_baud0"),
- GATE_DA(sclk_uart3, "exynos4210-uart.3", "uclk3", "div_uart3",
- SRC_MASK_PERIL0, 12, CLK_SET_RATE_PARENT,
- 0, "clk_uart_baud0"),
- GATE_DA(sclk_uart4, "exynos4210-uart.4", "uclk4", "div_uart4",
- SRC_MASK_PERIL0, 16, CLK_SET_RATE_PARENT,
- 0, "clk_uart_baud0"),
+ GATE(vp, "vp", "aclk160", GATE_IP_TV, 0, 0, 0),
+ GATE(mixer, "mixer", "aclk160", GATE_IP_TV, 1, 0, 0),
+ GATE(hdmi, "hdmi", "aclk160", GATE_IP_TV, 3, 0, 0),
+ GATE(pwm, "pwm", "aclk100", GATE_IP_PERIL, 24, 0, 0),
+ GATE(sdmmc4, "sdmmc4", "aclk133", GATE_IP_FSYS, 9, 0, 0),
+ GATE(usb_host, "usb_host", "aclk133", GATE_IP_FSYS, 12, 0, 0),
+ GATE(sclk_fimc0, "sclk_fimc0", "div_fimc0", SRC_MASK_CAM, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_fimc1, "sclk_fimc1", "div_fimc1", SRC_MASK_CAM, 4,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_fimc2, "sclk_fimc2", "div_fimc2", SRC_MASK_CAM, 8,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_fimc3, "sclk_fimc3", "div_fimc3", SRC_MASK_CAM, 12,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_csis0, "sclk_csis0", "div_csis0", SRC_MASK_CAM, 24,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_csis1, "sclk_csis1", "div_csis1", SRC_MASK_CAM, 28,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_fimd0, "sclk_fimd0", "div_fimd0", SRC_MASK_LCD0, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_mmc0, "sclk_mmc0", "div_mmc_pre0", SRC_MASK_FSYS, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_mmc1, "sclk_mmc1", "div_mmc_pre1", SRC_MASK_FSYS, 4,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_mmc2, "sclk_mmc2", "div_mmc_pre2", SRC_MASK_FSYS, 8,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_mmc3, "sclk_mmc3", "div_mmc_pre3", SRC_MASK_FSYS, 12,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_mmc4, "sclk_mmc4", "div_mmc_pre4", SRC_MASK_FSYS, 16,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_uart0, "uclk0", "div_uart0", SRC_MASK_PERIL0, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_uart1, "uclk1", "div_uart1", SRC_MASK_PERIL0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_uart2, "uclk2", "div_uart2", SRC_MASK_PERIL0, 8,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_uart3, "uclk3", "div_uart3", SRC_MASK_PERIL0, 12,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_uart4, "uclk4", "div_uart4", SRC_MASK_PERIL0, 16,
+ CLK_SET_RATE_PARENT, 0),
GATE(sclk_audio2, "sclk_audio2", "div_audio2", SRC_MASK_PERIL1, 4,
CLK_SET_RATE_PARENT, 0),
- GATE_DA(sclk_spi0, "exynos4210-spi.0", "sclk_spi0", "div_spi_pre0",
- SRC_MASK_PERIL1, 16, CLK_SET_RATE_PARENT,
- 0, "spi_busclk0"),
- GATE_DA(sclk_spi1, "exynos4210-spi.1", "sclk_spi1", "div_spi_pre1",
- SRC_MASK_PERIL1, 20, CLK_SET_RATE_PARENT,
- 0, "spi_busclk0"),
- GATE_DA(sclk_spi2, "exynos4210-spi.2", "sclk_spi2", "div_spi_pre2",
- SRC_MASK_PERIL1, 24, CLK_SET_RATE_PARENT,
- 0, "spi_busclk0"),
- GATE_DA(fimc0, "exynos4-fimc.0", "fimc0", "aclk160",
- GATE_IP_CAM, 0, 0, 0, "fimc"),
- GATE_DA(fimc1, "exynos4-fimc.1", "fimc1", "aclk160",
- GATE_IP_CAM, 1, 0, 0, "fimc"),
- GATE_DA(fimc2, "exynos4-fimc.2", "fimc2", "aclk160",
- GATE_IP_CAM, 2, 0, 0, "fimc"),
- GATE_DA(fimc3, "exynos4-fimc.3", "fimc3", "aclk160",
- GATE_IP_CAM, 3, 0, 0, "fimc"),
- GATE_DA(csis0, "s5p-mipi-csis.0", "csis0", "aclk160",
- GATE_IP_CAM, 4, 0, 0, "fimc"),
- GATE_DA(csis1, "s5p-mipi-csis.1", "csis1", "aclk160",
- GATE_IP_CAM, 5, 0, 0, "fimc"),
- GATE_DA(smmu_fimc0, "exynos-sysmmu.5", "smmu_fimc0", "aclk160",
- GATE_IP_CAM, 7, 0, 0, "sysmmu"),
- GATE_DA(smmu_fimc1, "exynos-sysmmu.6", "smmu_fimc1", "aclk160",
- GATE_IP_CAM, 8, 0, 0, "sysmmu"),
- GATE_DA(smmu_fimc2, "exynos-sysmmu.7", "smmu_fimc2", "aclk160",
- GATE_IP_CAM, 9, 0, 0, "sysmmu"),
- GATE_DA(smmu_fimc3, "exynos-sysmmu.8", "smmu_fimc3", "aclk160",
- GATE_IP_CAM, 10, 0, 0, "sysmmu"),
- GATE_DA(smmu_jpeg, "exynos-sysmmu.3", "smmu_jpeg", "aclk160",
- GATE_IP_CAM, 11, 0, 0, "sysmmu"),
+ GATE(sclk_spi0, "sclk_spi0", "div_spi_pre0", SRC_MASK_PERIL1, 16,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_spi1, "sclk_spi1", "div_spi_pre1", SRC_MASK_PERIL1, 20,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(sclk_spi2, "sclk_spi2", "div_spi_pre2", SRC_MASK_PERIL1, 24,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(fimc0, "fimc0", "aclk160", GATE_IP_CAM, 0,
+ 0, 0),
+ GATE(fimc1, "fimc1", "aclk160", GATE_IP_CAM, 1,
+ 0, 0),
+ GATE(fimc2, "fimc2", "aclk160", GATE_IP_CAM, 2,
+ 0, 0),
+ GATE(fimc3, "fimc3", "aclk160", GATE_IP_CAM, 3,
+ 0, 0),
+ GATE(csis0, "csis0", "aclk160", GATE_IP_CAM, 4,
+ 0, 0),
+ GATE(csis1, "csis1", "aclk160", GATE_IP_CAM, 5,
+ 0, 0),
+ GATE(smmu_fimc0, "smmu_fimc0", "aclk160", GATE_IP_CAM, 7,
+ 0, 0),
+ GATE(smmu_fimc1, "smmu_fimc1", "aclk160", GATE_IP_CAM, 8,
+ 0, 0),
+ GATE(smmu_fimc2, "smmu_fimc2", "aclk160", GATE_IP_CAM, 9,
+ 0, 0),
+ GATE(smmu_fimc3, "smmu_fimc3", "aclk160", GATE_IP_CAM, 10,
+ 0, 0),
+ GATE(smmu_jpeg, "smmu_jpeg", "aclk160", GATE_IP_CAM, 11,
+ 0, 0),
GATE(pixelasyncm0, "pxl_async0", "aclk160", GATE_IP_CAM, 17, 0, 0),
GATE(pixelasyncm1, "pxl_async1", "aclk160", GATE_IP_CAM, 18, 0, 0),
- GATE_DA(smmu_tv, "exynos-sysmmu.2", "smmu_tv", "aclk160",
- GATE_IP_TV, 4, 0, 0, "sysmmu"),
- GATE_DA(mfc, "s5p-mfc", "mfc", "aclk100", GATE_IP_MFC, 0, 0, 0, "mfc"),
- GATE_DA(smmu_mfcl, "exynos-sysmmu.0", "smmu_mfcl", "aclk100",
- GATE_IP_MFC, 1, 0, 0, "sysmmu"),
- GATE_DA(smmu_mfcr, "exynos-sysmmu.1", "smmu_mfcr", "aclk100",
- GATE_IP_MFC, 2, 0, 0, "sysmmu"),
- GATE_DA(fimd0, "exynos4-fb.0", "fimd0", "aclk160",
- GATE_IP_LCD0, 0, 0, 0, "fimd"),
- GATE_DA(smmu_fimd0, "exynos-sysmmu.10", "smmu_fimd0", "aclk160",
- GATE_IP_LCD0, 4, 0, 0, "sysmmu"),
- GATE_DA(pdma0, "dma-pl330.0", "pdma0", "aclk133",
- GATE_IP_FSYS, 0, 0, 0, "dma"),
- GATE_DA(pdma1, "dma-pl330.1", "pdma1", "aclk133",
- GATE_IP_FSYS, 1, 0, 0, "dma"),
- GATE_DA(sdmmc0, "exynos4-sdhci.0", "sdmmc0", "aclk133",
- GATE_IP_FSYS, 5, 0, 0, "hsmmc"),
- GATE_DA(sdmmc1, "exynos4-sdhci.1", "sdmmc1", "aclk133",
- GATE_IP_FSYS, 6, 0, 0, "hsmmc"),
- GATE_DA(sdmmc2, "exynos4-sdhci.2", "sdmmc2", "aclk133",
- GATE_IP_FSYS, 7, 0, 0, "hsmmc"),
- GATE_DA(sdmmc3, "exynos4-sdhci.3", "sdmmc3", "aclk133",
- GATE_IP_FSYS, 8, 0, 0, "hsmmc"),
- GATE_DA(uart0, "exynos4210-uart.0", "uart0", "aclk100",
- GATE_IP_PERIL, 0, 0, 0, "uart"),
- GATE_DA(uart1, "exynos4210-uart.1", "uart1", "aclk100",
- GATE_IP_PERIL, 1, 0, 0, "uart"),
- GATE_DA(uart2, "exynos4210-uart.2", "uart2", "aclk100",
- GATE_IP_PERIL, 2, 0, 0, "uart"),
- GATE_DA(uart3, "exynos4210-uart.3", "uart3", "aclk100",
- GATE_IP_PERIL, 3, 0, 0, "uart"),
- GATE_DA(uart4, "exynos4210-uart.4", "uart4", "aclk100",
- GATE_IP_PERIL, 4, 0, 0, "uart"),
- GATE_DA(i2c0, "s3c2440-i2c.0", "i2c0", "aclk100",
- GATE_IP_PERIL, 6, 0, 0, "i2c"),
- GATE_DA(i2c1, "s3c2440-i2c.1", "i2c1", "aclk100",
- GATE_IP_PERIL, 7, 0, 0, "i2c"),
- GATE_DA(i2c2, "s3c2440-i2c.2", "i2c2", "aclk100",
- GATE_IP_PERIL, 8, 0, 0, "i2c"),
- GATE_DA(i2c3, "s3c2440-i2c.3", "i2c3", "aclk100",
- GATE_IP_PERIL, 9, 0, 0, "i2c"),
- GATE_DA(i2c4, "s3c2440-i2c.4", "i2c4", "aclk100",
- GATE_IP_PERIL, 10, 0, 0, "i2c"),
- GATE_DA(i2c5, "s3c2440-i2c.5", "i2c5", "aclk100",
- GATE_IP_PERIL, 11, 0, 0, "i2c"),
- GATE_DA(i2c6, "s3c2440-i2c.6", "i2c6", "aclk100",
- GATE_IP_PERIL, 12, 0, 0, "i2c"),
- GATE_DA(i2c7, "s3c2440-i2c.7", "i2c7", "aclk100",
- GATE_IP_PERIL, 13, 0, 0, "i2c"),
- GATE_DA(i2c_hdmi, "s3c2440-hdmiphy-i2c", "i2c-hdmi", "aclk100",
- GATE_IP_PERIL, 14, 0, 0, "i2c"),
- GATE_DA(spi0, "exynos4210-spi.0", "spi0", "aclk100",
- GATE_IP_PERIL, 16, 0, 0, "spi"),
- GATE_DA(spi1, "exynos4210-spi.1", "spi1", "aclk100",
- GATE_IP_PERIL, 17, 0, 0, "spi"),
- GATE_DA(spi2, "exynos4210-spi.2", "spi2", "aclk100",
- GATE_IP_PERIL, 18, 0, 0, "spi"),
- GATE_DA(i2s1, "samsung-i2s.1", "i2s1", "aclk100",
- GATE_IP_PERIL, 20, 0, 0, "iis"),
- GATE_DA(i2s2, "samsung-i2s.2", "i2s2", "aclk100",
- GATE_IP_PERIL, 21, 0, 0, "iis"),
- GATE_DA(pcm1, "samsung-pcm.1", "pcm1", "aclk100",
- GATE_IP_PERIL, 22, 0, 0, "pcm"),
- GATE_DA(pcm2, "samsung-pcm.2", "pcm2", "aclk100",
- GATE_IP_PERIL, 23, 0, 0, "pcm"),
- GATE_DA(spdif, "samsung-spdif", "spdif", "aclk100",
- GATE_IP_PERIL, 26, 0, 0, "spdif"),
- GATE_DA(ac97, "samsung-ac97", "ac97", "aclk100",
- GATE_IP_PERIL, 27, 0, 0, "ac97"),
+ GATE(smmu_tv, "smmu_tv", "aclk160", GATE_IP_TV, 4,
+ 0, 0),
+ GATE(mfc, "mfc", "aclk100", GATE_IP_MFC, 0, 0, 0),
+ GATE(smmu_mfcl, "smmu_mfcl", "aclk100", GATE_IP_MFC, 1,
+ 0, 0),
+ GATE(smmu_mfcr, "smmu_mfcr", "aclk100", GATE_IP_MFC, 2,
+ 0, 0),
+ GATE(fimd0, "fimd0", "aclk160", GATE_IP_LCD0, 0,
+ 0, 0),
+ GATE(smmu_fimd0, "smmu_fimd0", "aclk160", GATE_IP_LCD0, 4,
+ 0, 0),
+ GATE(pdma0, "pdma0", "aclk133", GATE_IP_FSYS, 0,
+ 0, 0),
+ GATE(pdma1, "pdma1", "aclk133", GATE_IP_FSYS, 1,
+ 0, 0),
+ GATE(sdmmc0, "sdmmc0", "aclk133", GATE_IP_FSYS, 5,
+ 0, 0),
+ GATE(sdmmc1, "sdmmc1", "aclk133", GATE_IP_FSYS, 6,
+ 0, 0),
+ GATE(sdmmc2, "sdmmc2", "aclk133", GATE_IP_FSYS, 7,
+ 0, 0),
+ GATE(sdmmc3, "sdmmc3", "aclk133", GATE_IP_FSYS, 8,
+ 0, 0),
+ GATE(uart0, "uart0", "aclk100", GATE_IP_PERIL, 0,
+ 0, 0),
+ GATE(uart1, "uart1", "aclk100", GATE_IP_PERIL, 1,
+ 0, 0),
+ GATE(uart2, "uart2", "aclk100", GATE_IP_PERIL, 2,
+ 0, 0),
+ GATE(uart3, "uart3", "aclk100", GATE_IP_PERIL, 3,
+ 0, 0),
+ GATE(uart4, "uart4", "aclk100", GATE_IP_PERIL, 4,
+ 0, 0),
+ GATE(i2c0, "i2c0", "aclk100", GATE_IP_PERIL, 6,
+ 0, 0),
+ GATE(i2c1, "i2c1", "aclk100", GATE_IP_PERIL, 7,
+ 0, 0),
+ GATE(i2c2, "i2c2", "aclk100", GATE_IP_PERIL, 8,
+ 0, 0),
+ GATE(i2c3, "i2c3", "aclk100", GATE_IP_PERIL, 9,
+ 0, 0),
+ GATE(i2c4, "i2c4", "aclk100", GATE_IP_PERIL, 10,
+ 0, 0),
+ GATE(i2c5, "i2c5", "aclk100", GATE_IP_PERIL, 11,
+ 0, 0),
+ GATE(i2c6, "i2c6", "aclk100", GATE_IP_PERIL, 12,
+ 0, 0),
+ GATE(i2c7, "i2c7", "aclk100", GATE_IP_PERIL, 13,
+ 0, 0),
+ GATE(i2c_hdmi, "i2c-hdmi", "aclk100", GATE_IP_PERIL, 14,
+ 0, 0),
+ GATE(spi0, "spi0", "aclk100", GATE_IP_PERIL, 16,
+ 0, 0),
+ GATE(spi1, "spi1", "aclk100", GATE_IP_PERIL, 17,
+ 0, 0),
+ GATE(spi2, "spi2", "aclk100", GATE_IP_PERIL, 18,
+ 0, 0),
+ GATE(i2s1, "i2s1", "aclk100", GATE_IP_PERIL, 20,
+ 0, 0),
+ GATE(i2s2, "i2s2", "aclk100", GATE_IP_PERIL, 21,
+ 0, 0),
+ GATE(pcm1, "pcm1", "aclk100", GATE_IP_PERIL, 22,
+ 0, 0),
+ GATE(pcm2, "pcm2", "aclk100", GATE_IP_PERIL, 23,
+ 0, 0),
+ GATE(spdif, "spdif", "aclk100", GATE_IP_PERIL, 26,
+ 0, 0),
+ GATE(ac97, "ac97", "aclk100", GATE_IP_PERIL, 27,
+ 0, 0),
};
/* list of gate clocks supported in exynos4210 soc */
-struct samsung_gate_clock exynos4210_gate_clks[] __initdata = {
+static struct samsung_gate_clock exynos4210_gate_clks[] __initdata = {
GATE(tvenc, "tvenc", "aclk160", GATE_IP_TV, 2, 0, 0),
GATE(g2d, "g2d", "aclk200", E4210_GATE_IP_IMAGE, 0, 0, 0),
GATE(rotator, "rotator", "aclk200", E4210_GATE_IP_IMAGE, 1, 0, 0),
@@ -811,17 +803,23 @@ struct samsung_gate_clock exynos4210_gate_clks[] __initdata = {
SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
GATE(sclk_mixer, "sclk_mixer", "mout_mixer", SRC_MASK_TV, 4, 0, 0),
GATE(sclk_dac, "sclk_dac", "mout_dac", SRC_MASK_TV, 8, 0, 0),
- GATE_A(tsadc, "tsadc", "aclk100", GATE_IP_PERIL, 15, 0, 0, "adc"),
- GATE_A(mct, "mct", "aclk100", E4210_GATE_IP_PERIR, 13, 0, 0, "mct"),
- GATE_A(wdt, "watchdog", "aclk100", E4210_GATE_IP_PERIR, 14, 0, 0, "watchdog"),
- GATE_A(rtc, "rtc", "aclk100", E4210_GATE_IP_PERIR, 15, 0, 0, "rtc"),
- GATE_A(keyif, "keyif", "aclk100", E4210_GATE_IP_PERIR, 16, 0, 0, "keypad"),
- GATE_DA(sclk_fimd1, "exynos4-fb.1", "sclk_fimd1", "div_fimd1",
- E4210_SRC_MASK_LCD1, 0, CLK_SET_RATE_PARENT, 0, "sclk_fimd"),
+ GATE(tsadc, "tsadc", "aclk100", GATE_IP_PERIL, 15,
+ 0, 0),
+ GATE(mct, "mct", "aclk100", E4210_GATE_IP_PERIR, 13,
+ 0, 0),
+ GATE(wdt, "watchdog", "aclk100", E4210_GATE_IP_PERIR, 14,
+ 0, 0),
+ GATE(rtc, "rtc", "aclk100", E4210_GATE_IP_PERIR, 15,
+ 0, 0),
+ GATE(keyif, "keyif", "aclk100", E4210_GATE_IP_PERIR, 16,
+ 0, 0),
+ GATE(sclk_fimd1, "sclk_fimd1", "div_fimd1", E4210_SRC_MASK_LCD1, 0,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(tmu_apbif, "tmu_apbif", "aclk100", E4210_GATE_IP_PERIR, 17, 0, 0),
};
/* list of gate clocks supported in exynos4x12 soc */
-struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
+static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
GATE(audss, "audss", "sclk_epll", E4X12_GATE_IP_MAUDIO, 0, 0, 0),
GATE(mdnie0, "mdnie0", "aclk160", GATE_IP_LCD0, 2, 0, 0),
GATE(rotator, "rotator", "aclk200", E4X12_GATE_IP_IMAGE, 1, 0, 0),
@@ -840,10 +838,11 @@ struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
GATE(smmu_rotator, "smmu_rotator", "aclk200",
E4X12_GATE_IP_IMAGE, 4, 0, 0),
- GATE_A(mct, "mct", "aclk100", E4X12_GATE_IP_PERIR, 13, 0, 0, "mct"),
- GATE_A(rtc, "rtc", "aclk100", E4X12_GATE_IP_PERIR, 15, 0, 0, "rtc"),
- GATE_A(keyif, "keyif", "aclk100",
- E4X12_GATE_IP_PERIR, 16, 0, 0, "keypad"),
+ GATE(mct, "mct", "aclk100", E4X12_GATE_IP_PERIR, 13,
+ 0, 0),
+ GATE(rtc, "rtc", "aclk100", E4X12_GATE_IP_PERIR, 15,
+ 0, 0),
+ GATE(keyif, "keyif", "aclk100", E4X12_GATE_IP_PERIR, 16, 0, 0),
GATE(sclk_pwm_isp, "sclk_pwm_isp", "div_pwm_isp",
E4X12_SRC_MASK_ISP, 0, CLK_SET_RATE_PARENT, 0),
GATE(sclk_spi0_isp, "sclk_spi0_isp", "div_spi0_isp_pre",
@@ -860,12 +859,11 @@ struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
E4X12_GATE_IP_ISP, 2, 0, 0),
GATE(uart_isp_sclk, "uart_isp_sclk", "sclk_uart_isp",
E4X12_GATE_IP_ISP, 3, 0, 0),
- GATE_A(wdt, "watchdog", "aclk100",
- E4X12_GATE_IP_PERIR, 14, 0, 0, "watchdog"),
- GATE_DA(pcm0, "samsung-pcm.0", "pcm0", "aclk100",
- E4X12_GATE_IP_MAUDIO, 2, 0, 0, "pcm"),
- GATE_DA(i2s0, "samsung-i2s.0", "i2s0", "aclk100",
- E4X12_GATE_IP_MAUDIO, 3, 0, 0, "iis"),
+ GATE(wdt, "watchdog", "aclk100", E4X12_GATE_IP_PERIR, 14, 0, 0),
+ GATE(pcm0, "pcm0", "aclk100", E4X12_GATE_IP_MAUDIO, 2,
+ 0, 0),
+ GATE(i2s0, "i2s0", "aclk100", E4X12_GATE_IP_MAUDIO, 3,
+ 0, 0),
GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1,
@@ -919,6 +917,21 @@ struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(g2d, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0),
+ GATE(tmu_apbif, "tmu_apbif", "aclk100", E4X12_GATE_IP_PERIR, 17, 0, 0),
+};
+
+static struct samsung_clock_alias exynos4_aliases[] __initdata = {
+ ALIAS(mout_core, NULL, "moutcore"),
+ ALIAS(arm_clk, NULL, "armclk"),
+ ALIAS(sclk_apll, NULL, "mout_apll"),
+};
+
+static struct samsung_clock_alias exynos4210_aliases[] __initdata = {
+ ALIAS(sclk_mpll, NULL, "mout_mpll"),
+};
+
+static struct samsung_clock_alias exynos4x12_aliases[] __initdata = {
+ ALIAS(mout_mpll_user_c, NULL, "mout_mpll"),
};
/*
@@ -973,36 +986,116 @@ static void __init exynos4_clk_register_finpll(unsigned long xom)
}
-/*
- * This function allows non-dt platforms to specify the clock speed of the
- * xxti and xusbxti clocks. These clocks are then registered with the specified
- * clock speed.
- */
-void __init exynos4_clk_register_fixed_ext(unsigned long xxti_f,
- unsigned long xusbxti_f)
-{
- exynos4_fixed_rate_ext_clks[0].fixed_rate = xxti_f;
- exynos4_fixed_rate_ext_clks[1].fixed_rate = xusbxti_f;
- samsung_clk_register_fixed_rate(exynos4_fixed_rate_ext_clks,
- ARRAY_SIZE(exynos4_fixed_rate_ext_clks));
-}
-
-static __initdata struct of_device_id ext_clk_match[] = {
+static struct of_device_id ext_clk_match[] __initdata = {
{ .compatible = "samsung,clock-xxti", .data = (void *)0, },
{ .compatible = "samsung,clock-xusbxti", .data = (void *)1, },
{},
};
+/* PLLs PMS values */
+static struct samsung_pll_rate_table exynos4210_apll_rates[] __initdata = {
+ PLL_45XX_RATE(1200000000, 150, 3, 1, 28),
+ PLL_45XX_RATE(1000000000, 250, 6, 1, 28),
+ PLL_45XX_RATE( 800000000, 200, 6, 1, 28),
+ PLL_45XX_RATE( 666857142, 389, 14, 1, 13),
+ PLL_45XX_RATE( 600000000, 100, 4, 1, 13),
+ PLL_45XX_RATE( 533000000, 533, 24, 1, 5),
+ PLL_45XX_RATE( 500000000, 250, 6, 2, 28),
+ PLL_45XX_RATE( 400000000, 200, 6, 2, 28),
+ PLL_45XX_RATE( 200000000, 200, 6, 3, 28),
+ { /* sentinel */ }
+};
+
+static struct samsung_pll_rate_table exynos4210_epll_rates[] __initdata = {
+ PLL_4600_RATE(192000000, 48, 3, 1, 0, 0),
+ PLL_4600_RATE(180633605, 45, 3, 1, 10381, 0),
+ PLL_4600_RATE(180000000, 45, 3, 1, 0, 0),
+ PLL_4600_RATE( 73727996, 73, 3, 3, 47710, 1),
+ PLL_4600_RATE( 67737602, 90, 4, 3, 20762, 1),
+ PLL_4600_RATE( 49151992, 49, 3, 3, 9961, 0),
+ PLL_4600_RATE( 45158401, 45, 3, 3, 10381, 0),
+ { /* sentinel */ }
+};
+
+static struct samsung_pll_rate_table exynos4210_vpll_rates[] __initdata = {
+ PLL_4650_RATE(360000000, 44, 3, 0, 1024, 0, 14, 0),
+ PLL_4650_RATE(324000000, 53, 2, 1, 1024, 1, 1, 1),
+ PLL_4650_RATE(259617187, 63, 3, 1, 1950, 0, 20, 1),
+ PLL_4650_RATE(110000000, 53, 3, 2, 2048, 0, 17, 0),
+ PLL_4650_RATE( 55360351, 53, 3, 3, 2417, 0, 17, 0),
+ { /* sentinel */ }
+};
+
+static struct samsung_pll_rate_table exynos4x12_apll_rates[] __initdata = {
+ PLL_35XX_RATE(1500000000, 250, 4, 0),
+ PLL_35XX_RATE(1400000000, 175, 3, 0),
+ PLL_35XX_RATE(1300000000, 325, 6, 0),
+ PLL_35XX_RATE(1200000000, 200, 4, 0),
+ PLL_35XX_RATE(1100000000, 275, 6, 0),
+ PLL_35XX_RATE(1000000000, 125, 3, 0),
+ PLL_35XX_RATE( 900000000, 150, 4, 0),
+ PLL_35XX_RATE( 800000000, 100, 3, 0),
+ PLL_35XX_RATE( 700000000, 175, 3, 1),
+ PLL_35XX_RATE( 600000000, 200, 4, 1),
+ PLL_35XX_RATE( 500000000, 125, 3, 1),
+ PLL_35XX_RATE( 400000000, 100, 3, 1),
+ PLL_35XX_RATE( 300000000, 200, 4, 2),
+ PLL_35XX_RATE( 200000000, 100, 3, 2),
+ { /* sentinel */ }
+};
+
+static struct samsung_pll_rate_table exynos4x12_epll_rates[] __initdata = {
+ PLL_36XX_RATE(192000000, 48, 3, 1, 0),
+ PLL_36XX_RATE(180633605, 45, 3, 1, 10381),
+ PLL_36XX_RATE(180000000, 45, 3, 1, 0),
+ PLL_36XX_RATE( 73727996, 73, 3, 3, 47710),
+ PLL_36XX_RATE( 67737602, 90, 4, 3, 20762),
+ PLL_36XX_RATE( 49151992, 49, 3, 3, 9961),
+ PLL_36XX_RATE( 45158401, 45, 3, 3, 10381),
+ { /* sentinel */ }
+};
+
+static struct samsung_pll_rate_table exynos4x12_vpll_rates[] __initdata = {
+ PLL_36XX_RATE(533000000, 133, 3, 1, 16384),
+ PLL_36XX_RATE(440000000, 110, 3, 1, 0),
+ PLL_36XX_RATE(350000000, 175, 3, 2, 0),
+ PLL_36XX_RATE(266000000, 133, 3, 2, 0),
+ PLL_36XX_RATE(160000000, 160, 3, 3, 0),
+ PLL_36XX_RATE(106031250, 53, 3, 2, 1024),
+ PLL_36XX_RATE( 53015625, 53, 3, 3, 1024),
+ { /* sentinel */ }
+};
+
+static struct samsung_pll_clock exynos4210_plls[nr_plls] __initdata = {
+ [apll] = PLL_A(pll_4508, fout_apll, "fout_apll", "fin_pll", APLL_LOCK,
+ APLL_CON0, "fout_apll", NULL),
+ [mpll] = PLL_A(pll_4508, fout_mpll, "fout_mpll", "fin_pll",
+ E4210_MPLL_LOCK, E4210_MPLL_CON0, "fout_mpll", NULL),
+ [epll] = PLL_A(pll_4600, fout_epll, "fout_epll", "fin_pll", EPLL_LOCK,
+ EPLL_CON0, "fout_epll", NULL),
+ [vpll] = PLL_A(pll_4650c, fout_vpll, "fout_vpll", "mout_vpllsrc",
+ VPLL_LOCK, VPLL_CON0, "fout_vpll", NULL),
+};
+
+static struct samsung_pll_clock exynos4x12_plls[nr_plls] __initdata = {
+ [apll] = PLL(pll_35xx, fout_apll, "fout_apll", "fin_pll",
+ APLL_LOCK, APLL_CON0, NULL),
+ [mpll] = PLL(pll_35xx, fout_mpll, "fout_mpll", "fin_pll",
+ E4X12_MPLL_LOCK, E4X12_MPLL_CON0, NULL),
+ [epll] = PLL(pll_36xx, fout_epll, "fout_epll", "fin_pll",
+ EPLL_LOCK, EPLL_CON0, NULL),
+ [vpll] = PLL(pll_36xx, fout_vpll, "fout_vpll", "fin_pll",
+ VPLL_LOCK, VPLL_CON0, NULL),
+};
+
/* register exynos4 clocks */
-void __init exynos4_clk_init(struct device_node *np, enum exynos4_soc exynos4_soc, void __iomem *reg_base, unsigned long xom)
+static void __init exynos4_clk_init(struct device_node *np,
+ enum exynos4_soc exynos4_soc,
+ void __iomem *reg_base, unsigned long xom)
{
- struct clk *apll, *mpll, *epll, *vpll;
-
- if (np) {
- reg_base = of_iomap(np, 0);
- if (!reg_base)
- panic("%s: failed to map registers\n", __func__);
- }
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ panic("%s: failed to map registers\n", __func__);
if (exynos4_soc == EXYNOS4210)
samsung_clk_init(np, reg_base, nr_clks,
@@ -1013,37 +1106,42 @@ void __init exynos4_clk_init(struct device_node *np, enum exynos4_soc exynos4_so
exynos4_clk_regs, ARRAY_SIZE(exynos4_clk_regs),
exynos4x12_clk_save, ARRAY_SIZE(exynos4x12_clk_save));
- if (np)
- samsung_clk_of_register_fixed_ext(exynos4_fixed_rate_ext_clks,
+ samsung_clk_of_register_fixed_ext(exynos4_fixed_rate_ext_clks,
ARRAY_SIZE(exynos4_fixed_rate_ext_clks),
ext_clk_match);
exynos4_clk_register_finpll(xom);
if (exynos4_soc == EXYNOS4210) {
- apll = samsung_clk_register_pll45xx("fout_apll", "fin_pll",
- reg_base + APLL_CON0, pll_4508);
- mpll = samsung_clk_register_pll45xx("fout_mpll", "fin_pll",
- reg_base + E4210_MPLL_CON0, pll_4508);
- epll = samsung_clk_register_pll46xx("fout_epll", "fin_pll",
- reg_base + EPLL_CON0, pll_4600);
- vpll = samsung_clk_register_pll46xx("fout_vpll", "mout_vpllsrc",
- reg_base + VPLL_CON0, pll_4650c);
+ samsung_clk_register_mux(exynos4210_mux_early,
+ ARRAY_SIZE(exynos4210_mux_early));
+
+ if (_get_rate("fin_pll") == 24000000) {
+ exynos4210_plls[apll].rate_table =
+ exynos4210_apll_rates;
+ exynos4210_plls[epll].rate_table =
+ exynos4210_epll_rates;
+ }
+
+ if (_get_rate("mout_vpllsrc") == 24000000)
+ exynos4210_plls[vpll].rate_table =
+ exynos4210_vpll_rates;
+
+ samsung_clk_register_pll(exynos4210_plls,
+ ARRAY_SIZE(exynos4210_plls), reg_base);
} else {
- apll = samsung_clk_register_pll35xx("fout_apll", "fin_pll",
- reg_base + APLL_CON0);
- mpll = samsung_clk_register_pll35xx("fout_mpll", "fin_pll",
- reg_base + E4X12_MPLL_CON0);
- epll = samsung_clk_register_pll36xx("fout_epll", "fin_pll",
- reg_base + EPLL_CON0);
- vpll = samsung_clk_register_pll36xx("fout_vpll", "fin_pll",
- reg_base + VPLL_CON0);
- }
+ if (_get_rate("fin_pll") == 24000000) {
+ exynos4x12_plls[apll].rate_table =
+ exynos4x12_apll_rates;
+ exynos4x12_plls[epll].rate_table =
+ exynos4x12_epll_rates;
+ exynos4x12_plls[vpll].rate_table =
+ exynos4x12_vpll_rates;
+ }
- samsung_clk_add_lookup(apll, fout_apll);
- samsung_clk_add_lookup(mpll, fout_mpll);
- samsung_clk_add_lookup(epll, fout_epll);
- samsung_clk_add_lookup(vpll, fout_vpll);
+ samsung_clk_register_pll(exynos4x12_plls,
+ ARRAY_SIZE(exynos4x12_plls), reg_base);
+ }
samsung_clk_register_fixed_rate(exynos4_fixed_rate_clks,
ARRAY_SIZE(exynos4_fixed_rate_clks));
@@ -1063,6 +1161,8 @@ void __init exynos4_clk_init(struct device_node *np, enum exynos4_soc exynos4_so
ARRAY_SIZE(exynos4210_div_clks));
samsung_clk_register_gate(exynos4210_gate_clks,
ARRAY_SIZE(exynos4210_gate_clks));
+ samsung_clk_register_alias(exynos4210_aliases,
+ ARRAY_SIZE(exynos4210_aliases));
} else {
samsung_clk_register_mux(exynos4x12_mux_clks,
ARRAY_SIZE(exynos4x12_mux_clks));
@@ -1070,14 +1170,19 @@ void __init exynos4_clk_init(struct device_node *np, enum exynos4_soc exynos4_so
ARRAY_SIZE(exynos4x12_div_clks));
samsung_clk_register_gate(exynos4x12_gate_clks,
ARRAY_SIZE(exynos4x12_gate_clks));
+ samsung_clk_register_alias(exynos4x12_aliases,
+ ARRAY_SIZE(exynos4x12_aliases));
}
+ samsung_clk_register_alias(exynos4_aliases,
+ ARRAY_SIZE(exynos4_aliases));
+
pr_info("%s clocks: sclk_apll = %ld, sclk_mpll = %ld\n"
"\tsclk_epll = %ld, sclk_vpll = %ld, arm_clk = %ld\n",
exynos4_soc == EXYNOS4210 ? "Exynos4210" : "Exynos4x12",
- _get_rate("sclk_apll"), _get_rate("mout_mpll"),
+ _get_rate("sclk_apll"), _get_rate("sclk_mpll"),
_get_rate("sclk_epll"), _get_rate("sclk_vpll"),
- _get_rate("armclk"));
+ _get_rate("arm_clk"));
}
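
The exynos4 rework above replaces open-coded per-PLL registration with table-driven samsung_pll_clock descriptors, attaching PMS rate tables only when the reference clock actually runs at the 24 MHz the tables were computed for. A condensed sketch of that flow, using the PLL()/PLL_35XX_RATE() helpers and the driver-local _get_rate() seen in the patch; the clock ids and the lock/con register offsets below are placeholders:

#include "clk.h"        /* samsung common clock helpers from this series */

enum { apll, epll, nr_plls };           /* indices into the PLL list */
enum { fout_apll = 1, fout_epll };      /* illustrative clock ids */

static struct samsung_pll_rate_table example_apll_rates[] __initdata = {
        /* PLL_35XX_RATE(rate, m, p, s) -- one entry shown for brevity */
        PLL_35XX_RATE(1000000000, 125, 3, 0),
        { /* sentinel */ }
};

static struct samsung_pll_clock example_plls[nr_plls] __initdata = {
        [apll] = PLL(pll_35xx, fout_apll, "fout_apll", "fin_pll",
                     0x0, 0x100, NULL),
        [epll] = PLL(pll_36xx, fout_epll, "fout_epll", "fin_pll",
                     0x10030, 0x10130, NULL),
};

static void __init example_register_plls(void __iomem *reg_base)
{
        /* attach the table only when the reference clock really is 24 MHz */
        if (_get_rate("fin_pll") == 24000000)
                example_plls[apll].rate_table = example_apll_rates;

        samsung_clk_register_pll(example_plls, ARRAY_SIZE(example_plls),
                                 reg_base);
}

Leaving rate_table NULL keeps a PLL read-only for rate setting, which is why the patch guards each table assignment on the measured reference rate instead of attaching the tables unconditionally.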
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 6f767c5..adf3234 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -17,11 +17,22 @@
#include <linux/of_address.h>
#include "clk.h"
-#include "clk-pll.h"
+#define APLL_LOCK 0x0
+#define APLL_CON0 0x100
#define SRC_CPU 0x200
#define DIV_CPU0 0x500
+#define MPLL_LOCK 0x4000
+#define MPLL_CON0 0x4100
#define SRC_CORE1 0x4204
+#define CPLL_LOCK 0x10020
+#define EPLL_LOCK 0x10030
+#define VPLL_LOCK 0x10040
+#define GPLL_LOCK 0x10050
+#define CPLL_CON0 0x10120
+#define EPLL_CON0 0x10130
+#define VPLL_CON0 0x10140
+#define GPLL_CON0 0x10150
#define SRC_TOP0 0x10210
#define SRC_TOP2 0x10218
#define SRC_GSCL 0x10220
@@ -59,9 +70,18 @@
#define GATE_IP_FSYS 0x10944
#define GATE_IP_PERIC 0x10950
#define GATE_IP_PERIS 0x10960
+#define BPLL_LOCK 0x20010
+#define BPLL_CON0 0x20110
#define SRC_CDREX 0x20200
#define PLL_DIV2_SEL 0x20a24
#define GATE_IP_DISP1 0x10928
+#define GATE_IP_ACP 0x10000
+
+/* list of PLLs to be registered */
+enum exynos5250_plls {
+ apll, mpll, cpll, epll, vpll, gpll, bpll,
+ nr_plls /* number of PLLs */
+};
/*
* Let each supported clock get a unique id. This id is used to lookup the clock
@@ -79,7 +99,8 @@ enum exynos5250_clks {
none,
/* core clocks */
- fin_pll,
+ fin_pll, fout_apll, fout_mpll, fout_bpll, fout_gpll, fout_cpll,
+ fout_epll, fout_vpll,
/* gate for special clocks (sclk) */
sclk_cam_bayer = 128, sclk_cam0, sclk_cam1, sclk_gscl_wa, sclk_gscl_wb,
@@ -87,7 +108,7 @@ enum exynos5250_clks {
sclk_mmc0, sclk_mmc1, sclk_mmc2, sclk_mmc3, sclk_sata, sclk_usb3,
sclk_jpeg, sclk_uart0, sclk_uart1, sclk_uart2, sclk_uart3, sclk_pwm,
sclk_audio1, sclk_audio2, sclk_spdif, sclk_spi0, sclk_spi1, sclk_spi2,
- div_i2s1, div_i2s2,
+ div_i2s1, div_i2s2, sclk_hdmiphy,
/* gate clocks */
gscl0 = 256, gscl1, gscl2, gscl3, gscl_wa, gscl_wb, smmu_gscl0,
@@ -99,7 +120,10 @@ enum exynos5250_clks {
spi2, i2s1, i2s2, pcm1, pcm2, pwm, spdif, ac97, hsi2c0, hsi2c1, hsi2c2,
hsi2c3, chipid, sysreg, pmu, cmu_top, cmu_core, cmu_mem, tzpc0, tzpc1,
tzpc2, tzpc3, tzpc4, tzpc5, tzpc6, tzpc7, tzpc8, tzpc9, hdmi_cec, mct,
- wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi,
+ wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d,
+
+ /* mux clocks */
+ mout_hdmi = 1024,
nr_clks,
};
@@ -108,7 +132,7 @@ enum exynos5250_clks {
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
*/
-static __initdata unsigned long exynos5250_clk_regs[] = {
+static unsigned long exynos5250_clk_regs[] __initdata = {
SRC_CPU,
DIV_CPU0,
SRC_CORE1,
@@ -152,6 +176,7 @@ static __initdata unsigned long exynos5250_clk_regs[] = {
SRC_CDREX,
PLL_DIV2_SEL,
GATE_IP_DISP1,
+ GATE_IP_ACP,
};
/* list of all parent clock list */
@@ -191,31 +216,34 @@ PNAME(mout_spdif_p) = { "sclk_audio0", "sclk_audio1", "sclk_audio2",
"spdif_extclk" };
/* fixed rate clocks generated outside the soc */
-struct samsung_fixed_rate_clock exynos5250_fixed_rate_ext_clks[] __initdata = {
+static struct samsung_fixed_rate_clock exynos5250_fixed_rate_ext_clks[] __initdata = {
FRATE(fin_pll, "fin_pll", NULL, CLK_IS_ROOT, 0),
};
/* fixed rate clocks generated inside the soc */
-struct samsung_fixed_rate_clock exynos5250_fixed_rate_clks[] __initdata = {
- FRATE(none, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
+static struct samsung_fixed_rate_clock exynos5250_fixed_rate_clks[] __initdata = {
+ FRATE(sclk_hdmiphy, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
FRATE(none, "sclk_hdmi27m", NULL, CLK_IS_ROOT, 27000000),
FRATE(none, "sclk_dptxphy", NULL, CLK_IS_ROOT, 24000000),
FRATE(none, "sclk_uhostphy", NULL, CLK_IS_ROOT, 48000000),
};
-struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __initdata = {
+static struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __initdata = {
FFACTOR(none, "fout_mplldiv2", "fout_mpll", 1, 2, 0),
FFACTOR(none, "fout_bplldiv2", "fout_bpll", 1, 2, 0),
};
-struct samsung_mux_clock exynos5250_mux_clks[] __initdata = {
+static struct samsung_mux_clock exynos5250_pll_pmux_clks[] __initdata = {
+ MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1),
+};
+
+static struct samsung_mux_clock exynos5250_mux_clks[] __initdata = {
MUX_A(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1, "mout_apll"),
MUX_A(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1, "mout_cpu"),
MUX(none, "mout_mpll_fout", mout_mpll_fout_p, PLL_DIV2_SEL, 4, 1),
MUX_A(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1, "mout_mpll"),
MUX(none, "mout_bpll_fout", mout_bpll_fout_p, PLL_DIV2_SEL, 0, 1),
MUX(none, "sclk_bpll", mout_bpll_p, SRC_CDREX, 0, 1),
- MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1),
MUX(none, "sclk_vpll", mout_vpll_p, SRC_TOP2, 16, 1),
MUX(none, "sclk_epll", mout_epll_p, SRC_TOP2, 12, 1),
MUX(none, "sclk_cpll", mout_cpll_p, SRC_TOP2, 8, 1),
@@ -232,7 +260,7 @@ struct samsung_mux_clock exynos5250_mux_clks[] __initdata = {
MUX(none, "mout_fimd1", mout_group1_p, SRC_DISP1_0, 0, 4),
MUX(none, "mout_mipi1", mout_group1_p, SRC_DISP1_0, 12, 4),
MUX(none, "mout_dp", mout_group1_p, SRC_DISP1_0, 16, 4),
- MUX(none, "mout_hdmi", mout_hdmi_p, SRC_DISP1_0, 20, 1),
+ MUX(mout_hdmi, "mout_hdmi", mout_hdmi_p, SRC_DISP1_0, 20, 1),
MUX(none, "mout_audio0", mout_audio0_p, SRC_MAU, 0, 4),
MUX(none, "mout_mmc0", mout_group1_p, SRC_FSYS, 0, 4),
MUX(none, "mout_mmc1", mout_group1_p, SRC_FSYS, 4, 4),
@@ -254,7 +282,7 @@ struct samsung_mux_clock exynos5250_mux_clks[] __initdata = {
MUX(none, "mout_spi2", mout_group1_p, SRC_PERIC1, 24, 4),
};
-struct samsung_div_clock exynos5250_div_clks[] __initdata = {
+static struct samsung_div_clock exynos5250_div_clks[] __initdata = {
DIV(none, "div_arm", "mout_cpu", DIV_CPU0, 0, 3),
DIV(none, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3),
DIV(none, "aclk66_pre", "sclk_mpll_user", DIV_TOP1, 24, 3),
@@ -314,7 +342,7 @@ struct samsung_div_clock exynos5250_div_clks[] __initdata = {
DIV_PERIC2, 8, 8, CLK_SET_RATE_PARENT, 0),
};
-struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
+static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
GATE(gscl0, "gscl0", "none", GATE_IP_GSCL, 0, 0, 0),
GATE(gscl1, "gscl1", "none", GATE_IP_GSCL, 1, 0, 0),
GATE(gscl2, "gscl2", "aclk266", GATE_IP_GSCL, 2, 0, 0),
@@ -461,20 +489,60 @@ struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
GATE(mie1, "mie1", "aclk200", GATE_IP_DISP1, 1, 0, 0),
GATE(dsim0, "dsim0", "aclk200", GATE_IP_DISP1, 3, 0, 0),
GATE(dp, "dp", "aclk200", GATE_IP_DISP1, 4, 0, 0),
- GATE(mixer, "mixer", "aclk200", GATE_IP_DISP1, 5, 0, 0),
- GATE(hdmi, "hdmi", "aclk200", GATE_IP_DISP1, 6, 0, 0),
+ GATE(mixer, "mixer", "mout_aclk200_disp1", GATE_IP_DISP1, 5, 0, 0),
+ GATE(hdmi, "hdmi", "mout_aclk200_disp1", GATE_IP_DISP1, 6, 0, 0),
+ GATE(g2d, "g2d", "aclk200", GATE_IP_ACP, 3, 0, 0),
+};
+
+static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = {
+ /* sorted in descending order */
+ /* PLL_36XX_RATE(rate, m, p, s, k) */
+ PLL_36XX_RATE(266000000, 266, 3, 3, 0),
+ /* Not in UM, but needed for eDP on snow */
+ PLL_36XX_RATE(70500000, 94, 2, 4, 0),
+ { },
+};
+
+static struct samsung_pll_rate_table epll_24mhz_tbl[] __initdata = {
+ /* sorted in descending order */
+ /* PLL_36XX_RATE(rate, m, p, s, k) */
+ PLL_36XX_RATE(192000000, 64, 2, 2, 0),
+ PLL_36XX_RATE(180633600, 90, 3, 2, 20762),
+ PLL_36XX_RATE(180000000, 90, 3, 2, 0),
+ PLL_36XX_RATE(73728000, 98, 2, 4, 19923),
+ PLL_36XX_RATE(67737600, 90, 2, 4, 20762),
+ PLL_36XX_RATE(49152000, 98, 3, 4, 19923),
+ PLL_36XX_RATE(45158400, 90, 3, 4, 20762),
+ PLL_36XX_RATE(32768000, 131, 3, 5, 4719),
+ { },
+};
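
These entries follow the usual Samsung fractional-PLL relation, fout = (MDIV + KDIV / 2^16) * FIN / (PDIV * 2^SDIV), which is the same arithmetic samsung_pll36xx_recalc_rate() performs later in this patch. A quick host-side check of two of the entries above, assuming a 24 MHz fin_pll (the helper is illustrative, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Host-side model of the PLL36xx output formula; mirrors the
 * fvco *= (mdiv << 16) + kdiv; do_div(fvco, pdiv << sdiv); fvco >>= 16;
 * sequence used by samsung_pll36xx_recalc_rate(). */
static uint64_t pll36xx_rate(uint64_t fin, uint32_t m, uint32_t p,
			     uint32_t s, uint32_t k)
{
	uint64_t fvco = fin * ((m << 16) + k);

	fvco /= (uint64_t)p << s;
	return fvco >> 16;
}

int main(void)
{
	/* PLL_36XX_RATE(266000000, 266, 3, 3, 0) -> 266 MHz */
	printf("%llu\n", (unsigned long long)pll36xx_rate(24000000, 266, 3, 3, 0));
	/* PLL_36XX_RATE(192000000, 64, 2, 2, 0) -> 192 MHz */
	printf("%llu\n", (unsigned long long)pll36xx_rate(24000000, 64, 2, 2, 0));
	return 0;
}
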
+
+static struct samsung_pll_clock exynos5250_plls[nr_plls] __initdata = {
+ [apll] = PLL_A(pll_35xx, fout_apll, "fout_apll", "fin_pll", APLL_LOCK,
+ APLL_CON0, "fout_apll", NULL),
+ [mpll] = PLL_A(pll_35xx, fout_mpll, "fout_mpll", "fin_pll", MPLL_LOCK,
+ MPLL_CON0, "fout_mpll", NULL),
+ [bpll] = PLL(pll_35xx, fout_bpll, "fout_bpll", "fin_pll", BPLL_LOCK,
+ BPLL_CON0, NULL),
+ [gpll] = PLL(pll_35xx, fout_gpll, "fout_gpll", "fin_pll", GPLL_LOCK,
+ GPLL_CON0, NULL),
+ [cpll] = PLL(pll_35xx, fout_cpll, "fout_cpll", "fin_pll", CPLL_LOCK,
+ CPLL_CON0, NULL),
+ [epll] = PLL(pll_36xx, fout_epll, "fout_epll", "fin_pll", EPLL_LOCK,
+ EPLL_CON0, NULL),
+ [vpll] = PLL(pll_36xx, fout_vpll, "fout_vpll", "mout_vpllsrc",
+ VPLL_LOCK, VPLL_CON0, NULL),
};
-static __initdata struct of_device_id ext_clk_match[] = {
+static struct of_device_id ext_clk_match[] __initdata = {
{ .compatible = "samsung,clock-xxti", .data = (void *)0, },
{ },
};
/* register exynos5250 clocks */
-void __init exynos5250_clk_init(struct device_node *np)
+static void __init exynos5250_clk_init(struct device_node *np)
{
void __iomem *reg_base;
- struct clk *apll, *mpll, *epll, *vpll, *bpll, *gpll, *cpll;
if (np) {
reg_base = of_iomap(np, 0);
@@ -490,22 +558,17 @@ void __init exynos5250_clk_init(struct device_node *np)
samsung_clk_of_register_fixed_ext(exynos5250_fixed_rate_ext_clks,
ARRAY_SIZE(exynos5250_fixed_rate_ext_clks),
ext_clk_match);
+ samsung_clk_register_mux(exynos5250_pll_pmux_clks,
+ ARRAY_SIZE(exynos5250_pll_pmux_clks));
+
+ if (_get_rate("fin_pll") == 24 * MHZ)
+ exynos5250_plls[epll].rate_table = epll_24mhz_tbl;
- apll = samsung_clk_register_pll35xx("fout_apll", "fin_pll",
- reg_base + 0x100);
- mpll = samsung_clk_register_pll35xx("fout_mpll", "fin_pll",
- reg_base + 0x4100);
- bpll = samsung_clk_register_pll35xx("fout_bpll", "fin_pll",
- reg_base + 0x20110);
- gpll = samsung_clk_register_pll35xx("fout_gpll", "fin_pll",
- reg_base + 0x10150);
- cpll = samsung_clk_register_pll35xx("fout_cpll", "fin_pll",
- reg_base + 0x10120);
- epll = samsung_clk_register_pll36xx("fout_epll", "fin_pll",
- reg_base + 0x10130);
- vpll = samsung_clk_register_pll36xx("fout_vpll", "mout_vpllsrc",
- reg_base + 0x10140);
+ if (_get_rate("mout_vpllsrc") == 24 * MHZ)
+ exynos5250_plls[vpll].rate_table = vpll_24mhz_tbl;
+ samsung_clk_register_pll(exynos5250_plls, ARRAY_SIZE(exynos5250_plls),
+ reg_base);
samsung_clk_register_fixed_rate(exynos5250_fixed_rate_clks,
ARRAY_SIZE(exynos5250_fixed_rate_clks));
samsung_clk_register_fixed_factor(exynos5250_fixed_factor_clks,
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 68a96cb..48c4a93 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -17,13 +17,30 @@
#include <linux/of_address.h>
#include "clk.h"
-#include "clk-pll.h"
+#define APLL_LOCK 0x0
+#define APLL_CON0 0x100
#define SRC_CPU 0x200
#define DIV_CPU0 0x500
#define DIV_CPU1 0x504
#define GATE_BUS_CPU 0x700
#define GATE_SCLK_CPU 0x800
+#define CPLL_LOCK 0x10020
+#define DPLL_LOCK 0x10030
+#define EPLL_LOCK 0x10040
+#define RPLL_LOCK 0x10050
+#define IPLL_LOCK 0x10060
+#define SPLL_LOCK 0x10070
+#define VPLL_LOCK 0x10080
+#define MPLL_LOCK 0x10090
+#define CPLL_CON0 0x10120
+#define DPLL_CON0 0x10128
+#define EPLL_CON0 0x10130
+#define RPLL_CON0 0x10140
+#define IPLL_CON0 0x10150
+#define SPLL_CON0 0x10160
+#define VPLL_CON0 0x10170
+#define MPLL_CON0 0x10180
#define SRC_TOP0 0x10200
#define SRC_TOP1 0x10204
#define SRC_TOP2 0x10208
@@ -75,15 +92,27 @@
#define GATE_TOP_SCLK_MAU 0x1083c
#define GATE_TOP_SCLK_FSYS 0x10840
#define GATE_TOP_SCLK_PERIC 0x10850
+#define BPLL_LOCK 0x20010
+#define BPLL_CON0 0x20110
#define SRC_CDREX 0x20200
+#define KPLL_LOCK 0x28000
+#define KPLL_CON0 0x28100
#define SRC_KFC 0x28200
#define DIV_KFC0 0x28500
+/* list of PLLs */
+enum exynos5420_plls {
+ apll, cpll, dpll, epll, rpll, ipll, spll, vpll, mpll,
+ bpll, kpll,
+ nr_plls /* number of PLLs */
+};
+
enum exynos5420_clks {
none,
/* core clocks */
- fin_pll,
+ fin_pll, fout_apll, fout_cpll, fout_dpll, fout_epll, fout_rpll,
+ fout_ipll, fout_spll, fout_vpll, fout_mpll, fout_bpll, fout_kpll,
/* gate for special clocks (sclk) */
sclk_uart0 = 128, sclk_uart1, sclk_uart2, sclk_uart3, sclk_mmc0,
@@ -91,7 +120,7 @@ enum exynos5420_clks {
sclk_i2s2, sclk_pcm1, sclk_pcm2, sclk_spdif, sclk_hdmi, sclk_pixel,
sclk_dp1, sclk_mipi1, sclk_fimd1, sclk_maudio0, sclk_maupcm0,
sclk_usbd300, sclk_usbd301, sclk_usbphy300, sclk_usbphy301, sclk_unipro,
- sclk_pwm, sclk_gscl_wa, sclk_gscl_wb,
+ sclk_pwm, sclk_gscl_wa, sclk_gscl_wb, sclk_hdmiphy,
/* gate clocks */
aclk66_peric = 256, uart0, uart1, uart2, uart3, i2c0, i2c1, i2c2, i2c3,
@@ -109,7 +138,13 @@ enum exynos5420_clks {
aclk300_gscl = 460, smmu_gscl0, smmu_gscl1, gscl_wa, gscl_wb, gscl0,
gscl1, clk_3aa, aclk266_g2d = 470, sss, slim_sss, mdma0,
aclk333_g2d = 480, g2d, aclk333_432_gscl = 490, smmu_3aa, smmu_fimcl0,
- smmu_fimcl1, smmu_fimcl3, fimc_lite3, aclk_g3d = 500, g3d,
+ smmu_fimcl1, smmu_fimcl3, fimc_lite3, aclk_g3d = 500, g3d, smmu_mixer,
+
+ /* mux clocks */
+ mout_hdmi = 640,
+
+ /* divider clocks */
+ dout_pixel = 768,
nr_clks,
};
@@ -118,7 +153,7 @@ enum exynos5420_clks {
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
*/
-static __initdata unsigned long exynos5420_clk_regs[] = {
+static unsigned long exynos5420_clk_regs[] __initdata = {
SRC_CPU,
DIV_CPU0,
DIV_CPU1,
@@ -257,29 +292,29 @@ PNAME(audio2_p) = { "fin_pll", "cdclk2", "sclk_dpll", "sclk_mpll",
"sclk_spll", "sclk_ipll", "sclk_epll", "sclk_rpll" };
PNAME(spdif_p) = { "fin_pll", "dout_audio0", "dout_audio1", "dout_audio2",
"spdif_extclk", "sclk_ipll", "sclk_epll", "sclk_rpll" };
-PNAME(hdmi_p) = { "sclk_hdmiphy", "dout_hdmi_pixel" };
+PNAME(hdmi_p) = { "dout_hdmi_pixel", "sclk_hdmiphy" };
PNAME(maudio0_p) = { "fin_pll", "maudio_clk", "sclk_dpll", "sclk_mpll",
"sclk_spll", "sclk_ipll", "sclk_epll", "sclk_rpll" };
/* fixed rate clocks generated outside the soc */
-struct samsung_fixed_rate_clock exynos5420_fixed_rate_ext_clks[] __initdata = {
+static struct samsung_fixed_rate_clock exynos5420_fixed_rate_ext_clks[] __initdata = {
FRATE(fin_pll, "fin_pll", NULL, CLK_IS_ROOT, 0),
};
/* fixed rate clocks generated inside the soc */
-struct samsung_fixed_rate_clock exynos5420_fixed_rate_clks[] __initdata = {
- FRATE(none, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
+static struct samsung_fixed_rate_clock exynos5420_fixed_rate_clks[] __initdata = {
+ FRATE(sclk_hdmiphy, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
FRATE(none, "sclk_pwi", NULL, CLK_IS_ROOT, 24000000),
FRATE(none, "sclk_usbh20", NULL, CLK_IS_ROOT, 48000000),
FRATE(none, "mphy_refclk_ixtal24", NULL, CLK_IS_ROOT, 48000000),
FRATE(none, "sclk_usbh20_scan_clk", NULL, CLK_IS_ROOT, 480000000),
};
-struct samsung_fixed_factor_clock exynos5420_fixed_factor_clks[] __initdata = {
+static struct samsung_fixed_factor_clock exynos5420_fixed_factor_clks[] __initdata = {
FFACTOR(none, "sclk_hsic_12m", "fin_pll", 1, 2, 0),
};
-struct samsung_mux_clock exynos5420_mux_clks[] __initdata = {
+static struct samsung_mux_clock exynos5420_mux_clks[] __initdata = {
MUX(none, "mout_mspll_kfc", mspll_cpu_p, SRC_TOP7, 8, 2),
MUX(none, "mout_mspll_cpu", mspll_cpu_p, SRC_TOP7, 12, 2),
MUX(none, "mout_apll", apll_p, SRC_CPU, 0, 1),
@@ -371,7 +406,7 @@ struct samsung_mux_clock exynos5420_mux_clks[] __initdata = {
MUX(none, "mout_mipi1", group2_p, SRC_DISP10, 16, 3),
MUX(none, "mout_dp1", group2_p, SRC_DISP10, 20, 3),
MUX(none, "mout_pixel", group2_p, SRC_DISP10, 24, 3),
- MUX(none, "mout_hdmi", hdmi_p, SRC_DISP10, 28, 1),
+ MUX(mout_hdmi, "mout_hdmi", hdmi_p, SRC_DISP10, 28, 1),
/* MAU Block */
MUX(none, "mout_maudio0", maudio0_p, SRC_MAU, 28, 3),
@@ -399,7 +434,7 @@ struct samsung_mux_clock exynos5420_mux_clks[] __initdata = {
MUX(none, "mout_spi2", group2_p, SRC_PERIC1, 28, 3),
};
-struct samsung_div_clock exynos5420_div_clks[] __initdata = {
+static struct samsung_div_clock exynos5420_div_clks[] __initdata = {
DIV(none, "div_arm", "mout_cpu", DIV_CPU0, 0, 3),
DIV(none, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3),
DIV(none, "armclk2", "div_arm", DIV_CPU0, 28, 3),
@@ -431,7 +466,7 @@ struct samsung_div_clock exynos5420_div_clks[] __initdata = {
DIV(none, "dout_fimd1", "mout_fimd1", DIV_DISP10, 0, 4),
DIV(none, "dout_mipi1", "mout_mipi1", DIV_DISP10, 16, 8),
DIV(none, "dout_dp1", "mout_dp1", DIV_DISP10, 24, 4),
- DIV(none, "dout_hdmi_pixel", "mout_pixel", DIV_DISP10, 28, 4),
+ DIV(dout_pixel, "dout_hdmi_pixel", "mout_pixel", DIV_DISP10, 28, 4),
/* Audio Block */
DIV(none, "dout_maudio0", "mout_maudio0", DIV_MAU, 20, 4),
@@ -479,7 +514,7 @@ struct samsung_div_clock exynos5420_div_clks[] __initdata = {
DIV(none, "dout_pre_spi2", "dout_spi2", DIV_PERIC4, 24, 8),
};
-struct samsung_gate_clock exynos5420_gate_clks[] __initdata = {
+static struct samsung_gate_clock exynos5420_gate_clks[] __initdata = {
/* TODO: Re-verify the CG bits for all the gate clocks */
GATE_A(mct, "pclk_st", "aclk66_psgen", GATE_BUS_PERIS1, 2, 0, 0, "mct"),
@@ -696,19 +731,43 @@ struct samsung_gate_clock exynos5420_gate_clks[] __initdata = {
GATE(smmu_mscl0, "smmu_mscl0", "aclk400_mscl", GATE_IP_MSCL, 8, 0, 0),
GATE(smmu_mscl1, "smmu_mscl1", "aclk400_mscl", GATE_IP_MSCL, 9, 0, 0),
GATE(smmu_mscl2, "smmu_mscl2", "aclk400_mscl", GATE_IP_MSCL, 10, 0, 0),
+ GATE(smmu_mixer, "smmu_mixer", "aclk200_disp1", GATE_IP_DISP1, 9, 0, 0),
};
-static __initdata struct of_device_id ext_clk_match[] = {
+static struct samsung_pll_clock exynos5420_plls[nr_plls] __initdata = {
+ [apll] = PLL(pll_2550, fout_apll, "fout_apll", "fin_pll", APLL_LOCK,
+ APLL_CON0, NULL),
+ [cpll] = PLL(pll_2550, fout_cpll, "fout_cpll", "fin_pll", CPLL_LOCK,
+ CPLL_CON0, NULL),
+ [dpll] = PLL(pll_2550, fout_dpll, "fout_dpll", "fin_pll", DPLL_LOCK,
+ DPLL_CON0, NULL),
+ [epll] = PLL(pll_2650, fout_epll, "fout_epll", "fin_pll", EPLL_LOCK,
+ EPLL_CON0, NULL),
+ [rpll] = PLL(pll_2650, fout_rpll, "fout_rpll", "fin_pll", RPLL_LOCK,
+ RPLL_CON0, NULL),
+ [ipll] = PLL(pll_2550, fout_ipll, "fout_ipll", "fin_pll", IPLL_LOCK,
+ IPLL_CON0, NULL),
+ [spll] = PLL(pll_2550, fout_spll, "fout_spll", "fin_pll", SPLL_LOCK,
+ SPLL_CON0, NULL),
+ [vpll] = PLL(pll_2550, fout_vpll, "fout_vpll", "fin_pll", VPLL_LOCK,
+ VPLL_CON0, NULL),
+ [mpll] = PLL(pll_2550, fout_mpll, "fout_mpll", "fin_pll", MPLL_LOCK,
+ MPLL_CON0, NULL),
+ [bpll] = PLL(pll_2550, fout_bpll, "fout_bpll", "fin_pll", BPLL_LOCK,
+ BPLL_CON0, NULL),
+ [kpll] = PLL(pll_2550, fout_kpll, "fout_kpll", "fin_pll", KPLL_LOCK,
+ KPLL_CON0, NULL),
+};
+
+static struct of_device_id ext_clk_match[] __initdata = {
{ .compatible = "samsung,exynos5420-oscclk", .data = (void *)0, },
{ },
};
/* register exynos5420 clocks */
-void __init exynos5420_clk_init(struct device_node *np)
+static void __init exynos5420_clk_init(struct device_node *np)
{
void __iomem *reg_base;
- struct clk *apll, *bpll, *cpll, *dpll, *epll, *ipll, *kpll, *mpll;
- struct clk *rpll, *spll, *vpll;
if (np) {
reg_base = of_iomap(np, 0);
@@ -724,30 +783,8 @@ void __init exynos5420_clk_init(struct device_node *np)
samsung_clk_of_register_fixed_ext(exynos5420_fixed_rate_ext_clks,
ARRAY_SIZE(exynos5420_fixed_rate_ext_clks),
ext_clk_match);
-
- apll = samsung_clk_register_pll35xx("fout_apll", "fin_pll",
- reg_base + 0x100);
- bpll = samsung_clk_register_pll35xx("fout_bpll", "fin_pll",
- reg_base + 0x20110);
- cpll = samsung_clk_register_pll35xx("fout_cpll", "fin_pll",
- reg_base + 0x10120);
- dpll = samsung_clk_register_pll35xx("fout_dpll", "fin_pll",
- reg_base + 0x10128);
- epll = samsung_clk_register_pll36xx("fout_epll", "fin_pll",
- reg_base + 0x10130);
- ipll = samsung_clk_register_pll35xx("fout_ipll", "fin_pll",
- reg_base + 0x10150);
- kpll = samsung_clk_register_pll35xx("fout_kpll", "fin_pll",
- reg_base + 0x28100);
- mpll = samsung_clk_register_pll35xx("fout_mpll", "fin_pll",
- reg_base + 0x10180);
- rpll = samsung_clk_register_pll36xx("fout_rpll", "fin_pll",
- reg_base + 0x10140);
- spll = samsung_clk_register_pll35xx("fout_spll", "fin_pll",
- reg_base + 0x10160);
- vpll = samsung_clk_register_pll35xx("fout_vpll", "fin_pll",
- reg_base + 0x10170);
-
+ samsung_clk_register_pll(exynos5420_plls, ARRAY_SIZE(exynos5420_plls),
+ reg_base);
samsung_clk_register_fixed_rate(exynos5420_fixed_rate_clks,
ARRAY_SIZE(exynos5420_fixed_rate_clks));
samsung_clk_register_fixed_factor(exynos5420_fixed_factor_clks,
diff --git a/drivers/clk/samsung/clk-exynos5440.c b/drivers/clk/samsung/clk-exynos5440.c
index 7d54341..f865894 100644
--- a/drivers/clk/samsung/clk-exynos5440.c
+++ b/drivers/clk/samsung/clk-exynos5440.c
@@ -41,12 +41,12 @@ PNAME(mout_armclk_p) = { "cplla", "cpllb" };
PNAME(mout_spi_p) = { "div125", "div200" };
/* fixed rate clocks generated outside the soc */
-struct samsung_fixed_rate_clock exynos5440_fixed_rate_ext_clks[] __initdata = {
+static struct samsung_fixed_rate_clock exynos5440_fixed_rate_ext_clks[] __initdata = {
FRATE(none, "xtal", NULL, CLK_IS_ROOT, 0),
};
/* fixed rate clocks */
-struct samsung_fixed_rate_clock exynos5440_fixed_rate_clks[] __initdata = {
+static struct samsung_fixed_rate_clock exynos5440_fixed_rate_clks[] __initdata = {
FRATE(none, "ppll", NULL, CLK_IS_ROOT, 1000000000),
FRATE(none, "usb_phy0", NULL, CLK_IS_ROOT, 60000000),
FRATE(none, "usb_phy1", NULL, CLK_IS_ROOT, 60000000),
@@ -55,26 +55,26 @@ struct samsung_fixed_rate_clock exynos5440_fixed_rate_clks[] __initdata = {
};
/* fixed factor clocks */
-struct samsung_fixed_factor_clock exynos5440_fixed_factor_clks[] __initdata = {
+static struct samsung_fixed_factor_clock exynos5440_fixed_factor_clks[] __initdata = {
FFACTOR(none, "div250", "ppll", 1, 4, 0),
FFACTOR(none, "div200", "ppll", 1, 5, 0),
FFACTOR(none, "div125", "div250", 1, 2, 0),
};
/* mux clocks */
-struct samsung_mux_clock exynos5440_mux_clks[] __initdata = {
+static struct samsung_mux_clock exynos5440_mux_clks[] __initdata = {
MUX(none, "mout_spi", mout_spi_p, MISC_DOUT1, 5, 1),
MUX_A(arm_clk, "arm_clk", mout_armclk_p,
CPU_CLK_STATUS, 0, 1, "armclk"),
};
/* divider clocks */
-struct samsung_div_clock exynos5440_div_clks[] __initdata = {
+static struct samsung_div_clock exynos5440_div_clks[] __initdata = {
DIV(spi_baud, "div_spi", "mout_spi", MISC_DOUT1, 3, 2),
};
/* gate clocks */
-struct samsung_gate_clock exynos5440_gate_clks[] __initdata = {
+static struct samsung_gate_clock exynos5440_gate_clks[] __initdata = {
GATE(pb0_250, "pb0_250", "div250", CLKEN_OV_VAL, 3, 0, 0),
GATE(pr0_250, "pr0_250", "div250", CLKEN_OV_VAL, 4, 0, 0),
GATE(pr1_250, "pr1_250", "div250", CLKEN_OV_VAL, 5, 0, 0),
@@ -97,13 +97,13 @@ struct samsung_gate_clock exynos5440_gate_clks[] __initdata = {
GATE(cs250_o, "cs250_o", "cs250", CLKEN_OV_VAL, 19, 0, 0),
};
-static __initdata struct of_device_id ext_clk_match[] = {
+static struct of_device_id ext_clk_match[] __initdata = {
{ .compatible = "samsung,clock-xtal", .data = (void *)0, },
{},
};
/* register exynos5440 clocks */
-void __init exynos5440_clk_init(struct device_node *np)
+static void __init exynos5440_clk_init(struct device_node *np)
{
void __iomem *reg_base;
@@ -132,7 +132,7 @@ void __init exynos5440_clk_init(struct device_node *np)
samsung_clk_register_gate(exynos5440_gate_clks,
ARRAY_SIZE(exynos5440_gate_clks));
- pr_info("Exynos5440: arm_clk = %ldHz\n", _get_rate("armclk"));
+ pr_info("Exynos5440: arm_clk = %ldHz\n", _get_rate("arm_clk"));
pr_info("exynos5440 clock initialization complete\n");
}
CLK_OF_DECLARE(exynos5440_clk, "samsung,exynos5440-clock", exynos5440_clk_init);
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 362f12d..529e11d 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -10,31 +10,73 @@
*/
#include <linux/errno.h>
+#include <linux/hrtimer.h>
#include "clk.h"
#include "clk-pll.h"
+#define PLL_TIMEOUT_MS 10
+
+struct samsung_clk_pll {
+ struct clk_hw hw;
+ void __iomem *lock_reg;
+ void __iomem *con_reg;
+ enum samsung_pll_type type;
+ unsigned int rate_count;
+ const struct samsung_pll_rate_table *rate_table;
+};
+
+#define to_clk_pll(_hw) container_of(_hw, struct samsung_clk_pll, hw)
+
+static const struct samsung_pll_rate_table *samsung_get_pll_settings(
+ struct samsung_clk_pll *pll, unsigned long rate)
+{
+ const struct samsung_pll_rate_table *rate_table = pll->rate_table;
+ int i;
+
+ for (i = 0; i < pll->rate_count; i++) {
+ if (rate == rate_table[i].rate)
+ return &rate_table[i];
+ }
+
+ return NULL;
+}
+
+static long samsung_pll_round_rate(struct clk_hw *hw,
+ unsigned long drate, unsigned long *prate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ const struct samsung_pll_rate_table *rate_table = pll->rate_table;
+ int i;
+
+ /* Assuming rate_table is in descending order */
+ for (i = 0; i < pll->rate_count; i++) {
+ if (drate >= rate_table[i].rate)
+ return rate_table[i].rate;
+ }
+
+ /* return minimum supported value */
+ return rate_table[i - 1].rate;
+}
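
Because the table is scanned top-down and kept sorted in descending order, round_rate() rounds a request down to the nearest supported rate, and anything below the smallest entry snaps up to it. A standalone model of that behaviour, using the exynos5250 epll rates from earlier in this patch (illustrative only):

#include <stdio.h>

static const unsigned long epll_rates[] = {
	192000000, 180633600, 180000000, 73728000, 67737600,
	49152000, 45158400, 32768000,	/* descending, as the driver assumes */
};

static unsigned long model_round_rate(unsigned long drate)
{
	int i;

	for (i = 0; i < 8; i++)
		if (drate >= epll_rates[i])
			return epll_rates[i];
	return epll_rates[i - 1];	/* minimum supported rate */
}

int main(void)
{
	printf("%lu\n", model_round_rate(200000000));	/* 192000000 */
	printf("%lu\n", model_round_rate(10000000));	/* 32768000 */
	return 0;
}
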
+
/*
* PLL35xx Clock Type
*/
+/* Maximum lock time can be 270 * PDIV cycles */
+#define PLL35XX_LOCK_FACTOR (270)
#define PLL35XX_MDIV_MASK (0x3FF)
#define PLL35XX_PDIV_MASK (0x3F)
#define PLL35XX_SDIV_MASK (0x7)
+#define PLL35XX_LOCK_STAT_MASK (0x1)
#define PLL35XX_MDIV_SHIFT (16)
#define PLL35XX_PDIV_SHIFT (8)
#define PLL35XX_SDIV_SHIFT (0)
-
-struct samsung_clk_pll35xx {
- struct clk_hw hw;
- const void __iomem *con_reg;
-};
-
-#define to_clk_pll35xx(_hw) container_of(_hw, struct samsung_clk_pll35xx, hw)
+#define PLL35XX_LOCK_STAT_SHIFT (29)
static unsigned long samsung_pll35xx_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct samsung_clk_pll35xx *pll = to_clk_pll35xx(hw);
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
u32 mdiv, pdiv, sdiv, pll_con;
u64 fvco = parent_rate;
@@ -49,48 +91,80 @@ static unsigned long samsung_pll35xx_recalc_rate(struct clk_hw *hw,
return (unsigned long)fvco;
}
-static const struct clk_ops samsung_pll35xx_clk_ops = {
- .recalc_rate = samsung_pll35xx_recalc_rate,
-};
-
-struct clk * __init samsung_clk_register_pll35xx(const char *name,
- const char *pname, const void __iomem *con_reg)
+static inline bool samsung_pll35xx_mp_change(
+ const struct samsung_pll_rate_table *rate, u32 pll_con)
{
- struct samsung_clk_pll35xx *pll;
- struct clk *clk;
- struct clk_init_data init;
+ u32 old_mdiv, old_pdiv;
- pll = kzalloc(sizeof(*pll), GFP_KERNEL);
- if (!pll) {
- pr_err("%s: could not allocate pll clk %s\n", __func__, name);
- return NULL;
+ old_mdiv = (pll_con >> PLL35XX_MDIV_SHIFT) & PLL35XX_MDIV_MASK;
+ old_pdiv = (pll_con >> PLL35XX_PDIV_SHIFT) & PLL35XX_PDIV_MASK;
+
+ return (rate->mdiv != old_mdiv || rate->pdiv != old_pdiv);
+}
+
+static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ const struct samsung_pll_rate_table *rate;
+ u32 tmp;
+
+ /* Get required rate settings from table */
+ rate = samsung_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, __clk_get_name(hw->clk));
+ return -EINVAL;
}
- init.name = name;
- init.ops = &samsung_pll35xx_clk_ops;
- init.flags = CLK_GET_RATE_NOCACHE;
- init.parent_names = &pname;
- init.num_parents = 1;
+ tmp = __raw_readl(pll->con_reg);
- pll->hw.init = &init;
- pll->con_reg = con_reg;
+ if (!(samsung_pll35xx_mp_change(rate, tmp))) {
+ /* If only s changes, update just the s value */
+ tmp &= ~(PLL35XX_SDIV_MASK << PLL35XX_SDIV_SHIFT);
+ tmp |= rate->sdiv << PLL35XX_SDIV_SHIFT;
+ __raw_writel(tmp, pll->con_reg);
- clk = clk_register(NULL, &pll->hw);
- if (IS_ERR(clk)) {
- pr_err("%s: failed to register pll clock %s\n", __func__,
- name);
- kfree(pll);
+ return 0;
}
- if (clk_register_clkdev(clk, name, NULL))
- pr_err("%s: failed to register lookup for %s", __func__, name);
-
- return clk;
+ /* Set PLL lock time. */
+ __raw_writel(rate->pdiv * PLL35XX_LOCK_FACTOR,
+ pll->lock_reg);
+
+ /* Change PLL PMS values */
+ tmp &= ~((PLL35XX_MDIV_MASK << PLL35XX_MDIV_SHIFT) |
+ (PLL35XX_PDIV_MASK << PLL35XX_PDIV_SHIFT) |
+ (PLL35XX_SDIV_MASK << PLL35XX_SDIV_SHIFT));
+ tmp |= (rate->mdiv << PLL35XX_MDIV_SHIFT) |
+ (rate->pdiv << PLL35XX_PDIV_SHIFT) |
+ (rate->sdiv << PLL35XX_SDIV_SHIFT);
+ __raw_writel(tmp, pll->con_reg);
+
+ /* Wait for the PLL to lock. */
+ do {
+ cpu_relax();
+ tmp = __raw_readl(pll->con_reg);
+ } while (!(tmp & (PLL35XX_LOCK_STAT_MASK
+ << PLL35XX_LOCK_STAT_SHIFT)));
+ return 0;
}
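
The busy-wait above has no timeout; it relies on the lock counter programmed just before, so the loop is expected to terminate within the hardware lock time. As a rough, illustrative estimate, assuming the lock counter is clocked by a 24 MHz fin_pll (the exact reference is SoC-specific) and a typical PDIV of 3:

#include <stdio.h>

/* Illustrative worst-case lock-time estimate for a PLL35xx; the
 * divider value and 24 MHz reference are assumptions, not driver data. */
int main(void)
{
	unsigned int pdiv = 3;			/* from a PLL_35XX_RATE entry */
	unsigned int cycles = pdiv * 270;	/* PLL35XX_LOCK_FACTOR */
	double us = cycles / 24.0;		/* cycles at 24 MHz -> microseconds */

	printf("%u cycles, %.2f us\n", cycles, us);	/* 810 cycles, 33.75 us */
	return 0;
}
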
+static const struct clk_ops samsung_pll35xx_clk_ops = {
+ .recalc_rate = samsung_pll35xx_recalc_rate,
+ .round_rate = samsung_pll_round_rate,
+ .set_rate = samsung_pll35xx_set_rate,
+};
+
+static const struct clk_ops samsung_pll35xx_clk_min_ops = {
+ .recalc_rate = samsung_pll35xx_recalc_rate,
+};
+
/*
* PLL36xx Clock Type
*/
+/* Maximum lock time can be 3000 * PDIV cycles */
+#define PLL36XX_LOCK_FACTOR (3000)
#define PLL36XX_KDIV_MASK (0xFFFF)
#define PLL36XX_MDIV_MASK (0x1FF)
@@ -99,18 +173,13 @@ struct clk * __init samsung_clk_register_pll35xx(const char *name,
#define PLL36XX_MDIV_SHIFT (16)
#define PLL36XX_PDIV_SHIFT (8)
#define PLL36XX_SDIV_SHIFT (0)
-
-struct samsung_clk_pll36xx {
- struct clk_hw hw;
- const void __iomem *con_reg;
-};
-
-#define to_clk_pll36xx(_hw) container_of(_hw, struct samsung_clk_pll36xx, hw)
+#define PLL36XX_KDIV_SHIFT (0)
+#define PLL36XX_LOCK_STAT_SHIFT (29)
static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct samsung_clk_pll36xx *pll = to_clk_pll36xx(hw);
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
u32 mdiv, pdiv, sdiv, pll_con0, pll_con1;
s16 kdiv;
u64 fvco = parent_rate;
@@ -129,68 +198,102 @@ static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw,
return (unsigned long)fvco;
}
-static const struct clk_ops samsung_pll36xx_clk_ops = {
- .recalc_rate = samsung_pll36xx_recalc_rate,
-};
-
-struct clk * __init samsung_clk_register_pll36xx(const char *name,
- const char *pname, const void __iomem *con_reg)
+static inline bool samsung_pll36xx_mpk_change(
+ const struct samsung_pll_rate_table *rate, u32 pll_con0, u32 pll_con1)
{
- struct samsung_clk_pll36xx *pll;
- struct clk *clk;
- struct clk_init_data init;
+ u32 old_mdiv, old_pdiv, old_kdiv;
- pll = kzalloc(sizeof(*pll), GFP_KERNEL);
- if (!pll) {
- pr_err("%s: could not allocate pll clk %s\n", __func__, name);
- return NULL;
+ old_mdiv = (pll_con0 >> PLL36XX_MDIV_SHIFT) & PLL36XX_MDIV_MASK;
+ old_pdiv = (pll_con0 >> PLL36XX_PDIV_SHIFT) & PLL36XX_PDIV_MASK;
+ old_kdiv = (pll_con1 >> PLL36XX_KDIV_SHIFT) & PLL36XX_KDIV_MASK;
+
+ return (rate->mdiv != old_mdiv || rate->pdiv != old_pdiv ||
+ rate->kdiv != old_kdiv);
+}
+
+static int samsung_pll36xx_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 tmp, pll_con0, pll_con1;
+ const struct samsung_pll_rate_table *rate;
+
+ rate = samsung_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, __clk_get_name(hw->clk));
+ return -EINVAL;
}
- init.name = name;
- init.ops = &samsung_pll36xx_clk_ops;
- init.flags = CLK_GET_RATE_NOCACHE;
- init.parent_names = &pname;
- init.num_parents = 1;
+ pll_con0 = __raw_readl(pll->con_reg);
+ pll_con1 = __raw_readl(pll->con_reg + 4);
- pll->hw.init = &init;
- pll->con_reg = con_reg;
+ if (!(samsung_pll36xx_mpk_change(rate, pll_con0, pll_con1))) {
+ /* If only s changes, update just the s value */
+ pll_con0 &= ~(PLL36XX_SDIV_MASK << PLL36XX_SDIV_SHIFT);
+ pll_con0 |= (rate->sdiv << PLL36XX_SDIV_SHIFT);
+ __raw_writel(pll_con0, pll->con_reg);
- clk = clk_register(NULL, &pll->hw);
- if (IS_ERR(clk)) {
- pr_err("%s: failed to register pll clock %s\n", __func__,
- name);
- kfree(pll);
+ return 0;
}
- if (clk_register_clkdev(clk, name, NULL))
- pr_err("%s: failed to register lookup for %s", __func__, name);
-
- return clk;
+ /* Set PLL lock time. */
+ __raw_writel(rate->pdiv * PLL36XX_LOCK_FACTOR, pll->lock_reg);
+
+ /* Change PLL PMS values */
+ pll_con0 &= ~((PLL36XX_MDIV_MASK << PLL36XX_MDIV_SHIFT) |
+ (PLL36XX_PDIV_MASK << PLL36XX_PDIV_SHIFT) |
+ (PLL36XX_SDIV_MASK << PLL36XX_SDIV_SHIFT));
+ pll_con0 |= (rate->mdiv << PLL36XX_MDIV_SHIFT) |
+ (rate->pdiv << PLL36XX_PDIV_SHIFT) |
+ (rate->sdiv << PLL36XX_SDIV_SHIFT);
+ __raw_writel(pll_con0, pll->con_reg);
+
+ pll_con1 &= ~(PLL36XX_KDIV_MASK << PLL36XX_KDIV_SHIFT);
+ pll_con1 |= rate->kdiv << PLL36XX_KDIV_SHIFT;
+ __raw_writel(pll_con1, pll->con_reg + 4);
+
+ /* Wait for the PLL to lock. */
+ do {
+ cpu_relax();
+ tmp = __raw_readl(pll->con_reg);
+ } while (!(tmp & (1 << PLL36XX_LOCK_STAT_SHIFT)));
+
+ return 0;
}
+static const struct clk_ops samsung_pll36xx_clk_ops = {
+ .recalc_rate = samsung_pll36xx_recalc_rate,
+ .set_rate = samsung_pll36xx_set_rate,
+ .round_rate = samsung_pll_round_rate,
+};
+
+static const struct clk_ops samsung_pll36xx_clk_min_ops = {
+ .recalc_rate = samsung_pll36xx_recalc_rate,
+};
+
/*
* PLL45xx Clock Type
*/
+#define PLL4502_LOCK_FACTOR 400
+#define PLL4508_LOCK_FACTOR 240
#define PLL45XX_MDIV_MASK (0x3FF)
#define PLL45XX_PDIV_MASK (0x3F)
#define PLL45XX_SDIV_MASK (0x7)
+#define PLL45XX_AFC_MASK (0x1F)
#define PLL45XX_MDIV_SHIFT (16)
#define PLL45XX_PDIV_SHIFT (8)
#define PLL45XX_SDIV_SHIFT (0)
+#define PLL45XX_AFC_SHIFT (0)
-struct samsung_clk_pll45xx {
- struct clk_hw hw;
- enum pll45xx_type type;
- const void __iomem *con_reg;
-};
-
-#define to_clk_pll45xx(_hw) container_of(_hw, struct samsung_clk_pll45xx, hw)
+#define PLL45XX_ENABLE BIT(31)
+#define PLL45XX_LOCKED BIT(29)
static unsigned long samsung_pll45xx_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct samsung_clk_pll45xx *pll = to_clk_pll45xx(hw);
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
u32 mdiv, pdiv, sdiv, pll_con;
u64 fvco = parent_rate;
@@ -208,54 +311,113 @@ static unsigned long samsung_pll45xx_recalc_rate(struct clk_hw *hw,
return (unsigned long)fvco;
}
-static const struct clk_ops samsung_pll45xx_clk_ops = {
- .recalc_rate = samsung_pll45xx_recalc_rate,
-};
-
-struct clk * __init samsung_clk_register_pll45xx(const char *name,
- const char *pname, const void __iomem *con_reg,
- enum pll45xx_type type)
+static bool samsung_pll45xx_mp_change(u32 pll_con0, u32 pll_con1,
+ const struct samsung_pll_rate_table *rate)
{
- struct samsung_clk_pll45xx *pll;
- struct clk *clk;
- struct clk_init_data init;
+ u32 old_mdiv, old_pdiv, old_afc;
- pll = kzalloc(sizeof(*pll), GFP_KERNEL);
- if (!pll) {
- pr_err("%s: could not allocate pll clk %s\n", __func__, name);
- return NULL;
+ old_mdiv = (pll_con0 >> PLL45XX_MDIV_SHIFT) & PLL45XX_MDIV_MASK;
+ old_pdiv = (pll_con0 >> PLL45XX_PDIV_SHIFT) & PLL45XX_PDIV_MASK;
+ old_afc = (pll_con1 >> PLL45XX_AFC_SHIFT) & PLL45XX_AFC_MASK;
+
+ return (old_mdiv != rate->mdiv || old_pdiv != rate->pdiv
+ || old_afc != rate->afc);
+}
+
+static int samsung_pll45xx_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ const struct samsung_pll_rate_table *rate;
+ u32 con0, con1;
+ ktime_t start;
+
+ /* Get required rate settings from table */
+ rate = samsung_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, __clk_get_name(hw->clk));
+ return -EINVAL;
}
- init.name = name;
- init.ops = &samsung_pll45xx_clk_ops;
- init.flags = CLK_GET_RATE_NOCACHE;
- init.parent_names = &pname;
- init.num_parents = 1;
+ con0 = __raw_readl(pll->con_reg);
+ con1 = __raw_readl(pll->con_reg + 0x4);
- pll->hw.init = &init;
- pll->con_reg = con_reg;
- pll->type = type;
+ if (!(samsung_pll45xx_mp_change(con0, con1, rate))) {
+ /* If only s changes, update just the s value */
+ con0 &= ~(PLL45XX_SDIV_MASK << PLL45XX_SDIV_SHIFT);
+ con0 |= rate->sdiv << PLL45XX_SDIV_SHIFT;
+ __raw_writel(con0, pll->con_reg);
- clk = clk_register(NULL, &pll->hw);
- if (IS_ERR(clk)) {
- pr_err("%s: failed to register pll clock %s\n", __func__,
- name);
- kfree(pll);
+ return 0;
}
- if (clk_register_clkdev(clk, name, NULL))
- pr_err("%s: failed to register lookup for %s", __func__, name);
+ /* Set PLL PMS values. */
+ con0 &= ~((PLL45XX_MDIV_MASK << PLL45XX_MDIV_SHIFT) |
+ (PLL45XX_PDIV_MASK << PLL45XX_PDIV_SHIFT) |
+ (PLL45XX_SDIV_MASK << PLL45XX_SDIV_SHIFT));
+ con0 |= (rate->mdiv << PLL45XX_MDIV_SHIFT) |
+ (rate->pdiv << PLL45XX_PDIV_SHIFT) |
+ (rate->sdiv << PLL45XX_SDIV_SHIFT);
+
+ /* Set PLL AFC value. */
+ con1 = __raw_readl(pll->con_reg + 0x4);
+ con1 &= ~(PLL45XX_AFC_MASK << PLL45XX_AFC_SHIFT);
+ con1 |= (rate->afc << PLL45XX_AFC_SHIFT);
+
+ /* Set PLL lock time. */
+ switch (pll->type) {
+ case pll_4502:
+ __raw_writel(rate->pdiv * PLL4502_LOCK_FACTOR, pll->lock_reg);
+ break;
+ case pll_4508:
+ __raw_writel(rate->pdiv * PLL4508_LOCK_FACTOR, pll->lock_reg);
+ break;
+ default:
+ break;
+ }
+
+ /* Set new configuration. */
+ __raw_writel(con1, pll->con_reg + 0x4);
+ __raw_writel(con0, pll->con_reg);
+
+ /* Wait for locking. */
+ start = ktime_get();
+ while (!(__raw_readl(pll->con_reg) & PLL45XX_LOCKED)) {
+ ktime_t delta = ktime_sub(ktime_get(), start);
+
+ if (ktime_to_ms(delta) > PLL_TIMEOUT_MS) {
+ pr_err("%s: could not lock PLL %s\n",
+ __func__, __clk_get_name(hw->clk));
+ return -EFAULT;
+ }
+
+ cpu_relax();
+ }
- return clk;
+ return 0;
}
+static const struct clk_ops samsung_pll45xx_clk_ops = {
+ .recalc_rate = samsung_pll45xx_recalc_rate,
+ .round_rate = samsung_pll_round_rate,
+ .set_rate = samsung_pll45xx_set_rate,
+};
+
+static const struct clk_ops samsung_pll45xx_clk_min_ops = {
+ .recalc_rate = samsung_pll45xx_recalc_rate,
+};
+
/*
* PLL46xx Clock Type
*/
+#define PLL46XX_LOCK_FACTOR 3000
+#define PLL46XX_VSEL_MASK (1)
#define PLL46XX_MDIV_MASK (0x1FF)
#define PLL46XX_PDIV_MASK (0x3F)
#define PLL46XX_SDIV_MASK (0x7)
+#define PLL46XX_VSEL_SHIFT (27)
#define PLL46XX_MDIV_SHIFT (16)
#define PLL46XX_PDIV_SHIFT (8)
#define PLL46XX_SDIV_SHIFT (0)
@@ -263,19 +425,20 @@ struct clk * __init samsung_clk_register_pll45xx(const char *name,
#define PLL46XX_KDIV_MASK (0xFFFF)
#define PLL4650C_KDIV_MASK (0xFFF)
#define PLL46XX_KDIV_SHIFT (0)
+#define PLL46XX_MFR_MASK (0x3F)
+#define PLL46XX_MRR_MASK (0x1F)
+#define PLL46XX_MFR_SHIFT (16)
+#define PLL46XX_MRR_SHIFT (24)
-struct samsung_clk_pll46xx {
- struct clk_hw hw;
- enum pll46xx_type type;
- const void __iomem *con_reg;
-};
-
-#define to_clk_pll46xx(_hw) container_of(_hw, struct samsung_clk_pll46xx, hw)
+#define PLL46XX_ENABLE BIT(31)
+#define PLL46XX_LOCKED BIT(29)
+#define PLL46XX_VSEL BIT(27)
static unsigned long samsung_pll46xx_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct samsung_clk_pll46xx *pll = to_clk_pll46xx(hw);
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con1, shift;
u64 fvco = parent_rate;
@@ -295,47 +458,175 @@ static unsigned long samsung_pll46xx_recalc_rate(struct clk_hw *hw,
return (unsigned long)fvco;
}
+static bool samsung_pll46xx_mpk_change(u32 pll_con0, u32 pll_con1,
+ const struct samsung_pll_rate_table *rate)
+{
+ u32 old_mdiv, old_pdiv, old_kdiv;
+
+ old_mdiv = (pll_con0 >> PLL46XX_MDIV_SHIFT) & PLL46XX_MDIV_MASK;
+ old_pdiv = (pll_con0 >> PLL46XX_PDIV_SHIFT) & PLL46XX_PDIV_MASK;
+ old_kdiv = (pll_con1 >> PLL46XX_KDIV_SHIFT) & PLL46XX_KDIV_MASK;
+
+ return (old_mdiv != rate->mdiv || old_pdiv != rate->pdiv
+ || old_kdiv != rate->kdiv);
+}
+
+static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ const struct samsung_pll_rate_table *rate;
+ u32 con0, con1, lock;
+ ktime_t start;
+
+ /* Get required rate settings from table */
+ rate = samsung_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, __clk_get_name(hw->clk));
+ return -EINVAL;
+ }
+
+ con0 = __raw_readl(pll->con_reg);
+ con1 = __raw_readl(pll->con_reg + 0x4);
+
+ if (!(samsung_pll46xx_mpk_change(con0, con1, rate))) {
+ /* If only s changes, update just the s value */
+ con0 &= ~(PLL46XX_SDIV_MASK << PLL46XX_SDIV_SHIFT);
+ con0 |= rate->sdiv << PLL46XX_SDIV_SHIFT;
+ __raw_writel(con0, pll->con_reg);
+
+ return 0;
+ }
+
+ /* Set PLL lock time. */
+ lock = rate->pdiv * PLL46XX_LOCK_FACTOR;
+ if (lock > 0xffff)
+ /* Maximum lock time bitfield is 16-bit. */
+ lock = 0xffff;
+
+ /* Set PLL PMS and VSEL values. */
+ con0 &= ~((PLL46XX_MDIV_MASK << PLL46XX_MDIV_SHIFT) |
+ (PLL46XX_PDIV_MASK << PLL46XX_PDIV_SHIFT) |
+ (PLL46XX_SDIV_MASK << PLL46XX_SDIV_SHIFT) |
+ (PLL46XX_VSEL_MASK << PLL46XX_VSEL_SHIFT));
+ con0 |= (rate->mdiv << PLL46XX_MDIV_SHIFT) |
+ (rate->pdiv << PLL46XX_PDIV_SHIFT) |
+ (rate->sdiv << PLL46XX_SDIV_SHIFT) |
+ (rate->vsel << PLL46XX_VSEL_SHIFT);
+
+ /* Set PLL K, MFR and MRR values. */
+ con1 = __raw_readl(pll->con_reg + 0x4);
+ con1 &= ~((PLL46XX_KDIV_MASK << PLL46XX_KDIV_SHIFT) |
+ (PLL46XX_MFR_MASK << PLL46XX_MFR_SHIFT) |
+ (PLL46XX_MRR_MASK << PLL46XX_MRR_SHIFT));
+ con1 |= (rate->kdiv << PLL46XX_KDIV_SHIFT) |
+ (rate->mfr << PLL46XX_MFR_SHIFT) |
+ (rate->mrr << PLL46XX_MRR_SHIFT);
+
+ /* Write configuration to PLL */
+ __raw_writel(lock, pll->lock_reg);
+ __raw_writel(con0, pll->con_reg);
+ __raw_writel(con1, pll->con_reg + 0x4);
+
+ /* Wait for locking. */
+ start = ktime_get();
+ while (!(__raw_readl(pll->con_reg) & PLL46XX_LOCKED)) {
+ ktime_t delta = ktime_sub(ktime_get(), start);
+
+ if (ktime_to_ms(delta) > PLL_TIMEOUT_MS) {
+ pr_err("%s: could not lock PLL %s\n",
+ __func__, __clk_get_name(hw->clk));
+ return -EFAULT;
+ }
+
+ cpu_relax();
+ }
+
+ return 0;
+}
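
The clamp matters because PLL46XX_LOCK_FACTOR is large: with a 16-bit lock bitfield (maximum 0xffff = 65535), any PDIV of 22 or more would overflow 3000 * PDIV. A small host-side check, with a hypothetical divider:

#include <stdio.h>

/* Host-side check of the 16-bit lock-count clamp used above. */
int main(void)
{
	unsigned int pdiv = 22;			/* hypothetical divider */
	unsigned int lock = pdiv * 3000;	/* 66000: exceeds 0xffff */

	if (lock > 0xffff)
		lock = 0xffff;			/* 16-bit bitfield limit */
	printf("%u\n", lock);			/* 65535 */
	return 0;
}
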
+
static const struct clk_ops samsung_pll46xx_clk_ops = {
.recalc_rate = samsung_pll46xx_recalc_rate,
+ .round_rate = samsung_pll_round_rate,
+ .set_rate = samsung_pll46xx_set_rate,
+};
+
+static const struct clk_ops samsung_pll46xx_clk_min_ops = {
+ .recalc_rate = samsung_pll46xx_recalc_rate,
};
-struct clk * __init samsung_clk_register_pll46xx(const char *name,
- const char *pname, const void __iomem *con_reg,
- enum pll46xx_type type)
+/*
+ * PLL6552 Clock Type
+ */
+
+#define PLL6552_MDIV_MASK 0x3ff
+#define PLL6552_PDIV_MASK 0x3f
+#define PLL6552_SDIV_MASK 0x7
+#define PLL6552_MDIV_SHIFT 16
+#define PLL6552_PDIV_SHIFT 8
+#define PLL6552_SDIV_SHIFT 0
+
+static unsigned long samsung_pll6552_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
{
- struct samsung_clk_pll46xx *pll;
- struct clk *clk;
- struct clk_init_data init;
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 mdiv, pdiv, sdiv, pll_con;
+ u64 fvco = parent_rate;
- pll = kzalloc(sizeof(*pll), GFP_KERNEL);
- if (!pll) {
- pr_err("%s: could not allocate pll clk %s\n", __func__, name);
- return NULL;
- }
+ pll_con = __raw_readl(pll->con_reg);
+ mdiv = (pll_con >> PLL6552_MDIV_SHIFT) & PLL6552_MDIV_MASK;
+ pdiv = (pll_con >> PLL6552_PDIV_SHIFT) & PLL6552_PDIV_MASK;
+ sdiv = (pll_con >> PLL6552_SDIV_SHIFT) & PLL6552_SDIV_MASK;
- init.name = name;
- init.ops = &samsung_pll46xx_clk_ops;
- init.flags = CLK_GET_RATE_NOCACHE;
- init.parent_names = &pname;
- init.num_parents = 1;
+ fvco *= mdiv;
+ do_div(fvco, (pdiv << sdiv));
- pll->hw.init = &init;
- pll->con_reg = con_reg;
- pll->type = type;
+ return (unsigned long)fvco;
+}
- clk = clk_register(NULL, &pll->hw);
- if (IS_ERR(clk)) {
- pr_err("%s: failed to register pll clock %s\n", __func__,
- name);
- kfree(pll);
- }
+static const struct clk_ops samsung_pll6552_clk_ops = {
+ .recalc_rate = samsung_pll6552_recalc_rate,
+};
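
PLL6552 is a plain integer PLL, fout = MDIV * FIN / (PDIV << SDIV), with no fractional K term. A sketch with made-up divider values, assuming a 12 MHz fin_pll:

#include <stdint.h>
#include <stdio.h>

/* Integer-PLL relation used by samsung_pll6552_recalc_rate();
 * the divider values below are hypothetical. */
static uint64_t pll6552_rate(uint64_t fin, uint32_t m, uint32_t p, uint32_t s)
{
	uint64_t fvco = fin * m;

	fvco /= (uint64_t)p << s;
	return fvco;
}

int main(void)
{
	/* 12 MHz * 266 / (3 << 1) = 532 MHz */
	printf("%llu\n", (unsigned long long)pll6552_rate(12000000, 266, 3, 1));
	return 0;
}
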
- if (clk_register_clkdev(clk, name, NULL))
- pr_err("%s: failed to register lookup for %s", __func__, name);
+/*
+ * PLL6553 Clock Type
+ */
- return clk;
+#define PLL6553_MDIV_MASK 0xff
+#define PLL6553_PDIV_MASK 0x3f
+#define PLL6553_SDIV_MASK 0x7
+#define PLL6553_KDIV_MASK 0xffff
+#define PLL6553_MDIV_SHIFT 16
+#define PLL6553_PDIV_SHIFT 8
+#define PLL6553_SDIV_SHIFT 0
+#define PLL6553_KDIV_SHIFT 0
+
+static unsigned long samsung_pll6553_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con1;
+ u64 fvco = parent_rate;
+
+ pll_con0 = __raw_readl(pll->con_reg);
+ pll_con1 = __raw_readl(pll->con_reg + 0x4);
+ mdiv = (pll_con0 >> PLL6553_MDIV_SHIFT) & PLL6553_MDIV_MASK;
+ pdiv = (pll_con0 >> PLL6553_PDIV_SHIFT) & PLL6553_PDIV_MASK;
+ sdiv = (pll_con0 >> PLL6553_SDIV_SHIFT) & PLL6553_SDIV_MASK;
+ kdiv = (pll_con1 >> PLL6553_KDIV_SHIFT) & PLL6553_KDIV_MASK;
+
+ fvco *= (mdiv << 16) + kdiv;
+ do_div(fvco, (pdiv << sdiv));
+ fvco >>= 16;
+
+ return (unsigned long)fvco;
}
+static const struct clk_ops samsung_pll6553_clk_ops = {
+ .recalc_rate = samsung_pll6553_recalc_rate,
+};
+
/*
* PLL2550x Clock Type
*/
@@ -418,3 +709,117 @@ struct clk * __init samsung_clk_register_pll2550x(const char *name,
return clk;
}
+
+static void __init _samsung_clk_register_pll(struct samsung_pll_clock *pll_clk,
+ void __iomem *base)
+{
+ struct samsung_clk_pll *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+ int ret, len;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll) {
+ pr_err("%s: could not allocate pll clk %s\n",
+ __func__, pll_clk->name);
+ return;
+ }
+
+ init.name = pll_clk->name;
+ init.flags = pll_clk->flags;
+ init.parent_names = &pll_clk->parent_name;
+ init.num_parents = 1;
+
+ if (pll_clk->rate_table) {
+ /* find count of rates in rate_table */
+ for (len = 0; pll_clk->rate_table[len].rate != 0; )
+ len++;
+
+ pll->rate_count = len;
+ pll->rate_table = kmemdup(pll_clk->rate_table,
+ pll->rate_count *
+ sizeof(struct samsung_pll_rate_table),
+ GFP_KERNEL);
+ WARN(!pll->rate_table,
+ "%s: could not allocate rate table for %s\n",
+ __func__, pll_clk->name);
+ }
+
+ switch (pll_clk->type) {
+ /* clk_ops for 35xx and 2550 are similar */
+ case pll_35xx:
+ case pll_2550:
+ if (!pll->rate_table)
+ init.ops = &samsung_pll35xx_clk_min_ops;
+ else
+ init.ops = &samsung_pll35xx_clk_ops;
+ break;
+ case pll_4500:
+ init.ops = &samsung_pll45xx_clk_min_ops;
+ break;
+ case pll_4502:
+ case pll_4508:
+ if (!pll->rate_table)
+ init.ops = &samsung_pll45xx_clk_min_ops;
+ else
+ init.ops = &samsung_pll45xx_clk_ops;
+ break;
+ /* clk_ops for 36xx and 2650 are similar */
+ case pll_36xx:
+ case pll_2650:
+ if (!pll->rate_table)
+ init.ops = &samsung_pll36xx_clk_min_ops;
+ else
+ init.ops = &samsung_pll36xx_clk_ops;
+ break;
+ case pll_6552:
+ init.ops = &samsung_pll6552_clk_ops;
+ break;
+ case pll_6553:
+ init.ops = &samsung_pll6553_clk_ops;
+ break;
+ case pll_4600:
+ case pll_4650:
+ case pll_4650c:
+ if (!pll->rate_table)
+ init.ops = &samsung_pll46xx_clk_min_ops;
+ else
+ init.ops = &samsung_pll46xx_clk_ops;
+ break;
+ default:
+ pr_warn("%s: Unknown pll type for pll clk %s\n",
+ __func__, pll_clk->name);
+ }
+
+ pll->hw.init = &init;
+ pll->type = pll_clk->type;
+ pll->lock_reg = base + pll_clk->lock_offset;
+ pll->con_reg = base + pll_clk->con_offset;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register pll clock %s : %ld\n",
+ __func__, pll_clk->name, PTR_ERR(clk));
+ kfree(pll);
+ return;
+ }
+
+ samsung_clk_add_lookup(clk, pll_clk->id);
+
+ if (!pll_clk->alias)
+ return;
+
+ ret = clk_register_clkdev(clk, pll_clk->alias, pll_clk->dev_name);
+ if (ret)
+ pr_err("%s: failed to register lookup for %s : %d",
+ __func__, pll_clk->name, ret);
+}
+
+void __init samsung_clk_register_pll(struct samsung_pll_clock *pll_list,
+ unsigned int nr_pll, void __iomem *base)
+{
+ int cnt;
+
+ for (cnt = 0; cnt < nr_pll; cnt++)
+ _samsung_clk_register_pll(&pll_list[cnt], base);
+}
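
Taken together, this lets an SoC driver describe all of its PLLs as static data and register them in one call. A minimal, hypothetical use — one 35xx-type PLL with a one-entry rate table; the clock ID, names and register offsets below are made up and only illustrate the PLL(type, id, name, parent, lock offset, con offset, rate table) argument order:

#include <linux/of_address.h>

#include "clk.h"
#include "clk-pll.h"

/* Hypothetical SoC: one 35xx-type PLL. fout_apll and the 0x0/0x100
 * offsets are illustrative, not taken from a real SoC. */
static struct samsung_pll_rate_table foo_apll_rates[] __initdata = {
	/* PLL_35XX_RATE(rate, m, p, s): 24 MHz * 250 / (3 << 1) = 1 GHz */
	PLL_35XX_RATE(1000000000, 250, 3, 1),
	{ },
};

static struct samsung_pll_clock foo_plls[] __initdata = {
	PLL(pll_35xx, fout_apll, "fout_apll", "fin_pll",
			0x0 /* lock */, 0x100 /* con0 */, foo_apll_rates),
};

static void __init foo_clk_init(struct device_node *np)
{
	void __iomem *reg_base = of_iomap(np, 0);

	samsung_clk_register_pll(foo_plls, ARRAY_SIZE(foo_plls), reg_base);
}
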
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index f33786e..6c39030 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -12,28 +12,83 @@
#ifndef __SAMSUNG_CLK_PLL_H
#define __SAMSUNG_CLK_PLL_H
-enum pll45xx_type {
+enum samsung_pll_type {
+ pll_35xx,
+ pll_36xx,
+ pll_2550,
+ pll_2650,
pll_4500,
pll_4502,
- pll_4508
-};
-
-enum pll46xx_type {
+ pll_4508,
pll_4600,
pll_4650,
pll_4650c,
+ pll_6552,
+ pll_6553,
+};
+
+#define PLL_35XX_RATE(_rate, _m, _p, _s) \
+ { \
+ .rate = (_rate), \
+ .mdiv = (_m), \
+ .pdiv = (_p), \
+ .sdiv = (_s), \
+ }
+
+#define PLL_36XX_RATE(_rate, _m, _p, _s, _k) \
+ { \
+ .rate = (_rate), \
+ .mdiv = (_m), \
+ .pdiv = (_p), \
+ .sdiv = (_s), \
+ .kdiv = (_k), \
+ }
+
+#define PLL_45XX_RATE(_rate, _m, _p, _s, _afc) \
+ { \
+ .rate = (_rate), \
+ .mdiv = (_m), \
+ .pdiv = (_p), \
+ .sdiv = (_s), \
+ .afc = (_afc), \
+ }
+
+#define PLL_4600_RATE(_rate, _m, _p, _s, _k, _vsel) \
+ { \
+ .rate = (_rate), \
+ .mdiv = (_m), \
+ .pdiv = (_p), \
+ .sdiv = (_s), \
+ .kdiv = (_k), \
+ .vsel = (_vsel), \
+ }
+
+#define PLL_4650_RATE(_rate, _m, _p, _s, _k, _mfr, _mrr, _vsel) \
+ { \
+ .rate = (_rate), \
+ .mdiv = (_m), \
+ .pdiv = (_p), \
+ .sdiv = (_s), \
+ .kdiv = (_k), \
+ .mfr = (_mfr), \
+ .mrr = (_mrr), \
+ .vsel = (_vsel), \
+ }
+
+/* NOTE: Rate table should be kept sorted in descending order. */
+
+struct samsung_pll_rate_table {
+ unsigned int rate;
+ unsigned int pdiv;
+ unsigned int mdiv;
+ unsigned int sdiv;
+ unsigned int kdiv;
+ unsigned int afc;
+ unsigned int mfr;
+ unsigned int mrr;
+ unsigned int vsel;
};
-extern struct clk * __init samsung_clk_register_pll35xx(const char *name,
- const char *pname, const void __iomem *con_reg);
-extern struct clk * __init samsung_clk_register_pll36xx(const char *name,
- const char *pname, const void __iomem *con_reg);
-extern struct clk * __init samsung_clk_register_pll45xx(const char *name,
- const char *pname, const void __iomem *con_reg,
- enum pll45xx_type type);
-extern struct clk * __init samsung_clk_register_pll46xx(const char *name,
- const char *pname, const void __iomem *con_reg,
- enum pll46xx_type type);
extern struct clk * __init samsung_clk_register_pll2550x(const char *name,
const char *pname, const void __iomem *reg_base,
const unsigned long offset);
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
new file mode 100644
index 0000000..7d2c842
--- /dev/null
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -0,0 +1,473 @@
+/*
+ * Copyright (c) 2013 Tomasz Figa <tomasz.figa at gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for all S3C64xx SoCs.
+*/
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <dt-bindings/clock/samsung,s3c64xx-clock.h>
+
+#include "clk.h"
+#include "clk-pll.h"
+
+/* S3C64xx clock controller register offsets. */
+#define APLL_LOCK 0x000
+#define MPLL_LOCK 0x004
+#define EPLL_LOCK 0x008
+#define APLL_CON 0x00c
+#define MPLL_CON 0x010
+#define EPLL_CON0 0x014
+#define EPLL_CON1 0x018
+#define CLK_SRC 0x01c
+#define CLK_DIV0 0x020
+#define CLK_DIV1 0x024
+#define CLK_DIV2 0x028
+#define HCLK_GATE 0x030
+#define PCLK_GATE 0x034
+#define SCLK_GATE 0x038
+#define MEM0_GATE 0x03c
+#define CLK_SRC2 0x10c
+#define OTHERS 0x900
+
+/* Helper macros to define clock arrays. */
+#define FIXED_RATE_CLOCKS(name) \
+ static struct samsung_fixed_rate_clock name[]
+#define MUX_CLOCKS(name) \
+ static struct samsung_mux_clock name[]
+#define DIV_CLOCKS(name) \
+ static struct samsung_div_clock name[]
+#define GATE_CLOCKS(name) \
+ static struct samsung_gate_clock name[]
+
+/* Helper macros for gate types present on S3C64xx. */
+#define GATE_BUS(_id, cname, pname, o, b) \
+ GATE(_id, cname, pname, o, b, 0, 0)
+#define GATE_SCLK(_id, cname, pname, o, b) \
+ GATE(_id, cname, pname, o, b, CLK_SET_RATE_PARENT, 0)
+#define GATE_ON(_id, cname, pname, o, b) \
+ GATE(_id, cname, pname, o, b, CLK_IGNORE_UNUSED, 0)
+
+/* list of PLLs to be registered */
+enum s3c64xx_plls {
+ apll, mpll, epll,
+};
+
+/*
+ * List of controller registers to be saved and restored during
+ * a suspend/resume cycle.
+ */
+static unsigned long s3c64xx_clk_regs[] __initdata = {
+ APLL_LOCK,
+ MPLL_LOCK,
+ EPLL_LOCK,
+ APLL_CON,
+ MPLL_CON,
+ EPLL_CON0,
+ EPLL_CON1,
+ CLK_SRC,
+ CLK_DIV0,
+ CLK_DIV1,
+ CLK_DIV2,
+ HCLK_GATE,
+ PCLK_GATE,
+ SCLK_GATE,
+};
+
+static unsigned long s3c6410_clk_regs[] __initdata = {
+ CLK_SRC2,
+ MEM0_GATE,
+};
+
+/* List of parent clocks common for all S3C64xx SoCs. */
+PNAME(spi_mmc_p) = { "mout_epll", "dout_mpll", "fin_pll", "clk27m" };
+PNAME(uart_p) = { "mout_epll", "dout_mpll" };
+PNAME(audio0_p) = { "mout_epll", "dout_mpll", "fin_pll", "iiscdclk0",
+ "pcmcdclk0", "none", "none", "none" };
+PNAME(audio1_p) = { "mout_epll", "dout_mpll", "fin_pll", "iiscdclk1",
+ "pcmcdclk0", "none", "none", "none" };
+PNAME(mfc_p) = { "hclkx2", "mout_epll" };
+PNAME(apll_p) = { "fin_pll", "fout_apll" };
+PNAME(mpll_p) = { "fin_pll", "fout_mpll" };
+PNAME(epll_p) = { "fin_pll", "fout_epll" };
+PNAME(hclkx2_p) = { "mout_mpll", "mout_apll" };
+
+/* S3C6400-specific parent clocks. */
+PNAME(scaler_lcd_p6400) = { "mout_epll", "dout_mpll", "none", "none" };
+PNAME(irda_p6400) = { "mout_epll", "dout_mpll", "none", "clk48m" };
+PNAME(uhost_p6400) = { "clk48m", "mout_epll", "dout_mpll", "none" };
+
+/* S3C6410-specific parent clocks. */
+PNAME(clk27_p6410) = { "clk27m", "fin_pll" };
+PNAME(scaler_lcd_p6410) = { "mout_epll", "dout_mpll", "fin_pll", "none" };
+PNAME(irda_p6410) = { "mout_epll", "dout_mpll", "fin_pll", "clk48m" };
+PNAME(uhost_p6410) = { "clk48m", "mout_epll", "dout_mpll", "fin_pll" };
+PNAME(audio2_p6410) = { "mout_epll", "dout_mpll", "fin_pll", "iiscdclk2",
+ "pcmcdclk1", "none", "none", "none" };
+
+/* Fixed rate clocks generated outside the SoC. */
+FIXED_RATE_CLOCKS(s3c64xx_fixed_rate_ext_clks) __initdata = {
+ FRATE(0, "fin_pll", NULL, CLK_IS_ROOT, 0),
+ FRATE(0, "xusbxti", NULL, CLK_IS_ROOT, 0),
+};
+
+/* Fixed rate clocks generated inside the SoC. */
+FIXED_RATE_CLOCKS(s3c64xx_fixed_rate_clks) __initdata = {
+ FRATE(CLK27M, "clk27m", NULL, CLK_IS_ROOT, 27000000),
+ FRATE(CLK48M, "clk48m", NULL, CLK_IS_ROOT, 48000000),
+};
+
+/* List of clock muxes present on all S3C64xx SoCs. */
+MUX_CLOCKS(s3c64xx_mux_clks) __initdata = {
+ MUX_F(0, "mout_syncmux", hclkx2_p, OTHERS, 6, 1, 0, CLK_MUX_READ_ONLY),
+ MUX(MOUT_APLL, "mout_apll", apll_p, CLK_SRC, 0, 1),
+ MUX(MOUT_MPLL, "mout_mpll", mpll_p, CLK_SRC, 1, 1),
+ MUX(MOUT_EPLL, "mout_epll", epll_p, CLK_SRC, 2, 1),
+ MUX(MOUT_MFC, "mout_mfc", mfc_p, CLK_SRC, 4, 1),
+ MUX(MOUT_AUDIO0, "mout_audio0", audio0_p, CLK_SRC, 7, 3),
+ MUX(MOUT_AUDIO1, "mout_audio1", audio1_p, CLK_SRC, 10, 3),
+ MUX(MOUT_UART, "mout_uart", uart_p, CLK_SRC, 13, 1),
+ MUX(MOUT_SPI0, "mout_spi0", spi_mmc_p, CLK_SRC, 14, 2),
+ MUX(MOUT_SPI1, "mout_spi1", spi_mmc_p, CLK_SRC, 16, 2),
+ MUX(MOUT_MMC0, "mout_mmc0", spi_mmc_p, CLK_SRC, 18, 2),
+ MUX(MOUT_MMC1, "mout_mmc1", spi_mmc_p, CLK_SRC, 20, 2),
+ MUX(MOUT_MMC2, "mout_mmc2", spi_mmc_p, CLK_SRC, 22, 2),
+};
+
+/* List of clock muxes present on S3C6400. */
+MUX_CLOCKS(s3c6400_mux_clks) __initdata = {
+ MUX(MOUT_UHOST, "mout_uhost", uhost_p6400, CLK_SRC, 5, 2),
+ MUX(MOUT_IRDA, "mout_irda", irda_p6400, CLK_SRC, 24, 2),
+ MUX(MOUT_LCD, "mout_lcd", scaler_lcd_p6400, CLK_SRC, 26, 2),
+ MUX(MOUT_SCALER, "mout_scaler", scaler_lcd_p6400, CLK_SRC, 28, 2),
+};
+
+/* List of clock muxes present on S3C6410. */
+MUX_CLOCKS(s3c6410_mux_clks) __initdata = {
+ MUX(MOUT_UHOST, "mout_uhost", uhost_p6410, CLK_SRC, 5, 2),
+ MUX(MOUT_IRDA, "mout_irda", irda_p6410, CLK_SRC, 24, 2),
+ MUX(MOUT_LCD, "mout_lcd", scaler_lcd_p6410, CLK_SRC, 26, 2),
+ MUX(MOUT_SCALER, "mout_scaler", scaler_lcd_p6410, CLK_SRC, 28, 2),
+ MUX(MOUT_DAC27, "mout_dac27", clk27_p6410, CLK_SRC, 30, 1),
+ MUX(MOUT_TV27, "mout_tv27", clk27_p6410, CLK_SRC, 31, 1),
+ MUX(MOUT_AUDIO2, "mout_audio2", audio2_p6410, CLK_SRC2, 0, 3),
+};
+
+/* List of clock dividers present on all S3C64xx SoCs. */
+DIV_CLOCKS(s3c64xx_div_clks) __initdata = {
+ DIV(DOUT_MPLL, "dout_mpll", "mout_mpll", CLK_DIV0, 4, 1),
+ DIV(HCLKX2, "hclkx2", "mout_syncmux", CLK_DIV0, 9, 3),
+ DIV(HCLK, "hclk", "hclkx2", CLK_DIV0, 8, 1),
+ DIV(PCLK, "pclk", "hclkx2", CLK_DIV0, 12, 4),
+ DIV(DOUT_SECUR, "dout_secur", "hclkx2", CLK_DIV0, 18, 2),
+ DIV(DOUT_CAM, "dout_cam", "hclkx2", CLK_DIV0, 20, 4),
+ DIV(DOUT_JPEG, "dout_jpeg", "hclkx2", CLK_DIV0, 24, 4),
+ DIV(DOUT_MFC, "dout_mfc", "mout_mfc", CLK_DIV0, 28, 4),
+ DIV(DOUT_MMC0, "dout_mmc0", "mout_mmc0", CLK_DIV1, 0, 4),
+ DIV(DOUT_MMC1, "dout_mmc1", "mout_mmc1", CLK_DIV1, 4, 4),
+ DIV(DOUT_MMC2, "dout_mmc2", "mout_mmc2", CLK_DIV1, 8, 4),
+ DIV(DOUT_LCD, "dout_lcd", "mout_lcd", CLK_DIV1, 12, 4),
+ DIV(DOUT_SCALER, "dout_scaler", "mout_scaler", CLK_DIV1, 16, 4),
+ DIV(DOUT_UHOST, "dout_uhost", "mout_uhost", CLK_DIV1, 20, 4),
+ DIV(DOUT_SPI0, "dout_spi0", "mout_spi0", CLK_DIV2, 0, 4),
+ DIV(DOUT_SPI1, "dout_spi1", "mout_spi1", CLK_DIV2, 4, 4),
+ DIV(DOUT_AUDIO0, "dout_audio0", "mout_audio0", CLK_DIV2, 8, 4),
+ DIV(DOUT_AUDIO1, "dout_audio1", "mout_audio1", CLK_DIV2, 12, 4),
+ DIV(DOUT_UART, "dout_uart", "mout_uart", CLK_DIV2, 16, 4),
+ DIV(DOUT_IRDA, "dout_irda", "mout_irda", CLK_DIV2, 20, 4),
+};
+
+/* List of clock dividers present on S3C6400. */
+DIV_CLOCKS(s3c6400_div_clks) __initdata = {
+ DIV(ARMCLK, "armclk", "mout_apll", CLK_DIV0, 0, 3),
+};
+
+/* List of clock dividers present on S3C6410. */
+DIV_CLOCKS(s3c6410_div_clks) __initdata = {
+ DIV(ARMCLK, "armclk", "mout_apll", CLK_DIV0, 0, 4),
+ DIV(DOUT_FIMC, "dout_fimc", "hclk", CLK_DIV1, 24, 4),
+ DIV(DOUT_AUDIO2, "dout_audio2", "mout_audio2", CLK_DIV2, 24, 4),
+};
+
+/* List of clock gates present on all S3C64xx SoCs. */
+GATE_CLOCKS(s3c64xx_gate_clks) __initdata = {
+ GATE_BUS(HCLK_UHOST, "hclk_uhost", "hclk", HCLK_GATE, 29),
+ GATE_BUS(HCLK_SECUR, "hclk_secur", "hclk", HCLK_GATE, 28),
+ GATE_BUS(HCLK_SDMA1, "hclk_sdma1", "hclk", HCLK_GATE, 27),
+ GATE_BUS(HCLK_SDMA0, "hclk_sdma0", "hclk", HCLK_GATE, 26),
+ GATE_ON(HCLK_DDR1, "hclk_ddr1", "hclk", HCLK_GATE, 24),
+ GATE_BUS(HCLK_USB, "hclk_usb", "hclk", HCLK_GATE, 20),
+ GATE_BUS(HCLK_HSMMC2, "hclk_hsmmc2", "hclk", HCLK_GATE, 19),
+ GATE_BUS(HCLK_HSMMC1, "hclk_hsmmc1", "hclk", HCLK_GATE, 18),
+ GATE_BUS(HCLK_HSMMC0, "hclk_hsmmc0", "hclk", HCLK_GATE, 17),
+ GATE_BUS(HCLK_MDP, "hclk_mdp", "hclk", HCLK_GATE, 16),
+ GATE_BUS(HCLK_DHOST, "hclk_dhost", "hclk", HCLK_GATE, 15),
+ GATE_BUS(HCLK_IHOST, "hclk_ihost", "hclk", HCLK_GATE, 14),
+ GATE_BUS(HCLK_DMA1, "hclk_dma1", "hclk", HCLK_GATE, 13),
+ GATE_BUS(HCLK_DMA0, "hclk_dma0", "hclk", HCLK_GATE, 12),
+ GATE_BUS(HCLK_JPEG, "hclk_jpeg", "hclk", HCLK_GATE, 11),
+ GATE_BUS(HCLK_CAMIF, "hclk_camif", "hclk", HCLK_GATE, 10),
+ GATE_BUS(HCLK_SCALER, "hclk_scaler", "hclk", HCLK_GATE, 9),
+ GATE_BUS(HCLK_2D, "hclk_2d", "hclk", HCLK_GATE, 8),
+ GATE_BUS(HCLK_TV, "hclk_tv", "hclk", HCLK_GATE, 7),
+ GATE_BUS(HCLK_POST0, "hclk_post0", "hclk", HCLK_GATE, 5),
+ GATE_BUS(HCLK_ROT, "hclk_rot", "hclk", HCLK_GATE, 4),
+ GATE_BUS(HCLK_LCD, "hclk_lcd", "hclk", HCLK_GATE, 3),
+ GATE_BUS(HCLK_TZIC, "hclk_tzic", "hclk", HCLK_GATE, 2),
+ GATE_ON(HCLK_INTC, "hclk_intc", "hclk", HCLK_GATE, 1),
+ GATE_ON(PCLK_SKEY, "pclk_skey", "pclk", PCLK_GATE, 24),
+ GATE_ON(PCLK_CHIPID, "pclk_chipid", "pclk", PCLK_GATE, 23),
+ GATE_BUS(PCLK_SPI1, "pclk_spi1", "pclk", PCLK_GATE, 22),
+ GATE_BUS(PCLK_SPI0, "pclk_spi0", "pclk", PCLK_GATE, 21),
+ GATE_BUS(PCLK_HSIRX, "pclk_hsirx", "pclk", PCLK_GATE, 20),
+ GATE_BUS(PCLK_HSITX, "pclk_hsitx", "pclk", PCLK_GATE, 19),
+ GATE_ON(PCLK_GPIO, "pclk_gpio", "pclk", PCLK_GATE, 18),
+ GATE_BUS(PCLK_IIC0, "pclk_iic0", "pclk", PCLK_GATE, 17),
+ GATE_BUS(PCLK_IIS1, "pclk_iis1", "pclk", PCLK_GATE, 16),
+ GATE_BUS(PCLK_IIS0, "pclk_iis0", "pclk", PCLK_GATE, 15),
+ GATE_BUS(PCLK_AC97, "pclk_ac97", "pclk", PCLK_GATE, 14),
+ GATE_BUS(PCLK_TZPC, "pclk_tzpc", "pclk", PCLK_GATE, 13),
+ GATE_BUS(PCLK_TSADC, "pclk_tsadc", "pclk", PCLK_GATE, 12),
+ GATE_BUS(PCLK_KEYPAD, "pclk_keypad", "pclk", PCLK_GATE, 11),
+ GATE_BUS(PCLK_IRDA, "pclk_irda", "pclk", PCLK_GATE, 10),
+ GATE_BUS(PCLK_PCM1, "pclk_pcm1", "pclk", PCLK_GATE, 9),
+ GATE_BUS(PCLK_PCM0, "pclk_pcm0", "pclk", PCLK_GATE, 8),
+ GATE_BUS(PCLK_PWM, "pclk_pwm", "pclk", PCLK_GATE, 7),
+ GATE_BUS(PCLK_RTC, "pclk_rtc", "pclk", PCLK_GATE, 6),
+ GATE_BUS(PCLK_WDT, "pclk_wdt", "pclk", PCLK_GATE, 5),
+ GATE_BUS(PCLK_UART3, "pclk_uart3", "pclk", PCLK_GATE, 4),
+ GATE_BUS(PCLK_UART2, "pclk_uart2", "pclk", PCLK_GATE, 3),
+ GATE_BUS(PCLK_UART1, "pclk_uart1", "pclk", PCLK_GATE, 2),
+ GATE_BUS(PCLK_UART0, "pclk_uart0", "pclk", PCLK_GATE, 1),
+ GATE_BUS(PCLK_MFC, "pclk_mfc", "pclk", PCLK_GATE, 0),
+ GATE_SCLK(SCLK_UHOST, "sclk_uhost", "dout_uhost", SCLK_GATE, 30),
+ GATE_SCLK(SCLK_MMC2_48, "sclk_mmc2_48", "clk48m", SCLK_GATE, 29),
+ GATE_SCLK(SCLK_MMC1_48, "sclk_mmc1_48", "clk48m", SCLK_GATE, 28),
+ GATE_SCLK(SCLK_MMC0_48, "sclk_mmc0_48", "clk48m", SCLK_GATE, 27),
+ GATE_SCLK(SCLK_MMC2, "sclk_mmc2", "dout_mmc2", SCLK_GATE, 26),
+ GATE_SCLK(SCLK_MMC1, "sclk_mmc1", "dout_mmc1", SCLK_GATE, 25),
+ GATE_SCLK(SCLK_MMC0, "sclk_mmc0", "dout_mmc0", SCLK_GATE, 24),
+ GATE_SCLK(SCLK_SPI1_48, "sclk_spi1_48", "clk48m", SCLK_GATE, 23),
+ GATE_SCLK(SCLK_SPI0_48, "sclk_spi0_48", "clk48m", SCLK_GATE, 22),
+ GATE_SCLK(SCLK_SPI1, "sclk_spi1", "dout_spi1", SCLK_GATE, 21),
+ GATE_SCLK(SCLK_SPI0, "sclk_spi0", "dout_spi0", SCLK_GATE, 20),
+ GATE_SCLK(SCLK_DAC27, "sclk_dac27", "mout_dac27", SCLK_GATE, 19),
+ GATE_SCLK(SCLK_TV27, "sclk_tv27", "mout_tv27", SCLK_GATE, 18),
+ GATE_SCLK(SCLK_SCALER27, "sclk_scaler27", "clk27m", SCLK_GATE, 17),
+ GATE_SCLK(SCLK_SCALER, "sclk_scaler", "dout_scaler", SCLK_GATE, 16),
+ GATE_SCLK(SCLK_LCD27, "sclk_lcd27", "clk27m", SCLK_GATE, 15),
+ GATE_SCLK(SCLK_LCD, "sclk_lcd", "dout_lcd", SCLK_GATE, 14),
+ GATE_SCLK(SCLK_POST0_27, "sclk_post0_27", "clk27m", SCLK_GATE, 12),
+ GATE_SCLK(SCLK_POST0, "sclk_post0", "dout_lcd", SCLK_GATE, 10),
+ GATE_SCLK(SCLK_AUDIO1, "sclk_audio1", "dout_audio1", SCLK_GATE, 9),
+ GATE_SCLK(SCLK_AUDIO0, "sclk_audio0", "dout_audio0", SCLK_GATE, 8),
+ GATE_SCLK(SCLK_SECUR, "sclk_secur", "dout_secur", SCLK_GATE, 7),
+ GATE_SCLK(SCLK_IRDA, "sclk_irda", "dout_irda", SCLK_GATE, 6),
+ GATE_SCLK(SCLK_UART, "sclk_uart", "dout_uart", SCLK_GATE, 5),
+ GATE_SCLK(SCLK_MFC, "sclk_mfc", "dout_mfc", SCLK_GATE, 3),
+ GATE_SCLK(SCLK_CAM, "sclk_cam", "dout_cam", SCLK_GATE, 2),
+ GATE_SCLK(SCLK_JPEG, "sclk_jpeg", "dout_jpeg", SCLK_GATE, 1),
+};
+
+/* List of clock gates present on S3C6400. */
+static struct samsung_gate_clock s3c6400_gate_clks[] __initdata = {
+ GATE_ON(HCLK_DDR0, "hclk_ddr0", "hclk", HCLK_GATE, 23),
+ GATE_SCLK(SCLK_ONENAND, "sclk_onenand", "parent", SCLK_GATE, 4),
+};
+
+/* List of clock gates present on S3C6410. */
+static struct samsung_gate_clock s3c6410_gate_clks[] __initdata = {
+ GATE_BUS(HCLK_3DSE, "hclk_3dse", "hclk", HCLK_GATE, 31),
+ GATE_ON(HCLK_IROM, "hclk_irom", "hclk", HCLK_GATE, 25),
+ GATE_ON(HCLK_MEM1, "hclk_mem1", "hclk", HCLK_GATE, 22),
+ GATE_ON(HCLK_MEM0, "hclk_mem0", "hclk", HCLK_GATE, 21),
+ GATE_BUS(HCLK_MFC, "hclk_mfc", "hclk", HCLK_GATE, 0),
+ GATE_BUS(PCLK_IIC1, "pclk_iic1", "pclk", PCLK_GATE, 27),
+ GATE_BUS(PCLK_IIS2, "pclk_iis2", "pclk", PCLK_GATE, 26),
+ GATE_SCLK(SCLK_FIMC, "sclk_fimc", "dout_fimc", SCLK_GATE, 13),
+ GATE_SCLK(SCLK_AUDIO2, "sclk_audio2", "dout_audio2", SCLK_GATE, 11),
+ GATE_BUS(MEM0_CFCON, "mem0_cfcon", "hclk_mem0", MEM0_GATE, 5),
+ GATE_BUS(MEM0_ONENAND1, "mem0_onenand1", "hclk_mem0", MEM0_GATE, 4),
+ GATE_BUS(MEM0_ONENAND0, "mem0_onenand0", "hclk_mem0", MEM0_GATE, 3),
+ GATE_BUS(MEM0_NFCON, "mem0_nfcon", "hclk_mem0", MEM0_GATE, 2),
+ GATE_ON(MEM0_SROM, "mem0_srom", "hclk_mem0", MEM0_GATE, 1),
+};
+
+/* List of PLL clocks. */
+static struct samsung_pll_clock s3c64xx_pll_clks[] __initdata = {
+ [apll] = PLL(pll_6552, FOUT_APLL, "fout_apll", "fin_pll",
+ APLL_LOCK, APLL_CON, NULL),
+ [mpll] = PLL(pll_6552, FOUT_MPLL, "fout_mpll", "fin_pll",
+ MPLL_LOCK, MPLL_CON, NULL),
+ [epll] = PLL(pll_6553, FOUT_EPLL, "fout_epll", "fin_pll",
+ EPLL_LOCK, EPLL_CON0, NULL),
+};
+
+/* Aliases for common s3c64xx clocks. */
+static struct samsung_clock_alias s3c64xx_clock_aliases[] = {
+ ALIAS(FOUT_APLL, NULL, "fout_apll"),
+ ALIAS(FOUT_MPLL, NULL, "fout_mpll"),
+ ALIAS(FOUT_EPLL, NULL, "fout_epll"),
+ ALIAS(MOUT_EPLL, NULL, "mout_epll"),
+ ALIAS(DOUT_MPLL, NULL, "dout_mpll"),
+ ALIAS(HCLKX2, NULL, "hclk2"),
+ ALIAS(HCLK, NULL, "hclk"),
+ ALIAS(PCLK, NULL, "pclk"),
+ ALIAS(PCLK, NULL, "clk_uart_baud2"),
+ ALIAS(ARMCLK, NULL, "armclk"),
+ ALIAS(HCLK_UHOST, "s3c2410-ohci", "usb-host"),
+ ALIAS(HCLK_USB, "s3c-hsotg", "otg"),
+ ALIAS(HCLK_HSMMC2, "s3c-sdhci.2", "hsmmc"),
+ ALIAS(HCLK_HSMMC2, "s3c-sdhci.2", "mmc_busclk.0"),
+ ALIAS(HCLK_HSMMC1, "s3c-sdhci.1", "hsmmc"),
+ ALIAS(HCLK_HSMMC1, "s3c-sdhci.1", "mmc_busclk.0"),
+ ALIAS(HCLK_HSMMC0, "s3c-sdhci.0", "hsmmc"),
+ ALIAS(HCLK_HSMMC0, "s3c-sdhci.0", "mmc_busclk.0"),
+ ALIAS(HCLK_DMA1, NULL, "dma1"),
+ ALIAS(HCLK_DMA0, NULL, "dma0"),
+ ALIAS(HCLK_CAMIF, "s3c-camif", "camif"),
+ ALIAS(HCLK_LCD, "s3c-fb", "lcd"),
+ ALIAS(PCLK_SPI1, "s3c6410-spi.1", "spi"),
+ ALIAS(PCLK_SPI0, "s3c6410-spi.0", "spi"),
+ ALIAS(PCLK_IIC0, "s3c2440-i2c.0", "i2c"),
+ ALIAS(PCLK_IIS1, "samsung-i2s.1", "iis"),
+ ALIAS(PCLK_IIS0, "samsung-i2s.0", "iis"),
+ ALIAS(PCLK_AC97, "samsung-ac97", "ac97"),
+ ALIAS(PCLK_TSADC, "s3c64xx-adc", "adc"),
+ ALIAS(PCLK_KEYPAD, "samsung-keypad", "keypad"),
+ ALIAS(PCLK_PCM1, "samsung-pcm.1", "pcm"),
+ ALIAS(PCLK_PCM0, "samsung-pcm.0", "pcm"),
+ ALIAS(PCLK_PWM, NULL, "timers"),
+ ALIAS(PCLK_RTC, "s3c64xx-rtc", "rtc"),
+ ALIAS(PCLK_WDT, NULL, "watchdog"),
+ ALIAS(PCLK_UART3, "s3c6400-uart.3", "uart"),
+ ALIAS(PCLK_UART2, "s3c6400-uart.2", "uart"),
+ ALIAS(PCLK_UART1, "s3c6400-uart.1", "uart"),
+ ALIAS(PCLK_UART0, "s3c6400-uart.0", "uart"),
+ ALIAS(SCLK_UHOST, "s3c2410-ohci", "usb-bus-host"),
+ ALIAS(SCLK_MMC2, "s3c-sdhci.2", "mmc_busclk.2"),
+ ALIAS(SCLK_MMC1, "s3c-sdhci.1", "mmc_busclk.2"),
+ ALIAS(SCLK_MMC0, "s3c-sdhci.0", "mmc_busclk.2"),
+ ALIAS(SCLK_SPI1, "s3c6410-spi.1", "spi-bus"),
+ ALIAS(SCLK_SPI0, "s3c6410-spi.0", "spi-bus"),
+ ALIAS(SCLK_AUDIO1, "samsung-pcm.1", "audio-bus"),
+ ALIAS(SCLK_AUDIO1, "samsung-i2s.1", "audio-bus"),
+ ALIAS(SCLK_AUDIO0, "samsung-pcm.0", "audio-bus"),
+ ALIAS(SCLK_AUDIO0, "samsung-i2s.0", "audio-bus"),
+ ALIAS(SCLK_UART, NULL, "clk_uart_baud3"),
+ ALIAS(SCLK_CAM, "s3c-camif", "camera"),
+};
+
+/* Aliases for s3c6400-specific clocks. */
+static struct samsung_clock_alias s3c6400_clock_aliases[] = {
+ /* Nothing to place here yet. */
+};
+
+/* Aliases for s3c6410-specific clocks. */
+static struct samsung_clock_alias s3c6410_clock_aliases[] = {
+ ALIAS(PCLK_IIC1, "s3c2440-i2c.1", "i2c"),
+ ALIAS(PCLK_IIS2, "samsung-i2s.2", "iis"),
+ ALIAS(SCLK_FIMC, "s3c-camif", "fimc"),
+ ALIAS(SCLK_AUDIO2, "samsung-i2s.2", "audio-bus"),
+ ALIAS(MEM0_SROM, NULL, "srom"),
+};
+
+static void __init s3c64xx_clk_register_fixed_ext(unsigned long fin_pll_f,
+ unsigned long xusbxti_f)
+{
+ s3c64xx_fixed_rate_ext_clks[0].fixed_rate = fin_pll_f;
+ s3c64xx_fixed_rate_ext_clks[1].fixed_rate = xusbxti_f;
+ samsung_clk_register_fixed_rate(s3c64xx_fixed_rate_ext_clks,
+ ARRAY_SIZE(s3c64xx_fixed_rate_ext_clks));
+}
+
+/* Register s3c64xx clocks. */
+void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
+ unsigned long xusbxti_f, bool is_s3c6400,
+ void __iomem *reg_base)
+{
+ unsigned long *soc_regs = NULL;
+ unsigned long nr_soc_regs = 0;
+
+ if (np) {
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ panic("%s: failed to map registers\n", __func__);
+ }
+
+ if (!is_s3c6400) {
+ soc_regs = s3c6410_clk_regs;
+ nr_soc_regs = ARRAY_SIZE(s3c6410_clk_regs);
+ }
+
+ samsung_clk_init(np, reg_base, NR_CLKS, s3c64xx_clk_regs,
+ ARRAY_SIZE(s3c64xx_clk_regs), soc_regs, nr_soc_regs);
+
+ /* Register external clocks. */
+ if (!np)
+ s3c64xx_clk_register_fixed_ext(xtal_f, xusbxti_f);
+
+ /* Register PLLs. */
+ samsung_clk_register_pll(s3c64xx_pll_clks,
+ ARRAY_SIZE(s3c64xx_pll_clks), reg_base);
+
+ /* Register common internal clocks. */
+ samsung_clk_register_fixed_rate(s3c64xx_fixed_rate_clks,
+ ARRAY_SIZE(s3c64xx_fixed_rate_clks));
+ samsung_clk_register_mux(s3c64xx_mux_clks,
+ ARRAY_SIZE(s3c64xx_mux_clks));
+ samsung_clk_register_div(s3c64xx_div_clks,
+ ARRAY_SIZE(s3c64xx_div_clks));
+ samsung_clk_register_gate(s3c64xx_gate_clks,
+ ARRAY_SIZE(s3c64xx_gate_clks));
+
+ /* Register SoC-specific clocks. */
+ if (is_s3c6400) {
+ samsung_clk_register_mux(s3c6400_mux_clks,
+ ARRAY_SIZE(s3c6400_mux_clks));
+ samsung_clk_register_div(s3c6400_div_clks,
+ ARRAY_SIZE(s3c6400_div_clks));
+ samsung_clk_register_gate(s3c6400_gate_clks,
+ ARRAY_SIZE(s3c6400_gate_clks));
+ samsung_clk_register_alias(s3c6400_clock_aliases,
+ ARRAY_SIZE(s3c6400_clock_aliases));
+ } else {
+ samsung_clk_register_mux(s3c6410_mux_clks,
+ ARRAY_SIZE(s3c6410_mux_clks));
+ samsung_clk_register_div(s3c6410_div_clks,
+ ARRAY_SIZE(s3c6410_div_clks));
+ samsung_clk_register_gate(s3c6410_gate_clks,
+ ARRAY_SIZE(s3c6410_gate_clks));
+ samsung_clk_register_alias(s3c6410_clock_aliases,
+ ARRAY_SIZE(s3c6410_clock_aliases));
+ }
+
+ samsung_clk_register_alias(s3c64xx_clock_aliases,
+ ARRAY_SIZE(s3c64xx_clock_aliases));
+
+ pr_info("%s clocks: apll = %lu, mpll = %lu\n"
+ "\tepll = %lu, arm_clk = %lu\n",
+ is_s3c6400 ? "S3C6400" : "S3C6410",
+ _get_rate("fout_apll"), _get_rate("fout_mpll"),
+ _get_rate("fout_epll"), _get_rate("armclk"));
+}
+
+static void __init s3c6400_clk_init(struct device_node *np)
+{
+ s3c64xx_clk_init(np, 0, 0, true, NULL);
+}
+CLK_OF_DECLARE(s3c6400_clk, "samsung,s3c6400-clock", s3c6400_clk_init);
+
+static void __init s3c6410_clk_init(struct device_node *np)
+{
+ s3c64xx_clk_init(np, 0, 0, false, NULL);
+}
+CLK_OF_DECLARE(s3c6410_clk, "samsung,s3c6410-clock", s3c6410_clk_init);
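+
+/*
+ * Usage sketch for a legacy (non-DT) board. The 12 MHz crystal, the
+ * 48 MHz USB oscillator and the S3C_VA_SYS mapping below are
+ * illustrative assumptions, not values taken from this driver:
+ *
+ *	s3c64xx_clk_init(NULL, 12000000, 48000000, false, S3C_VA_SYS);
+ */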
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index cd3c40a..f503f32 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -307,14 +307,12 @@ void __init samsung_clk_of_register_fixed_ext(
unsigned long _get_rate(const char *clk_name)
{
struct clk *clk;
- unsigned long rate;
- clk = clk_get(NULL, clk_name);
- if (IS_ERR(clk)) {
+ clk = __clk_lookup(clk_name);
+ if (!clk) {
pr_err("%s: could not find clock %s\n", __func__, clk_name);
return 0;
}
- rate = clk_get_rate(clk);
- clk_put(clk);
- return rate;
+
+ return clk_get_rate(clk);
}
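
/*
 * Minimal usage sketch (the clock name is only an example):
 * __clk_lookup() matches a clock by its registered name and takes no
 * reference, so no clk_put() is needed and the helper also works for
 * clocks that have no clkdev alias:
 *
 *	rate = _get_rate("fout_apll");	returns 0 when not found
 */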
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index 2f7dba2..31b4174 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -19,6 +19,7 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include "clk-pll.h"
/**
* struct samsung_clock_alias: information about clock alias
@@ -39,6 +40,8 @@ struct samsung_clock_alias {
.alias = a, \
}
+#define MHZ (1000 * 1000)
+
/**
* struct samsung_fixed_rate_clock: information about fixed-rate clock
* @id: platform specific id of the clock.
@@ -127,7 +130,7 @@ struct samsung_mux_clock {
.name = cname, \
.parent_names = pnames, \
.num_parents = ARRAY_SIZE(pnames), \
- .flags = f, \
+ .flags = (f) | CLK_SET_RATE_NO_REPARENT, \
.offset = o, \
.shift = s, \
.width = w, \
@@ -261,6 +264,54 @@ struct samsung_clk_reg_dump {
u32 value;
};
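
/*
 * Sketch of the mux flags change above: CLK_SET_RATE_NO_REPARENT keeps
 * clk_set_rate() from switching a mux to a different parent while
 * searching for the best rate. With the flag set, a rate request is
 * rounded against the current parent only, and reparenting remains an
 * explicit decision:
 *
 *	clk_set_rate(mux, rate);		rounds on the current parent
 *	clk_set_parent(mux, new_parent);	explicit reparent
 */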
+/**
+ * struct samsung_pll_clock: information about pll clock
+ * @id: platform specific id of the clock.
+ * @dev_name: name of the device to which this clock belongs.
+ * @name: name of this pll clock.
+ * @parent_name: name of the parent clock.
+ * @flags: optional flags for basic clock.
+ * @con_offset: offset of the register for configuring the PLL.
+ * @lock_offset: offset of the register for locking the PLL.
+ * @type: type of PLL to be registered.
+ * @rate_table: optional list of register settings for specific rates.
+ * @alias: optional clock alias name to be assigned to this clock.
+ */
+struct samsung_pll_clock {
+ unsigned int id;
+ const char *dev_name;
+ const char *name;
+ const char *parent_name;
+ unsigned long flags;
+ int con_offset;
+ int lock_offset;
+ enum samsung_pll_type type;
+ const struct samsung_pll_rate_table *rate_table;
+ const char *alias;
+};
+
+#define __PLL(_typ, _id, _dname, _name, _pname, _flags, _lock, _con, \
+ _rtable, _alias) \
+ { \
+ .id = _id, \
+ .type = _typ, \
+ .dev_name = _dname, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .flags = _flags, \
+ .con_offset = _con, \
+ .lock_offset = _lock, \
+ .rate_table = _rtable, \
+ .alias = _alias, \
+ }
+
+#define PLL(_typ, _id, _name, _pname, _lock, _con, _rtable) \
+ __PLL(_typ, _id, NULL, _name, _pname, CLK_GET_RATE_NOCACHE, \
+ _lock, _con, _rtable, _name)
+
+#define PLL_A(_typ, _id, _name, _pname, _lock, _con, _alias, _rtable) \
+ __PLL(_typ, _id, NULL, _name, _pname, CLK_GET_RATE_NOCACHE, \
+ _lock, _con, _rtable, _alias)
+
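+/*
+ * Expansion sketch: an entry such as
+ *
+ *	PLL(pll_6552, FOUT_APLL, "fout_apll", "fin_pll",
+ *			APLL_LOCK, APLL_CON, NULL)
+ *
+ * describes a pll_6552-type PLL with id FOUT_APLL, parent "fin_pll",
+ * lock register at offset APLL_LOCK, configuration register at offset
+ * APLL_CON, no rate table, and a clkdev alias equal to the clock name.
+ */
+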
extern void __init samsung_clk_init(struct device_node *np, void __iomem *base,
unsigned long nr_clks, unsigned long *rdump,
unsigned long nr_rdump, unsigned long *soc_rdump,
@@ -284,6 +335,8 @@ extern void __init samsung_clk_register_div(struct samsung_div_clock *clk_list,
unsigned int nr_clk);
extern void __init samsung_clk_register_gate(
struct samsung_gate_clock *clk_list, unsigned int nr_clk);
+extern void __init samsung_clk_register_pll(struct samsung_pll_clock *pll_list,
+ unsigned int nr_clk, void __iomem *base);
extern unsigned long _get_rate(const char *clk_name);
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index aedbbe1..65894f7 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -416,9 +416,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
/* clock derived from 24 or 25 MHz osc clk */
/* vco-pll */
clk = clk_register_mux(NULL, "vco1_mclk", vco_parents,
- ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
- SPEAR1310_PLL1_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(vco_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_PLL_CFG, SPEAR1310_PLL1_CLK_SHIFT,
+ SPEAR1310_PLL_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "vco1_mclk", NULL);
clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mclk",
0, SPEAR1310_PLL1_CTR, SPEAR1310_PLL1_FRQ, pll_rtbl,
@@ -427,9 +427,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk1, "pll1_clk", NULL);
clk = clk_register_mux(NULL, "vco2_mclk", vco_parents,
- ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
- SPEAR1310_PLL2_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(vco_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_PLL_CFG, SPEAR1310_PLL2_CLK_SHIFT,
+ SPEAR1310_PLL_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "vco2_mclk", NULL);
clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mclk",
0, SPEAR1310_PLL2_CTR, SPEAR1310_PLL2_FRQ, pll_rtbl,
@@ -438,9 +438,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk1, "pll2_clk", NULL);
clk = clk_register_mux(NULL, "vco3_mclk", vco_parents,
- ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
- SPEAR1310_PLL3_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(vco_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_PLL_CFG, SPEAR1310_PLL3_CLK_SHIFT,
+ SPEAR1310_PLL_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "vco3_mclk", NULL);
clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mclk",
0, SPEAR1310_PLL3_CTR, SPEAR1310_PLL3_FRQ, pll_rtbl,
@@ -515,9 +515,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
/* gpt clocks */
clk = clk_register_mux(NULL, "gpt0_mclk", gpt_parents,
- ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
- SPEAR1310_GPT0_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(gpt_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GPT0_CLK_SHIFT,
+ SPEAR1310_GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt0_mclk", NULL);
clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mclk", 0,
SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT0_CLK_ENB, 0,
@@ -525,9 +525,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "gpt0");
clk = clk_register_mux(NULL, "gpt1_mclk", gpt_parents,
- ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
- SPEAR1310_GPT1_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(gpt_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GPT1_CLK_SHIFT,
+ SPEAR1310_GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt1_mclk", NULL);
clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT1_CLK_ENB, 0,
@@ -535,9 +535,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "gpt1");
clk = clk_register_mux(NULL, "gpt2_mclk", gpt_parents,
- ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
- SPEAR1310_GPT2_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(gpt_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GPT2_CLK_SHIFT,
+ SPEAR1310_GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt2_mclk", NULL);
clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT2_CLK_ENB, 0,
@@ -545,9 +545,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "gpt2");
clk = clk_register_mux(NULL, "gpt3_mclk", gpt_parents,
- ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
- SPEAR1310_GPT3_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(gpt_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GPT3_CLK_SHIFT,
+ SPEAR1310_GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt3_mclk", NULL);
clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT3_CLK_ENB, 0,
@@ -562,7 +562,8 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
- ARRAY_SIZE(uart0_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart0_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR1310_PERIP_CLK_CFG, SPEAR1310_UART_CLK_SHIFT,
SPEAR1310_UART_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "uart0_mclk", NULL);
@@ -602,7 +603,8 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk1, "c3_syn_gclk", NULL);
clk = clk_register_mux(NULL, "c3_mclk", c3_parents,
- ARRAY_SIZE(c3_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(c3_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR1310_PERIP_CLK_CFG, SPEAR1310_C3_CLK_SHIFT,
SPEAR1310_C3_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "c3_mclk", NULL);
@@ -614,8 +616,8 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
/* gmac */
clk = clk_register_mux(NULL, "phy_input_mclk", gmac_phy_input_parents,
- ARRAY_SIZE(gmac_phy_input_parents), 0,
- SPEAR1310_GMAC_CLK_CFG,
+ ARRAY_SIZE(gmac_phy_input_parents),
+ CLK_SET_RATE_NO_REPARENT, SPEAR1310_GMAC_CLK_CFG,
SPEAR1310_GMAC_PHY_INPUT_CLK_SHIFT,
SPEAR1310_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "phy_input_mclk", NULL);
@@ -627,15 +629,16 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk1, "phy_syn_gclk", NULL);
clk = clk_register_mux(NULL, "phy_mclk", gmac_phy_parents,
- ARRAY_SIZE(gmac_phy_parents), 0,
+ ARRAY_SIZE(gmac_phy_parents), CLK_SET_RATE_NO_REPARENT,
SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GMAC_PHY_CLK_SHIFT,
SPEAR1310_GMAC_PHY_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "stmmacphy.0", NULL);
/* clcd */
clk = clk_register_mux(NULL, "clcd_syn_mclk", clcd_synth_parents,
- ARRAY_SIZE(clcd_synth_parents), 0,
- SPEAR1310_CLCD_CLK_SYNT, SPEAR1310_CLCD_SYNT_CLK_SHIFT,
+ ARRAY_SIZE(clcd_synth_parents),
+ CLK_SET_RATE_NO_REPARENT, SPEAR1310_CLCD_CLK_SYNT,
+ SPEAR1310_CLCD_SYNT_CLK_SHIFT,
SPEAR1310_CLCD_SYNT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "clcd_syn_mclk", NULL);
@@ -645,7 +648,8 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, "clcd_syn_clk", NULL);
clk = clk_register_mux(NULL, "clcd_pixel_mclk", clcd_pixel_parents,
- ARRAY_SIZE(clcd_pixel_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(clcd_pixel_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR1310_PERIP_CLK_CFG, SPEAR1310_CLCD_CLK_SHIFT,
SPEAR1310_CLCD_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "clcd_pixel_mclk", NULL);
@@ -657,9 +661,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
/* i2s */
clk = clk_register_mux(NULL, "i2s_src_mclk", i2s_src_parents,
- ARRAY_SIZE(i2s_src_parents), 0, SPEAR1310_I2S_CLK_CFG,
- SPEAR1310_I2S_SRC_CLK_SHIFT, SPEAR1310_I2S_SRC_CLK_MASK,
- 0, &_lock);
+ ARRAY_SIZE(i2s_src_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_I2S_CLK_CFG, SPEAR1310_I2S_SRC_CLK_SHIFT,
+ SPEAR1310_I2S_SRC_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "i2s_src_mclk", NULL);
clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mclk", 0,
@@ -668,7 +672,8 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
clk = clk_register_mux(NULL, "i2s_ref_mclk", i2s_ref_parents,
- ARRAY_SIZE(i2s_ref_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(i2s_ref_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR1310_I2S_CLK_CFG, SPEAR1310_I2S_REF_SHIFT,
SPEAR1310_I2S_REF_SEL_MASK, 0, &_lock);
clk_register_clkdev(clk, "i2s_ref_mclk", NULL);
@@ -806,13 +811,15 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
/* RAS clks */
clk = clk_register_mux(NULL, "gen_syn0_1_mclk", gen_synth0_1_parents,
- ARRAY_SIZE(gen_synth0_1_parents), 0, SPEAR1310_PLL_CFG,
+ ARRAY_SIZE(gen_synth0_1_parents),
+ CLK_SET_RATE_NO_REPARENT, SPEAR1310_PLL_CFG,
SPEAR1310_RAS_SYNT0_1_CLK_SHIFT,
SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gen_syn0_1_clk", NULL);
clk = clk_register_mux(NULL, "gen_syn2_3_mclk", gen_synth2_3_parents,
- ARRAY_SIZE(gen_synth2_3_parents), 0, SPEAR1310_PLL_CFG,
+ ARRAY_SIZE(gen_synth2_3_parents),
+ CLK_SET_RATE_NO_REPARENT, SPEAR1310_PLL_CFG,
SPEAR1310_RAS_SYNT2_3_CLK_SHIFT,
SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gen_syn2_3_clk", NULL);
@@ -929,8 +936,8 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk = clk_register_mux(NULL, "smii_rgmii_phy_mclk",
smii_rgmii_phy_parents,
- ARRAY_SIZE(smii_rgmii_phy_parents), 0,
- SPEAR1310_RAS_CTRL_REG1,
+ ARRAY_SIZE(smii_rgmii_phy_parents),
+ CLK_SET_RATE_NO_REPARENT, SPEAR1310_RAS_CTRL_REG1,
SPEAR1310_SMII_RGMII_PHY_CLK_SHIFT,
SPEAR1310_PHY_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "stmmacphy.1", NULL);
@@ -938,15 +945,15 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, "stmmacphy.4", NULL);
clk = clk_register_mux(NULL, "rmii_phy_mclk", rmii_phy_parents,
- ARRAY_SIZE(rmii_phy_parents), 0,
+ ARRAY_SIZE(rmii_phy_parents), CLK_SET_RATE_NO_REPARENT,
SPEAR1310_RAS_CTRL_REG1, SPEAR1310_RMII_PHY_CLK_SHIFT,
SPEAR1310_PHY_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "stmmacphy.3", NULL);
clk = clk_register_mux(NULL, "uart1_mclk", uart_parents,
- ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_UART1_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
- 0, &_lock);
+ ARRAY_SIZE(uart_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_UART1_CLK_SHIFT,
+ SPEAR1310_RAS_UART_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "uart1_mclk", NULL);
clk = clk_register_gate(NULL, "uart1_clk", "uart1_mclk", 0,
@@ -955,9 +962,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5c800000.serial");
clk = clk_register_mux(NULL, "uart2_mclk", uart_parents,
- ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_UART2_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
- 0, &_lock);
+ ARRAY_SIZE(uart_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_UART2_CLK_SHIFT,
+ SPEAR1310_RAS_UART_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "uart2_mclk", NULL);
clk = clk_register_gate(NULL, "uart2_clk", "uart2_mclk", 0,
@@ -966,9 +973,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5c900000.serial");
clk = clk_register_mux(NULL, "uart3_mclk", uart_parents,
- ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_UART3_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
- 0, &_lock);
+ ARRAY_SIZE(uart_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_UART3_CLK_SHIFT,
+ SPEAR1310_RAS_UART_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "uart3_mclk", NULL);
clk = clk_register_gate(NULL, "uart3_clk", "uart3_mclk", 0,
@@ -977,9 +984,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5ca00000.serial");
clk = clk_register_mux(NULL, "uart4_mclk", uart_parents,
- ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_UART4_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
- 0, &_lock);
+ ARRAY_SIZE(uart_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_UART4_CLK_SHIFT,
+ SPEAR1310_RAS_UART_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "uart4_mclk", NULL);
clk = clk_register_gate(NULL, "uart4_clk", "uart4_mclk", 0,
@@ -988,9 +995,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5cb00000.serial");
clk = clk_register_mux(NULL, "uart5_mclk", uart_parents,
- ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_UART5_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
- 0, &_lock);
+ ARRAY_SIZE(uart_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_UART5_CLK_SHIFT,
+ SPEAR1310_RAS_UART_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "uart5_mclk", NULL);
clk = clk_register_gate(NULL, "uart5_clk", "uart5_mclk", 0,
@@ -999,9 +1006,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5cc00000.serial");
clk = clk_register_mux(NULL, "i2c1_mclk", i2c_parents,
- ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_I2C1_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(i2c_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C1_CLK_SHIFT,
+ SPEAR1310_I2C_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "i2c1_mclk", NULL);
clk = clk_register_gate(NULL, "i2c1_clk", "i2c1_mclk", 0,
@@ -1010,9 +1017,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5cd00000.i2c");
clk = clk_register_mux(NULL, "i2c2_mclk", i2c_parents,
- ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_I2C2_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(i2c_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C2_CLK_SHIFT,
+ SPEAR1310_I2C_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "i2c2_mclk", NULL);
clk = clk_register_gate(NULL, "i2c2_clk", "i2c2_mclk", 0,
@@ -1021,9 +1028,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5ce00000.i2c");
clk = clk_register_mux(NULL, "i2c3_mclk", i2c_parents,
- ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_I2C3_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(i2c_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C3_CLK_SHIFT,
+ SPEAR1310_I2C_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "i2c3_mclk", NULL);
clk = clk_register_gate(NULL, "i2c3_clk", "i2c3_mclk", 0,
@@ -1032,9 +1039,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5cf00000.i2c");
clk = clk_register_mux(NULL, "i2c4_mclk", i2c_parents,
- ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_I2C4_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(i2c_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C4_CLK_SHIFT,
+ SPEAR1310_I2C_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "i2c4_mclk", NULL);
clk = clk_register_gate(NULL, "i2c4_clk", "i2c4_mclk", 0,
@@ -1043,9 +1050,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5d000000.i2c");
clk = clk_register_mux(NULL, "i2c5_mclk", i2c_parents,
- ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_I2C5_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(i2c_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C5_CLK_SHIFT,
+ SPEAR1310_I2C_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "i2c5_mclk", NULL);
clk = clk_register_gate(NULL, "i2c5_clk", "i2c5_mclk", 0,
@@ -1054,9 +1061,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5d100000.i2c");
clk = clk_register_mux(NULL, "i2c6_mclk", i2c_parents,
- ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_I2C6_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(i2c_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C6_CLK_SHIFT,
+ SPEAR1310_I2C_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "i2c6_mclk", NULL);
clk = clk_register_gate(NULL, "i2c6_clk", "i2c6_mclk", 0,
@@ -1065,9 +1072,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5d200000.i2c");
clk = clk_register_mux(NULL, "i2c7_mclk", i2c_parents,
- ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_I2C7_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(i2c_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_I2C7_CLK_SHIFT,
+ SPEAR1310_I2C_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "i2c7_mclk", NULL);
clk = clk_register_gate(NULL, "i2c7_clk", "i2c7_mclk", 0,
@@ -1076,9 +1083,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5d300000.i2c");
clk = clk_register_mux(NULL, "ssp1_mclk", ssp1_parents,
- ARRAY_SIZE(ssp1_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_SSP1_CLK_SHIFT, SPEAR1310_SSP1_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(ssp1_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_SSP1_CLK_SHIFT,
+ SPEAR1310_SSP1_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "ssp1_mclk", NULL);
clk = clk_register_gate(NULL, "ssp1_clk", "ssp1_mclk", 0,
@@ -1087,9 +1094,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "5d400000.spi");
clk = clk_register_mux(NULL, "pci_mclk", pci_parents,
- ARRAY_SIZE(pci_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_PCI_CLK_SHIFT, SPEAR1310_PCI_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(pci_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_PCI_CLK_SHIFT,
+ SPEAR1310_PCI_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "pci_mclk", NULL);
clk = clk_register_gate(NULL, "pci_clk", "pci_mclk", 0,
@@ -1098,9 +1105,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "pci");
clk = clk_register_mux(NULL, "tdm1_mclk", tdm_parents,
- ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_TDM1_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(tdm_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_TDM1_CLK_SHIFT,
+ SPEAR1310_TDM_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "tdm1_mclk", NULL);
clk = clk_register_gate(NULL, "tdm1_clk", "tdm1_mclk", 0,
@@ -1109,9 +1116,9 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
clk_register_clkdev(clk, NULL, "tdm_hdlc.0");
clk = clk_register_mux(NULL, "tdm2_mclk", tdm_parents,
- ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
- SPEAR1310_TDM2_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(tdm_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1310_RAS_CTRL_REG0, SPEAR1310_TDM2_CLK_SHIFT,
+ SPEAR1310_TDM_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "tdm2_mclk", NULL);
clk = clk_register_gate(NULL, "tdm2_clk", "tdm2_mclk", 0,
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index 9d0b394..fe835c1 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -473,9 +473,9 @@ void __init spear1340_clk_init(void __iomem *misc_base)
/* clock derived from 24 or 25 MHz osc clk */
/* vco-pll */
clk = clk_register_mux(NULL, "vco1_mclk", vco_parents,
- ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
- SPEAR1340_PLL1_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(vco_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1340_PLL_CFG, SPEAR1340_PLL1_CLK_SHIFT,
+ SPEAR1340_PLL_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "vco1_mclk", NULL);
clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mclk", 0,
SPEAR1340_PLL1_CTR, SPEAR1340_PLL1_FRQ, pll_rtbl,
@@ -484,9 +484,9 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk1, "pll1_clk", NULL);
clk = clk_register_mux(NULL, "vco2_mclk", vco_parents,
- ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
- SPEAR1340_PLL2_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(vco_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1340_PLL_CFG, SPEAR1340_PLL2_CLK_SHIFT,
+ SPEAR1340_PLL_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "vco2_mclk", NULL);
clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mclk", 0,
SPEAR1340_PLL2_CTR, SPEAR1340_PLL2_FRQ, pll_rtbl,
@@ -495,9 +495,9 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk1, "pll2_clk", NULL);
clk = clk_register_mux(NULL, "vco3_mclk", vco_parents,
- ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
- SPEAR1340_PLL3_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(vco_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1340_PLL_CFG, SPEAR1340_PLL3_CLK_SHIFT,
+ SPEAR1340_PLL_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "vco3_mclk", NULL);
clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mclk", 0,
SPEAR1340_PLL3_CTR, SPEAR1340_PLL3_FRQ, pll_rtbl,
@@ -561,8 +561,8 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, "amba_syn_clk", NULL);
clk = clk_register_mux(NULL, "sys_mclk", sys_parents,
- ARRAY_SIZE(sys_parents), 0, SPEAR1340_SYS_CLK_CTRL,
- SPEAR1340_SCLK_SRC_SEL_SHIFT,
+ ARRAY_SIZE(sys_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1340_SYS_CLK_CTRL, SPEAR1340_SCLK_SRC_SEL_SHIFT,
SPEAR1340_SCLK_SRC_SEL_MASK, 0, &_lock);
clk_register_clkdev(clk, "sys_mclk", NULL);
@@ -583,8 +583,8 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, NULL, "smp_twd");
clk = clk_register_mux(NULL, "ahb_clk", ahb_parents,
- ARRAY_SIZE(ahb_parents), 0, SPEAR1340_SYS_CLK_CTRL,
- SPEAR1340_HCLK_SRC_SEL_SHIFT,
+ ARRAY_SIZE(ahb_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1340_SYS_CLK_CTRL, SPEAR1340_HCLK_SRC_SEL_SHIFT,
SPEAR1340_HCLK_SRC_SEL_MASK, 0, &_lock);
clk_register_clkdev(clk, "ahb_clk", NULL);
@@ -594,9 +594,9 @@ void __init spear1340_clk_init(void __iomem *misc_base)
/* gpt clocks */
clk = clk_register_mux(NULL, "gpt0_mclk", gpt_parents,
- ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
- SPEAR1340_GPT0_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(gpt_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GPT0_CLK_SHIFT,
+ SPEAR1340_GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt0_mclk", NULL);
clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mclk", 0,
SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT0_CLK_ENB, 0,
@@ -604,9 +604,9 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, NULL, "gpt0");
clk = clk_register_mux(NULL, "gpt1_mclk", gpt_parents,
- ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
- SPEAR1340_GPT1_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(gpt_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GPT1_CLK_SHIFT,
+ SPEAR1340_GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt1_mclk", NULL);
clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT1_CLK_ENB, 0,
@@ -614,9 +614,9 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, NULL, "gpt1");
clk = clk_register_mux(NULL, "gpt2_mclk", gpt_parents,
- ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
- SPEAR1340_GPT2_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(gpt_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GPT2_CLK_SHIFT,
+ SPEAR1340_GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt2_mclk", NULL);
clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT2_CLK_ENB, 0,
@@ -624,9 +624,9 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, NULL, "gpt2");
clk = clk_register_mux(NULL, "gpt3_mclk", gpt_parents,
- ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
- SPEAR1340_GPT3_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(gpt_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GPT3_CLK_SHIFT,
+ SPEAR1340_GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt3_mclk", NULL);
clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT3_CLK_ENB, 0,
@@ -641,7 +641,8 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk1, "uart0_syn_gclk", NULL);
clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
- ARRAY_SIZE(uart0_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart0_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR1340_PERIP_CLK_CFG, SPEAR1340_UART0_CLK_SHIFT,
SPEAR1340_UART_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "uart0_mclk", NULL);
@@ -658,9 +659,9 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk1, "uart1_syn_gclk", NULL);
clk = clk_register_mux(NULL, "uart1_mclk", uart1_parents,
- ARRAY_SIZE(uart1_parents), 0, SPEAR1340_PERIP_CLK_CFG,
- SPEAR1340_UART1_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(uart1_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1340_PERIP_CLK_CFG, SPEAR1340_UART1_CLK_SHIFT,
+ SPEAR1340_UART_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "uart1_mclk", NULL);
clk = clk_register_gate(NULL, "uart1_clk", "uart1_mclk", 0,
@@ -698,7 +699,8 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk1, "c3_syn_gclk", NULL);
clk = clk_register_mux(NULL, "c3_mclk", c3_parents,
- ARRAY_SIZE(c3_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(c3_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR1340_PERIP_CLK_CFG, SPEAR1340_C3_CLK_SHIFT,
SPEAR1340_C3_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "c3_mclk", NULL);
@@ -710,8 +712,8 @@ void __init spear1340_clk_init(void __iomem *misc_base)
/* gmac */
clk = clk_register_mux(NULL, "phy_input_mclk", gmac_phy_input_parents,
- ARRAY_SIZE(gmac_phy_input_parents), 0,
- SPEAR1340_GMAC_CLK_CFG,
+ ARRAY_SIZE(gmac_phy_input_parents),
+ CLK_SET_RATE_NO_REPARENT, SPEAR1340_GMAC_CLK_CFG,
SPEAR1340_GMAC_PHY_INPUT_CLK_SHIFT,
SPEAR1340_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "phy_input_mclk", NULL);
@@ -723,15 +725,16 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk1, "phy_syn_gclk", NULL);
clk = clk_register_mux(NULL, "phy_mclk", gmac_phy_parents,
- ARRAY_SIZE(gmac_phy_parents), 0,
+ ARRAY_SIZE(gmac_phy_parents), CLK_SET_RATE_NO_REPARENT,
SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GMAC_PHY_CLK_SHIFT,
SPEAR1340_GMAC_PHY_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "stmmacphy.0", NULL);
/* clcd */
clk = clk_register_mux(NULL, "clcd_syn_mclk", clcd_synth_parents,
- ARRAY_SIZE(clcd_synth_parents), 0,
- SPEAR1340_CLCD_CLK_SYNT, SPEAR1340_CLCD_SYNT_CLK_SHIFT,
+ ARRAY_SIZE(clcd_synth_parents),
+ CLK_SET_RATE_NO_REPARENT, SPEAR1340_CLCD_CLK_SYNT,
+ SPEAR1340_CLCD_SYNT_CLK_SHIFT,
SPEAR1340_CLCD_SYNT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "clcd_syn_mclk", NULL);
@@ -741,7 +744,8 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, "clcd_syn_clk", NULL);
clk = clk_register_mux(NULL, "clcd_pixel_mclk", clcd_pixel_parents,
- ARRAY_SIZE(clcd_pixel_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(clcd_pixel_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR1340_PERIP_CLK_CFG, SPEAR1340_CLCD_CLK_SHIFT,
SPEAR1340_CLCD_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "clcd_pixel_mclk", NULL);
@@ -753,9 +757,9 @@ void __init spear1340_clk_init(void __iomem *misc_base)
/* i2s */
clk = clk_register_mux(NULL, "i2s_src_mclk", i2s_src_parents,
- ARRAY_SIZE(i2s_src_parents), 0, SPEAR1340_I2S_CLK_CFG,
- SPEAR1340_I2S_SRC_CLK_SHIFT, SPEAR1340_I2S_SRC_CLK_MASK,
- 0, &_lock);
+ ARRAY_SIZE(i2s_src_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR1340_I2S_CLK_CFG, SPEAR1340_I2S_SRC_CLK_SHIFT,
+ SPEAR1340_I2S_SRC_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "i2s_src_mclk", NULL);
clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mclk",
@@ -765,7 +769,8 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
clk = clk_register_mux(NULL, "i2s_ref_mclk", i2s_ref_parents,
- ARRAY_SIZE(i2s_ref_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(i2s_ref_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR1340_I2S_CLK_CFG, SPEAR1340_I2S_REF_SHIFT,
SPEAR1340_I2S_REF_SEL_MASK, 0, &_lock);
clk_register_clkdev(clk, "i2s_ref_mclk", NULL);
@@ -891,13 +896,15 @@ void __init spear1340_clk_init(void __iomem *misc_base)
/* RAS clks */
clk = clk_register_mux(NULL, "gen_syn0_1_mclk", gen_synth0_1_parents,
- ARRAY_SIZE(gen_synth0_1_parents), 0, SPEAR1340_PLL_CFG,
+ ARRAY_SIZE(gen_synth0_1_parents),
+ CLK_SET_RATE_NO_REPARENT, SPEAR1340_PLL_CFG,
SPEAR1340_GEN_SYNT0_1_CLK_SHIFT,
SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gen_syn0_1_mclk", NULL);
clk = clk_register_mux(NULL, "gen_syn2_3_mclk", gen_synth2_3_parents,
- ARRAY_SIZE(gen_synth2_3_parents), 0, SPEAR1340_PLL_CFG,
+ ARRAY_SIZE(gen_synth2_3_parents),
+ CLK_SET_RATE_NO_REPARENT, SPEAR1340_PLL_CFG,
SPEAR1340_GEN_SYNT2_3_CLK_SHIFT,
SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gen_syn2_3_mclk", NULL);
@@ -938,7 +945,8 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, NULL, "spear_cec.1");
clk = clk_register_mux(NULL, "spdif_out_mclk", spdif_out_parents,
- ARRAY_SIZE(spdif_out_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(spdif_out_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_OUT_CLK_SHIFT,
SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "spdif_out_mclk", NULL);
@@ -949,7 +957,8 @@ void __init spear1340_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, NULL, "d0000000.spdif-out");
clk = clk_register_mux(NULL, "spdif_in_mclk", spdif_in_parents,
- ARRAY_SIZE(spdif_in_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(spdif_in_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_IN_CLK_SHIFT,
SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "spdif_in_mclk", NULL);
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index 080c3c5..c2d2043 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -294,7 +294,8 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
clk_register_clkdev(clk, NULL, "a9400000.i2s");
clk = clk_register_mux(NULL, "i2s_ref_clk", i2s_ref_parents,
- ARRAY_SIZE(i2s_ref_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(i2s_ref_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR320_CONTROL_REG, I2S_REF_PCLK_SHIFT,
I2S_REF_PCLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "i2s_ref_clk", NULL);
@@ -313,57 +314,66 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
clk_register_clkdev(clk, "hclk", "ab000000.eth");
clk = clk_register_mux(NULL, "rs485_clk", uartx_parents,
- ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uartx_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR320_EXT_CTRL_REG, SPEAR320_RS485_PCLK_SHIFT,
SPEAR320_UARTX_PCLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "a9300000.serial");
clk = clk_register_mux(NULL, "sdhci_clk", sdhci_parents,
- ARRAY_SIZE(sdhci_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(sdhci_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR320_CONTROL_REG, SDHCI_PCLK_SHIFT, SDHCI_PCLK_MASK,
0, &_lock);
clk_register_clkdev(clk, NULL, "70000000.sdhci");
clk = clk_register_mux(NULL, "smii_pclk", smii0_parents,
- ARRAY_SIZE(smii0_parents), 0, SPEAR320_CONTROL_REG,
- SMII_PCLK_SHIFT, SMII_PCLK_MASK, 0, &_lock);
+ ARRAY_SIZE(smii0_parents), CLK_SET_RATE_NO_REPARENT,
+ SPEAR320_CONTROL_REG, SMII_PCLK_SHIFT, SMII_PCLK_MASK,
+ 0, &_lock);
clk_register_clkdev(clk, NULL, "smii_pclk");
clk = clk_register_fixed_factor(NULL, "smii_clk", "smii_pclk", 0, 1, 1);
clk_register_clkdev(clk, NULL, "smii");
clk = clk_register_mux(NULL, "uart1_clk", uartx_parents,
- ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uartx_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR320_CONTROL_REG, UART1_PCLK_SHIFT, UART1_PCLK_MASK,
0, &_lock);
clk_register_clkdev(clk, NULL, "a3000000.serial");
clk = clk_register_mux(NULL, "uart2_clk", uartx_parents,
- ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uartx_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR320_EXT_CTRL_REG, SPEAR320_UART2_PCLK_SHIFT,
SPEAR320_UARTX_PCLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "a4000000.serial");
clk = clk_register_mux(NULL, "uart3_clk", uartx_parents,
- ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uartx_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR320_EXT_CTRL_REG, SPEAR320_UART3_PCLK_SHIFT,
SPEAR320_UARTX_PCLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "a9100000.serial");
clk = clk_register_mux(NULL, "uart4_clk", uartx_parents,
- ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uartx_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR320_EXT_CTRL_REG, SPEAR320_UART4_PCLK_SHIFT,
SPEAR320_UARTX_PCLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "a9200000.serial");
clk = clk_register_mux(NULL, "uart5_clk", uartx_parents,
- ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uartx_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR320_EXT_CTRL_REG, SPEAR320_UART5_PCLK_SHIFT,
SPEAR320_UARTX_PCLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "60000000.serial");
clk = clk_register_mux(NULL, "uart6_clk", uartx_parents,
- ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uartx_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
SPEAR320_EXT_CTRL_REG, SPEAR320_UART6_PCLK_SHIFT,
SPEAR320_UARTX_PCLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "60100000.serial");
@@ -427,7 +437,8 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
- ARRAY_SIZE(uart0_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(uart0_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
PERIP_CLK_CFG, UART_CLK_SHIFT, UART_CLK_MASK, 0,
&_lock);
clk_register_clkdev(clk, "uart0_mclk", NULL);
@@ -444,7 +455,8 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
clk_register_clkdev(clk1, "firda_syn_gclk", NULL);
clk = clk_register_mux(NULL, "firda_mclk", firda_parents,
- ARRAY_SIZE(firda_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(firda_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
PERIP_CLK_CFG, FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0,
&_lock);
clk_register_clkdev(clk, "firda_mclk", NULL);
@@ -458,14 +470,16 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
clk_register_gpt("gpt0_syn_clk", "pll1_clk", 0, PRSC0_CLK_CFG, gpt_rtbl,
ARRAY_SIZE(gpt_rtbl), &_lock);
clk = clk_register_mux(NULL, "gpt0_clk", gpt0_parents,
- ARRAY_SIZE(gpt0_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(gpt0_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
PERIP_CLK_CFG, GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "gpt0");
clk_register_gpt("gpt1_syn_clk", "pll1_clk", 0, PRSC1_CLK_CFG, gpt_rtbl,
ARRAY_SIZE(gpt_rtbl), &_lock);
clk = clk_register_mux(NULL, "gpt1_mclk", gpt1_parents,
- ARRAY_SIZE(gpt1_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(gpt1_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
PERIP_CLK_CFG, GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt1_mclk", NULL);
clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk",
@@ -476,7 +490,8 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
clk_register_gpt("gpt2_syn_clk", "pll1_clk", 0, PRSC2_CLK_CFG, gpt_rtbl,
ARRAY_SIZE(gpt_rtbl), &_lock);
clk = clk_register_mux(NULL, "gpt2_mclk", gpt2_parents,
- ARRAY_SIZE(gpt2_parents), CLK_SET_RATE_PARENT,
+ ARRAY_SIZE(gpt2_parents),
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
PERIP_CLK_CFG, GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt2_mclk", NULL);
clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk",
@@ -498,9 +513,9 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
clk_register_clkdev(clk1, "gen1_syn_gclk", NULL);
clk = clk_register_mux(NULL, "gen2_3_par_clk", gen2_3_parents,
- ARRAY_SIZE(gen2_3_parents), 0, CORE_CLK_CFG,
- GEN_SYNTH2_3_CLK_SHIFT, GEN_SYNTH2_3_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(gen2_3_parents), CLK_SET_RATE_NO_REPARENT,
+ CORE_CLK_CFG, GEN_SYNTH2_3_CLK_SHIFT,
+ GEN_SYNTH2_3_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gen2_3_par_clk", NULL);
clk = clk_register_aux("gen2_syn_clk", "gen2_syn_gclk",
@@ -540,8 +555,8 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
clk_register_clkdev(clk, "ahbmult2_clk", NULL);
clk = clk_register_mux(NULL, "ddr_clk", ddr_parents,
- ARRAY_SIZE(ddr_parents), 0, PLL_CLK_CFG, MCTR_CLK_SHIFT,
- MCTR_CLK_MASK, 0, &_lock);
+ ARRAY_SIZE(ddr_parents), CLK_SET_RATE_NO_REPARENT,
+ PLL_CLK_CFG, MCTR_CLK_SHIFT, MCTR_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "ddr_clk", NULL);
clk = clk_register_divider(NULL, "apb_clk", "ahb_clk",
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
index 9406f24..4f649c9 100644
--- a/drivers/clk/spear/spear6xx_clock.c
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -169,8 +169,9 @@ void __init spear6xx_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
clk = clk_register_mux(NULL, "uart_mclk", uart_parents,
- ARRAY_SIZE(uart_parents), 0, PERIP_CLK_CFG,
- UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
+ ARRAY_SIZE(uart_parents), CLK_SET_RATE_NO_REPARENT,
+ PERIP_CLK_CFG, UART_CLK_SHIFT, UART_CLK_MASK, 0,
+ &_lock);
clk_register_clkdev(clk, "uart_mclk", NULL);
clk = clk_register_gate(NULL, "uart0", "uart_mclk", 0, PERIP1_CLK_ENB,
@@ -188,8 +189,9 @@ void __init spear6xx_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk1, "firda_syn_gclk", NULL);
clk = clk_register_mux(NULL, "firda_mclk", firda_parents,
- ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
- FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
+ ARRAY_SIZE(firda_parents), CLK_SET_RATE_NO_REPARENT,
+ PERIP_CLK_CFG, FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0,
+ &_lock);
clk_register_clkdev(clk, "firda_mclk", NULL);
clk = clk_register_gate(NULL, "firda_clk", "firda_mclk", 0,
@@ -203,8 +205,9 @@ void __init spear6xx_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk1, "clcd_syn_gclk", NULL);
clk = clk_register_mux(NULL, "clcd_mclk", clcd_parents,
- ARRAY_SIZE(clcd_parents), 0, PERIP_CLK_CFG,
- CLCD_CLK_SHIFT, CLCD_CLK_MASK, 0, &_lock);
+ ARRAY_SIZE(clcd_parents), CLK_SET_RATE_NO_REPARENT,
+ PERIP_CLK_CFG, CLCD_CLK_SHIFT, CLCD_CLK_MASK, 0,
+ &_lock);
clk_register_clkdev(clk, "clcd_mclk", NULL);
clk = clk_register_gate(NULL, "clcd_clk", "clcd_mclk", 0,
@@ -217,13 +220,13 @@ void __init spear6xx_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, "gpt0_1_syn_clk", NULL);
clk = clk_register_mux(NULL, "gpt0_mclk", gpt0_1_parents,
- ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
- GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ ARRAY_SIZE(gpt0_1_parents), CLK_SET_RATE_NO_REPARENT,
+ PERIP_CLK_CFG, GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "gpt0");
clk = clk_register_mux(NULL, "gpt1_mclk", gpt0_1_parents,
- ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
- GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ ARRAY_SIZE(gpt0_1_parents), CLK_SET_RATE_NO_REPARENT,
+ PERIP_CLK_CFG, GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt1_mclk", NULL);
clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
@@ -235,8 +238,8 @@ void __init spear6xx_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, "gpt2_syn_clk", NULL);
clk = clk_register_mux(NULL, "gpt2_mclk", gpt2_parents,
- ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
- GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ ARRAY_SIZE(gpt2_parents), CLK_SET_RATE_NO_REPARENT,
+ PERIP_CLK_CFG, GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt2_mclk", NULL);
clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
@@ -248,8 +251,8 @@ void __init spear6xx_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, "gpt3_syn_clk", NULL);
clk = clk_register_mux(NULL, "gpt3_mclk", gpt3_parents,
- ARRAY_SIZE(gpt3_parents), 0, PERIP_CLK_CFG,
- GPT3_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ ARRAY_SIZE(gpt3_parents), CLK_SET_RATE_NO_REPARENT,
+ PERIP_CLK_CFG, GPT3_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "gpt3_mclk", NULL);
clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
@@ -277,8 +280,8 @@ void __init spear6xx_clk_init(void __iomem *misc_base)
clk_register_clkdev(clk, "ahbmult2_clk", NULL);
clk = clk_register_mux(NULL, "ddr_clk", ddr_parents,
- ARRAY_SIZE(ddr_parents), 0, PLL_CLK_CFG, MCTR_CLK_SHIFT,
- MCTR_CLK_MASK, 0, &_lock);
+ ARRAY_SIZE(ddr_parents), CLK_SET_RATE_NO_REPARENT,
+ PLL_CLK_CFG, MCTR_CLK_SHIFT, MCTR_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "ddr_clk", NULL);
clk = clk_register_divider(NULL, "apb_clk", "ahb_clk",
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 412912b..34ee69f 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -25,12 +25,12 @@
static DEFINE_SPINLOCK(clk_lock);
/**
- * sunxi_osc_clk_setup() - Setup function for gatable oscillator
+ * sun4i_osc_clk_setup() - Setup function for gatable oscillator
*/
#define SUNXI_OSC24M_GATE 0
-static void __init sunxi_osc_clk_setup(struct device_node *node)
+static void __init sun4i_osc_clk_setup(struct device_node *node)
{
struct clk *clk;
struct clk_fixed_rate *fixed;
@@ -64,22 +64,23 @@ static void __init sunxi_osc_clk_setup(struct device_node *node)
&gate->hw, &clk_gate_ops,
CLK_IS_ROOT);
- if (clk) {
+ if (!IS_ERR(clk)) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
clk_register_clkdev(clk, clk_name, NULL);
}
}
+CLK_OF_DECLARE(sun4i_osc, "allwinner,sun4i-osc-clk", sun4i_osc_clk_setup);
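+
+/*
+ * Sketch of the error-handling pattern used above: clock registration
+ * helpers such as clk_register_composite() return ERR_PTR() on
+ * failure, never NULL, so a bare NULL test can never catch an error:
+ *
+ *	if (IS_ERR(clk))
+ *		return PTR_ERR(clk);	error path for a probe function
+ */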
/**
- * sunxi_get_pll1_factors() - calculates n, k, m, p factors for PLL1
+ * sun4i_get_pll1_factors() - calculates n, k, m, p factors for PLL1
* PLL1 rate is calculated as follows
* rate = (parent_rate * n * (k + 1) >> p) / (m + 1);
* parent_rate is always 24 MHz
*/
-static void sunxi_get_pll1_factors(u32 *freq, u32 parent_rate,
+static void sun4i_get_pll1_factors(u32 *freq, u32 parent_rate,
u8 *n, u8 *k, u8 *m, u8 *p)
{
u8 div;
@@ -124,15 +125,97 @@ static void sunxi_get_pll1_factors(u32 *freq, u32 parent_rate,
*n = div / 4;
}
+/**
+ * sun6i_a31_get_pll1_factors() - calculates n, k and m factors for PLL1
+ * PLL1 rate is calculated as follows
+ * rate = parent_rate * (n + 1) * (k + 1) / (m + 1);
+ * parent_rate should always be 24 MHz
+ */
+static void sun6i_a31_get_pll1_factors(u32 *freq, u32 parent_rate,
+ u8 *n, u8 *k, u8 *m, u8 *p)
+{
+ /*
+ * We can operate only on MHz; this will make our life easier
+ * later.
+ */
+ u32 freq_mhz = *freq / 1000000;
+ u32 parent_freq_mhz = parent_rate / 1000000;
+
+ /*
+ * Round down the frequency to the closest multiple of either
+ * 6 or 16
+ */
+ u32 round_freq_6 = round_down(freq_mhz, 6);
+ u32 round_freq_16 = round_down(freq_mhz, 16);
+
+ if (round_freq_6 > round_freq_16)
+ freq_mhz = round_freq_6;
+ else
+ freq_mhz = round_freq_16;
+
+ *freq = freq_mhz * 1000000;
+ /*
+ * If the factors pointers are NULL, we were just called to
+ * round down the frequency, so exit.
+ */
+ if (n == NULL)
+ return;
+
+ /* If the frequency is a multiple of 32 MHz, k is always 3 */
+ if (!(freq_mhz % 32))
+ *k = 3;
+ /* If the frequency is a multiple of 9 MHz, k is always 2 */
+ else if (!(freq_mhz % 9))
+ *k = 2;
+ /* If the frequency is a multiple of 8 MHz, k is always 1 */
+ else if (!(freq_mhz % 8))
+ *k = 1;
+ /* Otherwise, we don't use the k factor */
+ else
+ *k = 0;
+
+ /*
+ * If the frequency is a multiple of 2 but not a multiple of
+ * 3, the divider is 3, i.e. m is 2. This is the first time 6
+ * appears here, but it will be used in several other places:
+ * 6 MHz is the lowest frequency we can generate (with n = 0,
+ * k = 0, m = 3), so every other frequency somehow relates to
+ * it.
+ */
+ if ((freq_mhz % 6) == 2 || (freq_mhz % 6) == 4)
+ *m = 2;
+ /*
+ * If the frequency is a multiple of 6 MHz but the multiple is
+ * odd, m will be 3.
+ */
+ else if ((freq_mhz / 6) & 1)
+ *m = 3;
+ /* Otherwise, we end up with m = 1 */
+ else
+ *m = 1;
+
+ /* Calculate n from the k and m factors chosen above */
+ *n = freq_mhz * (*m + 1) / ((*k + 1) * parent_freq_mhz) - 1;
+
+ /*
+ * If n ends up out of bounds and we can still decrease m,
+ * do it.
+ */
+ if ((*n + 1) > 31 && (*m + 1) > 1) {
+ *n = (*n + 1) / 2 - 1;
+ *m = (*m + 1) / 2 - 1;
+ }
+}
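+
+/*
+ * Worked example (illustrative figures): for a 1008 MHz target from a
+ * 24 MHz parent, 1008 is not a multiple of 32 but is a multiple of 9,
+ * so k = 2; it is a multiple of 6 with an even quotient (1008 / 6 =
+ * 168), so m = 1; then n = 1008 * 2 / (3 * 24) - 1 = 27, and the PLL
+ * produces 24 MHz * 28 * 3 / 2 = 1008 MHz exactly.
+ */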
/**
- * sunxi_get_apb1_factors() - calculates m, p factors for APB1
+ * sun4i_get_apb1_factors() - calculates m, p factors for APB1
* APB1 rate is calculated as follows
* rate = (parent_rate >> p) / (m + 1);
*/
-static void sunxi_get_apb1_factors(u32 *freq, u32 parent_rate,
+static void sun4i_get_apb1_factors(u32 *freq, u32 parent_rate,
u8 *n, u8 *k, u8 *m, u8 *p)
{
u8 calcm, calcp;
@@ -178,7 +261,7 @@ struct factors_data {
void (*getter) (u32 *rate, u32 parent_rate, u8 *n, u8 *k, u8 *m, u8 *p);
};
-static struct clk_factors_config pll1_config = {
+static struct clk_factors_config sun4i_pll1_config = {
.nshift = 8,
.nwidth = 5,
.kshift = 4,
@@ -189,21 +272,35 @@ static struct clk_factors_config pll1_config = {
.pwidth = 2,
};
-static struct clk_factors_config apb1_config = {
+static struct clk_factors_config sun6i_a31_pll1_config = {
+ .nshift = 8,
+ .nwidth = 5,
+ .kshift = 4,
+ .kwidth = 2,
+ .mshift = 0,
+ .mwidth = 2,
+};
+
+static struct clk_factors_config sun4i_apb1_config = {
.mshift = 0,
.mwidth = 5,
.pshift = 16,
.pwidth = 2,
};
-static const __initconst struct factors_data pll1_data = {
- .table = &pll1_config,
- .getter = sunxi_get_pll1_factors,
+static const struct factors_data sun4i_pll1_data __initconst = {
+ .table = &sun4i_pll1_config,
+ .getter = sun4i_get_pll1_factors,
};
-static const __initconst struct factors_data apb1_data = {
- .table = &apb1_config,
- .getter = sunxi_get_apb1_factors,
+static const struct factors_data sun6i_a31_pll1_data __initconst = {
+ .table = &sun6i_a31_pll1_config,
+ .getter = sun6i_a31_get_pll1_factors,
+};
+
+static const struct factors_data sun4i_apb1_data __initconst = {
+ .table = &sun4i_apb1_config,
+ .getter = sun4i_get_apb1_factors,
};
static void __init sunxi_factors_clk_setup(struct device_node *node,
@@ -221,7 +318,7 @@ static void __init sunxi_factors_clk_setup(struct device_node *node,
clk = clk_register_factors(NULL, clk_name, parent, 0, reg,
data->table, data->getter, &clk_lock);
- if (clk) {
+ if (!IS_ERR(clk)) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
clk_register_clkdev(clk, clk_name, NULL);
}
@@ -239,11 +336,15 @@ struct mux_data {
u8 shift;
};
-static const __initconst struct mux_data cpu_mux_data = {
+static const struct mux_data sun4i_cpu_mux_data __initconst = {
.shift = 16,
};
-static const __initconst struct mux_data apb1_mux_data = {
+static const struct mux_data sun6i_a31_ahb1_mux_data __initconst = {
+ .shift = 12,
+};
+
+static const struct mux_data sun4i_apb1_mux_data __initconst = {
.shift = 24,
};
@@ -261,7 +362,8 @@ static void __init sunxi_mux_clk_setup(struct device_node *node,
while (i < 5 && (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
i++;
- clk = clk_register_mux(NULL, clk_name, parents, i, 0, reg,
+ clk = clk_register_mux(NULL, clk_name, parents, i,
+ CLK_SET_RATE_NO_REPARENT, reg,
data->shift, SUNXI_MUX_GATE_WIDTH,
0, &clk_lock);
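CLK_SET_RATE_NO_REPARENT appears to be new in this series: a mux that previously passed 0 for its flags would now let clk_set_rate() switch parents while searching for a better rate, so every mux that wants the old behaviour has to pass the flag explicitly — hence the same one-line change repeated across the Tegra, Versatile and Zynq files below. A minimal registration sketch with hypothetical names:

static const char *example_parents[] = { "osc24M", "pll1" };

/*
 * This mux may only be reparented explicitly via clk_set_parent(),
 * never as a side effect of clk_set_rate().
 */
clk = clk_register_mux(NULL, "example_mux", example_parents,
		       ARRAY_SIZE(example_parents),
		       CLK_SET_RATE_NO_REPARENT, reg,
		       16 /* shift */, 2 /* width */, 0, &clk_lock);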
@@ -277,26 +379,34 @@ static void __init sunxi_mux_clk_setup(struct device_node *node,
* sunxi_divider_clk_setup() - Setup function for simple divider clocks
*/
-#define SUNXI_DIVISOR_WIDTH 2
-
struct div_data {
- u8 shift;
- u8 pow;
+ u8 shift;
+ u8 pow;
+ u8 width;
};
-static const __initconst struct div_data axi_data = {
- .shift = 0,
- .pow = 0,
+static const struct div_data sun4i_axi_data __initconst = {
+ .shift = 0,
+ .pow = 0,
+ .width = 2,
};
-static const __initconst struct div_data ahb_data = {
- .shift = 4,
- .pow = 1,
+static const struct div_data sun4i_ahb_data __initconst = {
+ .shift = 4,
+ .pow = 1,
+ .width = 2,
};
-static const __initconst struct div_data apb0_data = {
- .shift = 8,
- .pow = 1,
+static const struct div_data sun4i_apb0_data __initconst = {
+ .shift = 8,
+ .pow = 1,
+ .width = 2,
+};
+
+static const struct div_data sun6i_a31_apb2_div_data __initconst = {
+ .shift = 0,
+ .pow = 0,
+ .width = 4,
};
static void __init sunxi_divider_clk_setup(struct device_node *node,
@@ -312,7 +422,7 @@ static void __init sunxi_divider_clk_setup(struct device_node *node,
clk_parent = of_clk_get_parent_name(node, 0);
clk = clk_register_divider(NULL, clk_name, clk_parent, 0,
- reg, data->shift, SUNXI_DIVISOR_WIDTH,
+ reg, data->shift, data->width,
data->pow ? CLK_DIVIDER_POWER_OF_TWO : 0,
&clk_lock);
if (clk) {
@@ -333,34 +443,70 @@ struct gates_data {
DECLARE_BITMAP(mask, SUNXI_GATES_MAX_SIZE);
};
-static const __initconst struct gates_data sun4i_axi_gates_data = {
+static const struct gates_data sun4i_axi_gates_data __initconst = {
.mask = {1},
};
-static const __initconst struct gates_data sun4i_ahb_gates_data = {
+static const struct gates_data sun4i_ahb_gates_data __initconst = {
.mask = {0x7F77FFF, 0x14FB3F},
};
-static const __initconst struct gates_data sun5i_a13_ahb_gates_data = {
+static const struct gates_data sun5i_a10s_ahb_gates_data __initconst = {
+ .mask = {0x147667e7, 0x185915},
+};
+
+static const struct gates_data sun5i_a13_ahb_gates_data __initconst = {
.mask = {0x107067e7, 0x185111},
};
-static const __initconst struct gates_data sun4i_apb0_gates_data = {
+static const struct gates_data sun6i_a31_ahb1_gates_data __initconst = {
+ .mask = {0xEDFE7F62, 0x794F931},
+};
+
+static const struct gates_data sun7i_a20_ahb_gates_data __initconst = {
+ .mask = { 0x12f77fff, 0x16ff3f },
+};
+
+static const struct gates_data sun4i_apb0_gates_data __initconst = {
.mask = {0x4EF},
};
-static const __initconst struct gates_data sun5i_a13_apb0_gates_data = {
+static const struct gates_data sun5i_a10s_apb0_gates_data __initconst = {
+ .mask = {0x469},
+};
+
+static const struct gates_data sun5i_a13_apb0_gates_data __initconst = {
.mask = {0x61},
};
-static const __initconst struct gates_data sun4i_apb1_gates_data = {
+static const struct gates_data sun7i_a20_apb0_gates_data __initconst = {
+ .mask = { 0x4ff },
+};
+
+static const struct gates_data sun4i_apb1_gates_data __initconst = {
.mask = {0xFF00F7},
};
-static const __initconst struct gates_data sun5i_a13_apb1_gates_data = {
+static const struct gates_data sun5i_a10s_apb1_gates_data __initconst = {
+ .mask = {0xf0007},
+};
+
+static const struct gates_data sun5i_a13_apb1_gates_data __initconst = {
.mask = {0xa0007},
};
+static const struct gates_data sun6i_a31_apb1_gates_data __initconst = {
+ .mask = {0x3031},
+};
+
+static const struct gates_data sun6i_a31_apb2_gates_data __initconst = {
+ .mask = {0x3F000F},
+};
+
+static const struct gates_data sun7i_a20_apb1_gates_data __initconst = {
+ .mask = { 0xff80ff },
+};
+
static void __init sunxi_gates_clk_setup(struct device_node *node,
struct gates_data *data)
{
@@ -410,43 +556,49 @@ static void __init sunxi_gates_clk_setup(struct device_node *node,
of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
}
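The body of sunxi_gates_clk_setup() is elided by the hunk above; presumably each set bit in a mask yields one basic gate, roughly along these lines (a sketch, not the patch's actual code):

int i, j = 0;

for_each_set_bit(i, data->mask, SUNXI_GATES_MAX_SIZE) {
	of_property_read_string_index(node, "clock-output-names",
				      j++, &clk_name);
	clk_data->clks[i] = clk_register_gate(NULL, clk_name, clk_parent,
					      0, reg + 4 * (i / 32), i % 32,
					      0, &clk_lock);
}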
-/* Matches for of_clk_init */
-static const __initconst struct of_device_id clk_match[] = {
- {.compatible = "allwinner,sun4i-osc-clk", .data = sunxi_osc_clk_setup,},
- {}
-};
-
/* Matches for factors clocks */
-static const __initconst struct of_device_id clk_factors_match[] = {
- {.compatible = "allwinner,sun4i-pll1-clk", .data = &pll1_data,},
- {.compatible = "allwinner,sun4i-apb1-clk", .data = &apb1_data,},
+static const struct of_device_id clk_factors_match[] __initconst = {
+ {.compatible = "allwinner,sun4i-pll1-clk", .data = &sun4i_pll1_data,},
+ {.compatible = "allwinner,sun6i-a31-pll1-clk", .data = &sun6i_a31_pll1_data,},
+ {.compatible = "allwinner,sun4i-apb1-clk", .data = &sun4i_apb1_data,},
{}
};
/* Matches for divider clocks */
-static const __initconst struct of_device_id clk_div_match[] = {
- {.compatible = "allwinner,sun4i-axi-clk", .data = &axi_data,},
- {.compatible = "allwinner,sun4i-ahb-clk", .data = &ahb_data,},
- {.compatible = "allwinner,sun4i-apb0-clk", .data = &apb0_data,},
+static const struct of_device_id clk_div_match[] __initconst = {
+ {.compatible = "allwinner,sun4i-axi-clk", .data = &sun4i_axi_data,},
+ {.compatible = "allwinner,sun4i-ahb-clk", .data = &sun4i_ahb_data,},
+ {.compatible = "allwinner,sun4i-apb0-clk", .data = &sun4i_apb0_data,},
+ {.compatible = "allwinner,sun6i-a31-apb2-div-clk", .data = &sun6i_a31_apb2_div_data,},
{}
};
/* Matches for mux clocks */
-static const __initconst struct of_device_id clk_mux_match[] = {
- {.compatible = "allwinner,sun4i-cpu-clk", .data = &cpu_mux_data,},
- {.compatible = "allwinner,sun4i-apb1-mux-clk", .data = &apb1_mux_data,},
+static const struct of_device_id clk_mux_match[] __initconst = {
+ {.compatible = "allwinner,sun4i-cpu-clk", .data = &sun4i_cpu_mux_data,},
+ {.compatible = "allwinner,sun4i-apb1-mux-clk", .data = &sun4i_apb1_mux_data,},
+ {.compatible = "allwinner,sun6i-a31-ahb1-mux-clk", .data = &sun6i_a31_ahb1_mux_data,},
{}
};
/* Matches for gate clocks */
-static const __initconst struct of_device_id clk_gates_match[] = {
+static const struct of_device_id clk_gates_match[] __initconst = {
{.compatible = "allwinner,sun4i-axi-gates-clk", .data = &sun4i_axi_gates_data,},
{.compatible = "allwinner,sun4i-ahb-gates-clk", .data = &sun4i_ahb_gates_data,},
+ {.compatible = "allwinner,sun5i-a10s-ahb-gates-clk", .data = &sun5i_a10s_ahb_gates_data,},
{.compatible = "allwinner,sun5i-a13-ahb-gates-clk", .data = &sun5i_a13_ahb_gates_data,},
+ {.compatible = "allwinner,sun6i-a31-ahb1-gates-clk", .data = &sun6i_a31_ahb1_gates_data,},
+ {.compatible = "allwinner,sun7i-a20-ahb-gates-clk", .data = &sun7i_a20_ahb_gates_data,},
{.compatible = "allwinner,sun4i-apb0-gates-clk", .data = &sun4i_apb0_gates_data,},
+ {.compatible = "allwinner,sun5i-a10s-apb0-gates-clk", .data = &sun5i_a10s_apb0_gates_data,},
{.compatible = "allwinner,sun5i-a13-apb0-gates-clk", .data = &sun5i_a13_apb0_gates_data,},
+ {.compatible = "allwinner,sun7i-a20-apb0-gates-clk", .data = &sun7i_a20_apb0_gates_data,},
{.compatible = "allwinner,sun4i-apb1-gates-clk", .data = &sun4i_apb1_gates_data,},
+ {.compatible = "allwinner,sun5i-a10s-apb1-gates-clk", .data = &sun5i_a10s_apb1_gates_data,},
{.compatible = "allwinner,sun5i-a13-apb1-gates-clk", .data = &sun5i_a13_apb1_gates_data,},
+ {.compatible = "allwinner,sun6i-a31-apb1-gates-clk", .data = &sun6i_a31_apb1_gates_data,},
+ {.compatible = "allwinner,sun7i-a20-apb1-gates-clk", .data = &sun7i_a20_apb1_gates_data,},
+ {.compatible = "allwinner,sun6i-a31-apb2-gates-clk", .data = &sun6i_a31_apb2_gates_data,},
{}
};
@@ -467,8 +619,8 @@ static void __init of_sunxi_table_clock_setup(const struct of_device_id *clk_mat
void __init sunxi_init_clocks(void)
{
- /* Register all the simple sunxi clocks on DT */
- of_clk_init(clk_match);
+ /* Register all the simple and basic clocks from DT */
+ of_clk_init(NULL);
/* Register factor clocks */
of_sunxi_table_clock_setup(clk_factors_match, sunxi_factors_clk_setup);
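Passing NULL makes of_clk_init() walk the table assembled from CLK_OF_DECLARE() entries, so the removed clk_match table is presumably replaced by a declaration of this shape elsewhere in the file (a sketch):

CLK_OF_DECLARE(sun4i_osc, "allwinner,sun4i-osc-clk", sunxi_osc_clk_setup);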
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 806d803..9467da7 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -1566,7 +1566,8 @@ static void __init tegra114_audio_clk_init(void __iomem *clk_base)
/* audio0 */
clk = clk_register_mux(NULL, "audio0_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk), 0,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK_I2S0, 0, 3, 0,
NULL);
clks[audio0_mux] = clk;
@@ -1578,7 +1579,8 @@ static void __init tegra114_audio_clk_init(void __iomem *clk_base)
/* audio1 */
clk = clk_register_mux(NULL, "audio1_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk), 0,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK_I2S1, 0, 3, 0,
NULL);
clks[audio1_mux] = clk;
@@ -1590,7 +1592,8 @@ static void __init tegra114_audio_clk_init(void __iomem *clk_base)
/* audio2 */
clk = clk_register_mux(NULL, "audio2_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk), 0,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK_I2S2, 0, 3, 0,
NULL);
clks[audio2_mux] = clk;
@@ -1602,7 +1605,8 @@ static void __init tegra114_audio_clk_init(void __iomem *clk_base)
/* audio3 */
clk = clk_register_mux(NULL, "audio3_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk), 0,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK_I2S3, 0, 3, 0,
NULL);
clks[audio3_mux] = clk;
@@ -1614,7 +1618,8 @@ static void __init tegra114_audio_clk_init(void __iomem *clk_base)
/* audio4 */
clk = clk_register_mux(NULL, "audio4_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk), 0,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK_I2S4, 0, 3, 0,
NULL);
clks[audio4_mux] = clk;
@@ -1626,7 +1631,8 @@ static void __init tegra114_audio_clk_init(void __iomem *clk_base)
/* spdif */
clk = clk_register_mux(NULL, "spdif_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk), 0,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK_SPDIF, 0, 3, 0,
NULL);
clks[spdif_mux] = clk;
@@ -1721,7 +1727,8 @@ static void __init tegra114_pmc_clk_init(void __iomem *pmc_base)
/* clk_out_1 */
clk = clk_register_mux(NULL, "clk_out_1_mux", clk_out1_parents,
- ARRAY_SIZE(clk_out1_parents), 0,
+ ARRAY_SIZE(clk_out1_parents),
+ CLK_SET_RATE_NO_REPARENT,
pmc_base + PMC_CLK_OUT_CNTRL, 6, 3, 0,
&clk_out_lock);
clks[clk_out_1_mux] = clk;
@@ -1733,7 +1740,8 @@ static void __init tegra114_pmc_clk_init(void __iomem *pmc_base)
/* clk_out_2 */
clk = clk_register_mux(NULL, "clk_out_2_mux", clk_out2_parents,
- ARRAY_SIZE(clk_out2_parents), 0,
+ ARRAY_SIZE(clk_out2_parents),
+ CLK_SET_RATE_NO_REPARENT,
pmc_base + PMC_CLK_OUT_CNTRL, 14, 3, 0,
&clk_out_lock);
clks[clk_out_2_mux] = clk;
@@ -1745,7 +1753,8 @@ static void __init tegra114_pmc_clk_init(void __iomem *pmc_base)
/* clk_out_3 */
clk = clk_register_mux(NULL, "clk_out_3_mux", clk_out3_parents,
- ARRAY_SIZE(clk_out3_parents), 0,
+ ARRAY_SIZE(clk_out3_parents),
+ CLK_SET_RATE_NO_REPARENT,
pmc_base + PMC_CLK_OUT_CNTRL, 22, 3, 0,
&clk_out_lock);
clks[clk_out_3_mux] = clk;
@@ -2063,7 +2072,8 @@ static __init void tegra114_periph_clk_init(void __iomem *clk_base)
/* dsia */
clk = clk_register_mux(NULL, "dsia_mux", mux_plld_out0_plld2_out0,
- ARRAY_SIZE(mux_plld_out0_plld2_out0), 0,
+ ARRAY_SIZE(mux_plld_out0_plld2_out0),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + PLLD_BASE, 25, 1, 0, &pll_d_lock);
clks[dsia_mux] = clk;
clk = tegra_clk_register_periph_gate("dsia", "dsia_mux", 0, clk_base,
@@ -2073,7 +2083,8 @@ static __init void tegra114_periph_clk_init(void __iomem *clk_base)
/* dsib */
clk = clk_register_mux(NULL, "dsib_mux", mux_plld_out0_plld2_out0,
- ARRAY_SIZE(mux_plld_out0_plld2_out0), 0,
+ ARRAY_SIZE(mux_plld_out0_plld2_out0),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + PLLD2_BASE, 25, 1, 0, &pll_d2_lock);
clks[dsib_mux] = clk;
clk = tegra_clk_register_periph_gate("dsib", "dsib_mux", 0, clk_base,
@@ -2110,7 +2121,8 @@ static __init void tegra114_periph_clk_init(void __iomem *clk_base)
/* emc */
clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
- ARRAY_SIZE(mux_pllmcp_clkm), 0,
+ ARRAY_SIZE(mux_pllmcp_clkm),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + CLK_SOURCE_EMC,
29, 3, 0, NULL);
clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base,
@@ -2194,7 +2206,7 @@ static const struct of_device_id pmc_match[] __initconst = {
* dfll_soc/dfll_ref apparently must be kept enabled, otherwise I2C5
* breaks
*/
-static __initdata struct tegra_clk_init_table init_table[] = {
+static struct tegra_clk_init_table init_table[] __initdata = {
{uarta, pll_p, 408000000, 0},
{uartb, pll_p, 408000000, 0},
{uartc, pll_p, 408000000, 0},
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 759ca47..056f649 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -778,7 +778,8 @@ static void __init tegra20_audio_clk_init(void)
/* audio */
clk = clk_register_mux(NULL, "audio_mux", audio_parents,
- ARRAY_SIZE(audio_parents), 0,
+ ARRAY_SIZE(audio_parents),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK, 0, 3, 0, NULL);
clk = clk_register_gate(NULL, "audio", "audio_mux", 0,
clk_base + AUDIO_SYNC_CLK, 4,
@@ -941,7 +942,8 @@ static void __init tegra20_periph_clk_init(void)
/* emc */
clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
- ARRAY_SIZE(mux_pllmcp_clkm), 0,
+ ARRAY_SIZE(mux_pllmcp_clkm),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + CLK_SOURCE_EMC,
30, 2, 0, NULL);
clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
@@ -1223,7 +1225,7 @@ static struct tegra_cpu_car_ops tegra20_cpu_car_ops = {
#endif
};
-static __initdata struct tegra_clk_init_table init_table[] = {
+static struct tegra_clk_init_table init_table[] __initdata = {
{pll_p, clk_max, 216000000, 1},
{pll_p_out1, clk_max, 28800000, 1},
{pll_p_out2, clk_max, 48000000, 1},
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index e2c6ca0..dbe7c80 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -971,7 +971,7 @@ static void __init tegra30_pll_init(void)
/* PLLU */
clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, pmc_base, 0,
0, &pll_u_params, TEGRA_PLLU | TEGRA_PLL_HAS_CPCON |
- TEGRA_PLL_SET_LFCON | TEGRA_PLL_USE_LOCK,
+ TEGRA_PLL_SET_LFCON,
pll_u_freq_table,
NULL);
clk_register_clkdev(clk, "pll_u", NULL);
@@ -1026,7 +1026,8 @@ static void __init tegra30_pll_init(void)
/* PLLE */
clk = clk_register_mux(NULL, "pll_e_mux", pll_e_parents,
- ARRAY_SIZE(pll_e_parents), 0,
+ ARRAY_SIZE(pll_e_parents),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + PLLE_AUX, 2, 1, 0, NULL);
clk = tegra_clk_register_plle("pll_e", "pll_e_mux", clk_base, pmc_base,
CLK_GET_RATE_NOCACHE, 100000000, &pll_e_params,
@@ -1086,7 +1087,8 @@ static void __init tegra30_audio_clk_init(void)
/* audio0 */
clk = clk_register_mux(NULL, "audio0_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk), 0,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK_I2S0, 0, 3, 0, NULL);
clk = clk_register_gate(NULL, "audio0", "audio0_mux", 0,
clk_base + AUDIO_SYNC_CLK_I2S0, 4,
@@ -1096,7 +1098,8 @@ static void __init tegra30_audio_clk_init(void)
/* audio1 */
clk = clk_register_mux(NULL, "audio1_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk), 0,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK_I2S1, 0, 3, 0, NULL);
clk = clk_register_gate(NULL, "audio1", "audio1_mux", 0,
clk_base + AUDIO_SYNC_CLK_I2S1, 4,
@@ -1106,7 +1109,8 @@ static void __init tegra30_audio_clk_init(void)
/* audio2 */
clk = clk_register_mux(NULL, "audio2_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk), 0,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK_I2S2, 0, 3, 0, NULL);
clk = clk_register_gate(NULL, "audio2", "audio2_mux", 0,
clk_base + AUDIO_SYNC_CLK_I2S2, 4,
@@ -1116,7 +1120,8 @@ static void __init tegra30_audio_clk_init(void)
/* audio3 */
clk = clk_register_mux(NULL, "audio3_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk), 0,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK_I2S3, 0, 3, 0, NULL);
clk = clk_register_gate(NULL, "audio3", "audio3_mux", 0,
clk_base + AUDIO_SYNC_CLK_I2S3, 4,
@@ -1126,7 +1131,8 @@ static void __init tegra30_audio_clk_init(void)
/* audio4 */
clk = clk_register_mux(NULL, "audio4_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk), 0,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK_I2S4, 0, 3, 0, NULL);
clk = clk_register_gate(NULL, "audio4", "audio4_mux", 0,
clk_base + AUDIO_SYNC_CLK_I2S4, 4,
@@ -1136,7 +1142,8 @@ static void __init tegra30_audio_clk_init(void)
/* spdif */
clk = clk_register_mux(NULL, "spdif_mux", mux_audio_sync_clk,
- ARRAY_SIZE(mux_audio_sync_clk), 0,
+ ARRAY_SIZE(mux_audio_sync_clk),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + AUDIO_SYNC_CLK_SPDIF, 0, 3, 0, NULL);
clk = clk_register_gate(NULL, "spdif", "spdif_mux", 0,
clk_base + AUDIO_SYNC_CLK_SPDIF, 4,
@@ -1229,7 +1236,8 @@ static void __init tegra30_pmc_clk_init(void)
/* clk_out_1 */
clk = clk_register_mux(NULL, "clk_out_1_mux", clk_out1_parents,
- ARRAY_SIZE(clk_out1_parents), 0,
+ ARRAY_SIZE(clk_out1_parents),
+ CLK_SET_RATE_NO_REPARENT,
pmc_base + PMC_CLK_OUT_CNTRL, 6, 3, 0,
&clk_out_lock);
clks[clk_out_1_mux] = clk;
@@ -1241,7 +1249,8 @@ static void __init tegra30_pmc_clk_init(void)
/* clk_out_2 */
clk = clk_register_mux(NULL, "clk_out_2_mux", clk_out2_parents,
- ARRAY_SIZE(clk_out2_parents), 0,
+ ARRAY_SIZE(clk_out2_parents),
+ CLK_SET_RATE_NO_REPARENT,
pmc_base + PMC_CLK_OUT_CNTRL, 14, 3, 0,
&clk_out_lock);
clk = clk_register_gate(NULL, "clk_out_2", "clk_out_2_mux", 0,
@@ -1252,7 +1261,8 @@ static void __init tegra30_pmc_clk_init(void)
/* clk_out_3 */
clk = clk_register_mux(NULL, "clk_out_3_mux", clk_out3_parents,
- ARRAY_SIZE(clk_out3_parents), 0,
+ ARRAY_SIZE(clk_out3_parents),
+ CLK_SET_RATE_NO_REPARENT,
pmc_base + PMC_CLK_OUT_CNTRL, 22, 3, 0,
&clk_out_lock);
clk = clk_register_gate(NULL, "clk_out_3", "clk_out_3_mux", 0,
@@ -1679,7 +1689,8 @@ static void __init tegra30_periph_clk_init(void)
/* emc */
clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
- ARRAY_SIZE(mux_pllmcp_clkm), 0,
+ ARRAY_SIZE(mux_pllmcp_clkm),
+ CLK_SET_RATE_NO_REPARENT,
clk_base + CLK_SOURCE_EMC,
30, 2, 0, NULL);
clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
@@ -1901,7 +1912,7 @@ static struct tegra_cpu_car_ops tegra30_cpu_car_ops = {
#endif
};
-static __initdata struct tegra_clk_init_table init_table[] = {
+static struct tegra_clk_init_table init_table[] __initdata = {
{uarta, pll_p, 408000000, 0},
{uartb, pll_p, 408000000, 0},
{uartc, pll_p, 408000000, 0},
diff --git a/drivers/clk/versatile/clk-vexpress.c b/drivers/clk/versatile/clk-vexpress.c
index a4a728d..2d5e1b4 100644
--- a/drivers/clk/versatile/clk-vexpress.c
+++ b/drivers/clk/versatile/clk-vexpress.c
@@ -37,8 +37,8 @@ static void __init vexpress_sp810_init(void __iomem *base)
snprintf(name, ARRAY_SIZE(name), "timerclken%d", i);
vexpress_sp810_timerclken[i] = clk_register_mux(NULL, name,
- parents, 2, 0, base + SCCTRL,
- SCCTRL_TIMERENnSEL_SHIFT(i), 1,
+ parents, 2, CLK_SET_RATE_NO_REPARENT,
+ base + SCCTRL, SCCTRL_TIMERENnSEL_SHIFT(i), 1,
0, &vexpress_sp810_lock);
if (WARN_ON(IS_ERR(vexpress_sp810_timerclken[i])))
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index 089d3e3..cc40fe6 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -125,8 +125,9 @@ static void __init zynq_clk_register_fclk(enum zynq_clk fclk,
div0_name = kasprintf(GFP_KERNEL, "%s_div0", clk_name);
div1_name = kasprintf(GFP_KERNEL, "%s_div1", clk_name);
- clk = clk_register_mux(NULL, mux_name, parents, 4, 0,
- fclk_ctrl_reg, 4, 2, 0, fclk_lock);
+ clk = clk_register_mux(NULL, mux_name, parents, 4,
+ CLK_SET_RATE_NO_REPARENT, fclk_ctrl_reg, 4, 2, 0,
+ fclk_lock);
clk = clk_register_divider(NULL, div0_name, mux_name,
0, fclk_ctrl_reg, 8, 6, CLK_DIVIDER_ONE_BASED |
@@ -168,8 +169,8 @@ static void __init zynq_clk_register_periph_clk(enum zynq_clk clk0,
mux_name = kasprintf(GFP_KERNEL, "%s_mux", clk_name0);
div_name = kasprintf(GFP_KERNEL, "%s_div", clk_name0);
- clk = clk_register_mux(NULL, mux_name, parents, 4, 0,
- clk_ctrl, 4, 2, 0, lock);
+ clk = clk_register_mux(NULL, mux_name, parents, 4,
+ CLK_SET_RATE_NO_REPARENT, clk_ctrl, 4, 2, 0, lock);
clk = clk_register_divider(NULL, div_name, mux_name, 0, clk_ctrl, 8, 6,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, lock);
@@ -236,25 +237,26 @@ static void __init zynq_clk_setup(struct device_node *np)
clk = clk_register_zynq_pll("armpll_int", "ps_clk", SLCR_ARMPLL_CTRL,
SLCR_PLL_STATUS, 0, &armpll_lock);
clks[armpll] = clk_register_mux(NULL, clk_output_name[armpll],
- armpll_parents, 2, 0, SLCR_ARMPLL_CTRL, 4, 1, 0,
- &armpll_lock);
+ armpll_parents, 2, CLK_SET_RATE_NO_REPARENT,
+ SLCR_ARMPLL_CTRL, 4, 1, 0, &armpll_lock);
clk = clk_register_zynq_pll("ddrpll_int", "ps_clk", SLCR_DDRPLL_CTRL,
SLCR_PLL_STATUS, 1, &ddrpll_lock);
clks[ddrpll] = clk_register_mux(NULL, clk_output_name[ddrpll],
- ddrpll_parents, 2, 0, SLCR_DDRPLL_CTRL, 4, 1, 0,
- &ddrpll_lock);
+ ddrpll_parents, 2, CLK_SET_RATE_NO_REPARENT,
+ SLCR_DDRPLL_CTRL, 4, 1, 0, &ddrpll_lock);
clk = clk_register_zynq_pll("iopll_int", "ps_clk", SLCR_IOPLL_CTRL,
SLCR_PLL_STATUS, 2, &iopll_lock);
clks[iopll] = clk_register_mux(NULL, clk_output_name[iopll],
- iopll_parents, 2, 0, SLCR_IOPLL_CTRL, 4, 1, 0,
- &iopll_lock);
+ iopll_parents, 2, CLK_SET_RATE_NO_REPARENT,
+ SLCR_IOPLL_CTRL, 4, 1, 0, &iopll_lock);
/* CPU clocks */
tmp = readl(SLCR_621_TRUE) & 1;
- clk = clk_register_mux(NULL, "cpu_mux", cpu_parents, 4, 0,
- SLCR_ARM_CLK_CTRL, 4, 2, 0, &armclk_lock);
+ clk = clk_register_mux(NULL, "cpu_mux", cpu_parents, 4,
+ CLK_SET_RATE_NO_REPARENT, SLCR_ARM_CLK_CTRL, 4, 2, 0,
+ &armclk_lock);
clk = clk_register_divider(NULL, "cpu_div", "cpu_mux", 0,
SLCR_ARM_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &armclk_lock);
@@ -293,8 +295,9 @@ static void __init zynq_clk_setup(struct device_node *np)
swdt_ext_clk_mux_parents[i + 1] = dummy_nm;
}
clks[swdt] = clk_register_mux(NULL, clk_output_name[swdt],
- swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT,
- SLCR_SWDT_CLK_SEL, 0, 1, 0, &swdtclk_lock);
+ swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT, SLCR_SWDT_CLK_SEL, 0, 1, 0,
+ &swdtclk_lock);
/* DDR clocks */
clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0,
@@ -356,8 +359,9 @@ static void __init zynq_clk_setup(struct device_node *np)
gem0_mux_parents[i + 1] = of_clk_get_parent_name(np,
idx);
}
- clk = clk_register_mux(NULL, "gem0_mux", periph_parents, 4, 0,
- SLCR_GEM0_CLK_CTRL, 4, 2, 0, &gem0clk_lock);
+ clk = clk_register_mux(NULL, "gem0_mux", periph_parents, 4,
+ CLK_SET_RATE_NO_REPARENT, SLCR_GEM0_CLK_CTRL, 4, 2, 0,
+ &gem0clk_lock);
clk = clk_register_divider(NULL, "gem0_div0", "gem0_mux", 0,
SLCR_GEM0_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &gem0clk_lock);
@@ -366,7 +370,8 @@ static void __init zynq_clk_setup(struct device_node *np)
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&gem0clk_lock);
clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2,
- CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 6, 1, 0,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ SLCR_GEM0_CLK_CTRL, 6, 1, 0,
&gem0clk_lock);
clks[gem0] = clk_register_gate(NULL, clk_output_name[gem0],
"gem0_emio_mux", CLK_SET_RATE_PARENT,
@@ -379,8 +384,9 @@ static void __init zynq_clk_setup(struct device_node *np)
gem1_mux_parents[i + 1] = of_clk_get_parent_name(np,
idx);
}
- clk = clk_register_mux(NULL, "gem1_mux", periph_parents, 4, 0,
- SLCR_GEM1_CLK_CTRL, 4, 2, 0, &gem1clk_lock);
+ clk = clk_register_mux(NULL, "gem1_mux", periph_parents, 4,
+ CLK_SET_RATE_NO_REPARENT, SLCR_GEM1_CLK_CTRL, 4, 2, 0,
+ &gem1clk_lock);
clk = clk_register_divider(NULL, "gem1_div0", "gem1_mux", 0,
SLCR_GEM1_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &gem1clk_lock);
@@ -389,7 +395,8 @@ static void __init zynq_clk_setup(struct device_node *np)
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&gem1clk_lock);
clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2,
- CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 6, 1, 0,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ SLCR_GEM1_CLK_CTRL, 6, 1, 0,
&gem1clk_lock);
clks[gem1] = clk_register_gate(NULL, clk_output_name[gem1],
"gem1_emio_mux", CLK_SET_RATE_PARENT,
@@ -409,8 +416,9 @@ static void __init zynq_clk_setup(struct device_node *np)
can_mio_mux_parents[i] = dummy_nm;
}
kfree(clk_name);
- clk = clk_register_mux(NULL, "can_mux", periph_parents, 4, 0,
- SLCR_CAN_CLK_CTRL, 4, 2, 0, &canclk_lock);
+ clk = clk_register_mux(NULL, "can_mux", periph_parents, 4,
+ CLK_SET_RATE_NO_REPARENT, SLCR_CAN_CLK_CTRL, 4, 2, 0,
+ &canclk_lock);
clk = clk_register_divider(NULL, "can_div0", "can_mux", 0,
SLCR_CAN_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &canclk_lock);
@@ -425,17 +433,21 @@ static void __init zynq_clk_setup(struct device_node *np)
CLK_SET_RATE_PARENT, SLCR_CAN_CLK_CTRL, 1, 0,
&canclk_lock);
clk = clk_register_mux(NULL, "can0_mio_mux",
- can_mio_mux_parents, 54, CLK_SET_RATE_PARENT,
- SLCR_CAN_MIOCLK_CTRL, 0, 6, 0, &canmioclk_lock);
+ can_mio_mux_parents, 54, CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT, SLCR_CAN_MIOCLK_CTRL, 0, 6, 0,
+ &canmioclk_lock);
clk = clk_register_mux(NULL, "can1_mio_mux",
- can_mio_mux_parents, 54, CLK_SET_RATE_PARENT,
- SLCR_CAN_MIOCLK_CTRL, 16, 6, 0, &canmioclk_lock);
+ can_mio_mux_parents, 54, CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT, SLCR_CAN_MIOCLK_CTRL, 16, 6,
+ 0, &canmioclk_lock);
clks[can0] = clk_register_mux(NULL, clk_output_name[can0],
- can0_mio_mux2_parents, 2, CLK_SET_RATE_PARENT,
- SLCR_CAN_MIOCLK_CTRL, 6, 1, 0, &canmioclk_lock);
+ can0_mio_mux2_parents, 2, CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT, SLCR_CAN_MIOCLK_CTRL, 6, 1, 0,
+ &canmioclk_lock);
clks[can1] = clk_register_mux(NULL, clk_output_name[can1],
- can1_mio_mux2_parents, 2, CLK_SET_RATE_PARENT,
- SLCR_CAN_MIOCLK_CTRL, 22, 1, 0, &canmioclk_lock);
+ can1_mio_mux2_parents, 2, CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT, SLCR_CAN_MIOCLK_CTRL, 22, 1,
+ 0, &canmioclk_lock);
for (i = 0; i < ARRAY_SIZE(dbgtrc_emio_input_names); i++) {
int idx = of_property_match_string(np, "clock-names",
@@ -444,13 +456,15 @@ static void __init zynq_clk_setup(struct device_node *np)
dbg_emio_mux_parents[i + 1] = of_clk_get_parent_name(np,
idx);
}
- clk = clk_register_mux(NULL, "dbg_mux", periph_parents, 4, 0,
- SLCR_DBG_CLK_CTRL, 4, 2, 0, &dbgclk_lock);
+ clk = clk_register_mux(NULL, "dbg_mux", periph_parents, 4,
+ CLK_SET_RATE_NO_REPARENT, SLCR_DBG_CLK_CTRL, 4, 2, 0,
+ &dbgclk_lock);
clk = clk_register_divider(NULL, "dbg_div", "dbg_mux", 0,
SLCR_DBG_CLK_CTRL, 8, 6, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &dbgclk_lock);
- clk = clk_register_mux(NULL, "dbg_emio_mux", dbg_emio_mux_parents, 2, 0,
- SLCR_DBG_CLK_CTRL, 6, 1, 0, &dbgclk_lock);
+ clk = clk_register_mux(NULL, "dbg_emio_mux", dbg_emio_mux_parents, 2,
+ CLK_SET_RATE_NO_REPARENT, SLCR_DBG_CLK_CTRL, 6, 1, 0,
+ &dbgclk_lock);
clks[dbg_trc] = clk_register_gate(NULL, clk_output_name[dbg_trc],
"dbg_emio_mux", CLK_SET_RATE_PARENT, SLCR_DBG_CLK_CTRL,
0, 0, &dbgclk_lock);
diff --git a/drivers/clk/zynq/pll.c b/drivers/clk/zynq/pll.c
index 47e307c..3226f54 100644
--- a/drivers/clk/zynq/pll.c
+++ b/drivers/clk/zynq/pll.c
@@ -50,6 +50,9 @@ struct zynq_pll {
#define PLLCTRL_RESET_MASK 1
#define PLLCTRL_RESET_SHIFT 0
+#define PLL_FBDIV_MIN 13
+#define PLL_FBDIV_MAX 66
+
/**
* zynq_pll_round_rate() - Round a clock frequency
* @hw: Handle between common and hardware-specific interfaces
@@ -63,10 +66,10 @@ static long zynq_pll_round_rate(struct clk_hw *hw, unsigned long rate,
u32 fbdiv;
fbdiv = DIV_ROUND_CLOSEST(rate, *prate);
- if (fbdiv < 13)
- fbdiv = 13;
- else if (fbdiv > 66)
- fbdiv = 66;
+ if (fbdiv < PLL_FBDIV_MIN)
+ fbdiv = PLL_FBDIV_MIN;
+ else if (fbdiv > PLL_FBDIV_MAX)
+ fbdiv = PLL_FBDIV_MAX;
return *prate * fbdiv;
}
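A worked example of the clamp as a standalone sketch (the 33.333 MHz ps_clk reference is an assumption — the usual Zynq board value):

#include <assert.h>

static unsigned long zynq_round(unsigned long rate, unsigned long prate)
{
	unsigned long fbdiv = (rate + prate / 2) / prate; /* DIV_ROUND_CLOSEST */

	if (fbdiv < 13)		/* PLL_FBDIV_MIN */
		fbdiv = 13;
	else if (fbdiv > 66)	/* PLL_FBDIV_MAX */
		fbdiv = 66;
	return prate * fbdiv;
}

int main(void)
{
	assert(zynq_round(1000000000, 33333333) == 999999990UL);
	/* 100 MHz asks for fbdiv = 3; clamped up to 13 -> ~433.3 MHz */
	assert(zynq_round(100000000, 33333333) == 433333329UL);
	return 0;
}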
@@ -182,7 +185,13 @@ static const struct clk_ops zynq_pll_ops = {
/**
* clk_register_zynq_pll() - Register PLL with the clock framework
- * @np Pointer to the DT device node
+ * @name: PLL name
+ * @parent: Parent clock name
+ * @pll_ctrl: Pointer to PLL control register
+ * @pll_status: Pointer to PLL status register
+ * @lock_index: Bit index to this PLL's lock status bit in @pll_status
+ * @lock: Register lock
+ * Returns handle to the registered clock.
*/
struct clk *clk_register_zynq_pll(const char *name, const char *parent,
void __iomem *pll_ctrl, void __iomem *pll_status, u8 lock_index,
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index ac60f8b..ab29476 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -368,10 +368,6 @@ static void __init samsung_clocksource_init(void)
static void __init samsung_timer_resources(void)
{
- pwm.timerclk = clk_get(NULL, "timers");
- if (IS_ERR(pwm.timerclk))
- panic("failed to get timers clock for timer");
-
clk_prepare_enable(pwm.timerclk);
pwm.tcnt_max = (1UL << pwm.variant.bits) - 1;
@@ -416,6 +412,10 @@ void __init samsung_pwm_clocksource_init(void __iomem *base,
memcpy(&pwm.variant, variant, sizeof(pwm.variant));
memcpy(pwm.irq, irqs, SAMSUNG_PWM_NUM * sizeof(*irqs));
+ pwm.timerclk = clk_get(NULL, "timers");
+ if (IS_ERR(pwm.timerclk))
+ panic("failed to get timers clock for timer");
+
_samsung_pwm_clocksource_init();
}
@@ -447,6 +447,10 @@ static void __init samsung_pwm_alloc(struct device_node *np,
return;
}
+ pwm.timerclk = of_clk_get_by_name(np, "timers");
+ if (IS_ERR(pwm.timerclk))
+ panic("failed to get timers clock for timer");
+
_samsung_pwm_clocksource_init();
}
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index b330219..8e36603 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -27,3 +27,13 @@ config ARM_U8500_CPUIDLE
help
Select this to enable cpuidle for ST-E u8500 processors
+config CPU_IDLE_BIG_LITTLE
+ bool "Support for ARM big.LITTLE processors"
+ depends on ARCH_VEXPRESS_TC2_PM
+ select ARM_CPU_SUSPEND
+ select CPU_IDLE_MULTIPLE_DRIVERS
+ help
+ Select this option to enable the CPU idle driver for big.LITTLE
+ based ARM systems. The driver manages CPU coordination through MCPM
+ and defines different C-states for little and big cores through the
+ multiple CPU idle drivers infrastructure.
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 0b9d200..cea5ef5 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_ARM_HIGHBANK_CPUIDLE) += cpuidle-calxeda.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE) += cpuidle-kirkwood.o
obj-$(CONFIG_ARM_ZYNQ_CPUIDLE) += cpuidle-zynq.o
obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o
+obj-$(CONFIG_CPU_IDLE_BIG_LITTLE) += cpuidle-big_little.o
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
new file mode 100644
index 0000000..b45fc62
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2013 ARM/Linaro
+ *
+ * Authors: Daniel Lezcano <daniel.lezcano@linaro.org>
+ * Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ * Nicolas Pitre <nicolas.pitre@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Maintainer: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ * Maintainer: Daniel Lezcano <daniel.lezcano@linaro.org>
+ */
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+
+#include <asm/cpu.h>
+#include <asm/cputype.h>
+#include <asm/cpuidle.h>
+#include <asm/mcpm.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
+
+static int bl_enter_powerdown(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx);
+
+/*
+ * NB: Owing to current menu governor behaviour, big and LITTLE
+ * index 1 states have to define exit_latency and target_residency for
+ * the cluster state since, when all CPUs in a cluster hit it, the
+ * cluster can be shut down. This means that when a single CPU enters
+ * this state the exit_latency and target_residency values are somewhat
+ * overkill. There is no notion of cluster states in the menu governor,
+ * so CPUs have to define CPU states where possibly the cluster will be
+ * shut down depending on the state of other CPUs. Idle state entry and
+ * exit happen at random times; however the cluster state provides
+ * target_residency values as if all CPUs in a cluster entered the state
+ * at once; this is somewhat optimistic and the behaviour should be
+ * fixed either in the governor or in the MCPM back-ends.
+ * To make this driver 100% generic, the number of states and the
+ * exit_latency and target_residency values must be obtained from
+ * device tree bindings.
+ *
+ * exit_latency: refers to the TC2 vexpress test chip and depends on the
+ * current cluster operating point. It is the time it takes to get the CPU
+ * up and running when the CPU is powered up on cluster wake-up from shutdown.
+ * Current values for big and LITTLE clusters are provided for clusters
+ * running at default operating points.
+ *
+ * target_residency: the minimum amount of time the cluster has to be
+ * down to break even in terms of power consumption. Cluster shutdown
+ * has inherent dynamic power costs (L2 writebacks to DRAM being the
+ * main factor) that depend on the current operating points. The current
+ * values for both clusters are provided for a CPU for which half of the
+ * L2 lines are dirty and require cleaning to DRAM, and take into
+ * account the static leakage power of the vexpress TC2 test chip.
+ */
+static struct cpuidle_driver bl_idle_little_driver = {
+ .name = "little_idle",
+ .owner = THIS_MODULE,
+ .states[0] = ARM_CPUIDLE_WFI_STATE,
+ .states[1] = {
+ .enter = bl_enter_powerdown,
+ .exit_latency = 700,
+ .target_residency = 2500,
+ .flags = CPUIDLE_FLAG_TIME_VALID |
+ CPUIDLE_FLAG_TIMER_STOP,
+ .name = "C1",
+ .desc = "ARM little-cluster power down",
+ },
+ .state_count = 2,
+};
+
+static struct cpuidle_driver bl_idle_big_driver = {
+ .name = "big_idle",
+ .owner = THIS_MODULE,
+ .states[0] = ARM_CPUIDLE_WFI_STATE,
+ .states[1] = {
+ .enter = bl_enter_powerdown,
+ .exit_latency = 500,
+ .target_residency = 2000,
+ .flags = CPUIDLE_FLAG_TIME_VALID |
+ CPUIDLE_FLAG_TIMER_STOP,
+ .name = "C1",
+ .desc = "ARM big-cluster power down",
+ },
+ .state_count = 2,
+};
+
+/*
+ * notrace prevents trace shims from getting inserted where they
+ * should not. Global jumps and ldrex/strex must not be inserted
+ * in power down sequences where caches and MMU may be turned off.
+ */
+static int notrace bl_powerdown_finisher(unsigned long arg)
+{
+ /* MCPM works with HW CPU identifiers */
+ unsigned int mpidr = read_cpuid_mpidr();
+ unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+
+ mcpm_set_entry_vector(cpu, cluster, cpu_resume);
+
+ /*
+ * Residency value passed to mcpm_cpu_suspend back-end
+ * has to be given clear semantics. Set to 0 as a
+ * temporary value.
+ */
+ mcpm_cpu_suspend(0);
+
+ /* return value != 0 means failure */
+ return 1;
+}
+
+/**
+ * bl_enter_powerdown - Programs CPU to enter the specified state
+ * @dev: cpuidle device
+ * @drv: cpuidle driver containing the state to be programmed
+ * @idx: state index
+ *
+ * Called from the CPUidle framework to program the device to the
+ * specified target state selected by the governor.
+ */
+static int bl_enter_powerdown(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
+{
+ cpu_pm_enter();
+
+ cpu_suspend(0, bl_powerdown_finisher);
+
+ /* signals the MCPM core that CPU is out of low power state */
+ mcpm_cpu_powered_up();
+
+ cpu_pm_exit();
+
+ return idx;
+}
+
+static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int cpu_id)
+{
+ struct cpuinfo_arm *cpu_info;
+ struct cpumask *cpumask;
+ unsigned long cpuid;
+ int cpu;
+
+ cpumask = kzalloc(cpumask_size(), GFP_KERNEL);
+ if (!cpumask)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu) {
+ cpu_info = &per_cpu(cpu_data, cpu);
+ cpuid = is_smp() ? cpu_info->cpuid : read_cpuid_id();
+
+ /* read cpu id part number */
+ if ((cpuid & 0xFFF0) == cpu_id)
+ cpumask_set_cpu(cpu, cpumask);
+ }
+
+ drv->cpumask = cpumask;
+
+ return 0;
+}
+
+static int __init bl_idle_init(void)
+{
+ int ret;
+
+ /*
+ * Initialize the driver just for a compliant set of machines
+ */
+ if (!of_machine_is_compatible("arm,vexpress,v2p-ca15_a7"))
+ return -ENODEV;
+ /*
+ * For now the differentiation between little and big cores
+ * is based on the part number. A7 cores are considered little
+ * cores, A15 are considered big cores. This distinction may
+ * evolve in the future with a more generic matching approach.
+ */
+ ret = bl_idle_driver_init(&bl_idle_little_driver,
+ ARM_CPU_PART_CORTEX_A7);
+ if (ret)
+ return ret;
+
+ ret = bl_idle_driver_init(&bl_idle_big_driver, ARM_CPU_PART_CORTEX_A15);
+ if (ret)
+ goto out_uninit_little;
+
+ ret = cpuidle_register(&bl_idle_little_driver, NULL);
+ if (ret)
+ goto out_uninit_big;
+
+ ret = cpuidle_register(&bl_idle_big_driver, NULL);
+ if (ret)
+ goto out_unregister_little;
+
+ return 0;
+
+out_unregister_little:
+ cpuidle_unregister(&bl_idle_little_driver);
+out_uninit_big:
+ kfree(bl_idle_big_driver.cpumask);
+out_uninit_little:
+ kfree(bl_idle_little_driver.cpumask);
+
+ return ret;
+}
+device_initcall(bl_idle_init);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index daa4da2..526ec77 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -308,6 +308,15 @@ config DMA_JZ4740
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
+config K3_DMA
+ tristate "Hisilicon K3 DMA support"
+ depends on ARCH_HI3xxx
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support the DMA engine for Hisilicon K3 platform
+ devices.
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 6d62ec3..db89035 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -40,3 +40,4 @@ obj-$(CONFIG_DMA_OMAP) += omap-dma.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
obj-$(CONFIG_TI_CPPI41) += cppi41.o
+obj-$(CONFIG_K3_DMA) += k3dma.o
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 5a18f82..e69b03c 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -43,7 +43,6 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
struct list_head resource_list;
struct resource_list_entry *rentry;
resource_size_t mem = 0, irq = 0;
- u32 vendor_id;
int ret;
if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info))
@@ -73,9 +72,8 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
if (si->mmio_base_low != mem || si->gsi_interrupt != irq)
return 0;
- vendor_id = le32_to_cpu(grp->vendor_id);
dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n",
- (char *)&vendor_id, grp->device_id, grp->revision);
+ (char *)&grp->vendor_id, grp->device_id, grp->revision);
/* Check if the request line range is available */
if (si->base_request_line == 0 && si->num_handshake_signals == 0)
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 06fe45c..fce46c5 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -24,6 +24,7 @@
*
* Documentation: ARM DDI 0196G == PL080
* Documentation: ARM DDI 0218E == PL081
+ * Documentation: S3C6410 User's Manual == PL080S
*
* PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
* channel.
@@ -36,6 +37,14 @@
*
* The PL080 has a dual bus master, PL081 has a single master.
*
+ * PL080S is a version modified by Samsung and used in S3C64xx SoCs.
+ * It differs in the following aspects:
+ * - CH_CONFIG register at different offset,
+ * - separate CH_CONTROL2 register for transfer size,
+ * - bigger maximum transfer size,
+ * - 8-word aligned LLI, instead of 4-word, due to extra CCTL2 word,
+ * - no support for peripheral flow control.
+ *
* Memory to peripheral transfer may be visualized as
* Get data from memory to DMAC
* Until no data left
@@ -64,10 +73,7 @@
* - Peripheral flow control: the transfer size is ignored (and should be
* zero). The data is transferred from the current LLI entry, until
* after the final transfer signalled by LBREQ or LSREQ. The DMAC
- * will then move to the next LLI entry.
- *
- * Global TODO:
- * - Break out common code from arch/arm/mach-s3c64xx and share
+ * will then move to the next LLI entry. Unsupported by PL080S.
*/
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
@@ -100,24 +106,16 @@ struct pl08x_driver_data;
* @nomadik: whether the channels have Nomadik security extension bits
* that need to be checked for permission before use and some registers are
* missing
+ * @pl080s: whether this version is a PL080S, which has a separate
+ * register and LLI word for the transfer size.
*/
struct vendor_data {
+ u8 config_offset;
u8 channels;
bool dualmaster;
bool nomadik;
-};
-
-/*
- * PL08X private data structures
- * An LLI struct - see PL08x TRM. Note that next uses bit[0] as a bus bit,
- * start & end do not - their bus bit info is in cctl. Also note that these
- * are fixed 32-bit quantities.
- */
-struct pl08x_lli {
- u32 src;
- u32 dst;
- u32 lli;
- u32 cctl;
+ bool pl080s;
+ u32 max_transfer_size;
};
/**
@@ -133,6 +131,8 @@ struct pl08x_bus_data {
u8 buswidth;
};
+#define IS_BUS_ALIGNED(bus) IS_ALIGNED((bus)->addr, (bus)->buswidth)
+
/**
* struct pl08x_phy_chan - holder for the physical channels
* @id: physical index to this channel
@@ -145,6 +145,7 @@ struct pl08x_bus_data {
struct pl08x_phy_chan {
unsigned int id;
void __iomem *base;
+ void __iomem *reg_config;
spinlock_t lock;
struct pl08x_dma_chan *serving;
bool locked;
@@ -174,12 +175,13 @@ struct pl08x_sg {
* @ccfg: config reg values for current txd
* @done: this marks completed descriptors, which should not have their
* mux released.
+ * @cyclic: indicate cyclic transfers
*/
struct pl08x_txd {
struct virt_dma_desc vd;
struct list_head dsg_list;
dma_addr_t llis_bus;
- struct pl08x_lli *llis_va;
+ u32 *llis_va;
/* Default cctl value for LLIs */
u32 cctl;
/*
@@ -188,6 +190,7 @@ struct pl08x_txd {
*/
u32 ccfg;
bool done;
+ bool cyclic;
};
/**
@@ -263,17 +266,29 @@ struct pl08x_driver_data {
struct dma_pool *pool;
u8 lli_buses;
u8 mem_buses;
+ u8 lli_words;
};
/*
* PL08X specific defines
*/
-/* Size (bytes) of each LLI buffer allocated for one transfer */
-# define PL08X_LLI_TSFR_SIZE 0x2000
+/* The order of words in an LLI. */
+#define PL080_LLI_SRC 0
+#define PL080_LLI_DST 1
+#define PL080_LLI_LLI 2
+#define PL080_LLI_CCTL 3
+#define PL080S_LLI_CCTL2 4
+
+/* Total words in an LLI. */
+#define PL080_LLI_WORDS 4
+#define PL080S_LLI_WORDS 8
-/* Maximum times we call dma_pool_alloc on this pool without freeing */
-#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
+/*
+ * Number of LLIs in each LLI buffer allocated for one transfer
+ * (maximum times we call dma_pool_alloc on this pool without freeing)
+ */
+#define MAX_NUM_TSFR_LLIS 512
#define PL08X_ALIGN 8
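With the LLI now a raw word array instead of struct pl08x_lli, LLI number n of a transfer starts at word offset n * lli_words (4 on PL080/PL081, 8 on PL080S). The addressing used throughout the rewritten code boils down to this sketch:

/* CPU and bus addresses of LLI number n within one transfer's buffer */
u32 *lli_va = txd->llis_va + n * pl08x->lli_words;
dma_addr_t lli_bus = txd->llis_bus + n * pl08x->lli_words * sizeof(u32);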
static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
@@ -334,10 +349,39 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
unsigned int val;
- val = readl(ch->base + PL080_CH_CONFIG);
+ val = readl(ch->reg_config);
return val & PL080_CONFIG_ACTIVE;
}
+static void pl08x_write_lli(struct pl08x_driver_data *pl08x,
+ struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg)
+{
+ if (pl08x->vd->pl080s)
+ dev_vdbg(&pl08x->adev->dev,
+ "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
+ "clli=0x%08x, cctl=0x%08x, cctl2=0x%08x, ccfg=0x%08x\n",
+ phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
+ lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL],
+ lli[PL080S_LLI_CCTL2], ccfg);
+ else
+ dev_vdbg(&pl08x->adev->dev,
+ "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
+ "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
+ phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
+ lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg);
+
+ writel_relaxed(lli[PL080_LLI_SRC], phychan->base + PL080_CH_SRC_ADDR);
+ writel_relaxed(lli[PL080_LLI_DST], phychan->base + PL080_CH_DST_ADDR);
+ writel_relaxed(lli[PL080_LLI_LLI], phychan->base + PL080_CH_LLI);
+ writel_relaxed(lli[PL080_LLI_CCTL], phychan->base + PL080_CH_CONTROL);
+
+ if (pl08x->vd->pl080s)
+ writel_relaxed(lli[PL080S_LLI_CCTL2],
+ phychan->base + PL080S_CH_CONTROL2);
+
+ writel(ccfg, phychan->reg_config);
+}
+
/*
* Set the initial DMA register values i.e. those for the first LLI
* The next LLI pointer and the configuration interrupt bit have
@@ -350,7 +394,6 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
struct pl08x_phy_chan *phychan = plchan->phychan;
struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
- struct pl08x_lli *lli;
u32 val;
list_del(&txd->vd.node);
@@ -361,19 +404,7 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
while (pl08x_phy_channel_busy(phychan))
cpu_relax();
- lli = &txd->llis_va[0];
-
- dev_vdbg(&pl08x->adev->dev,
- "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
- "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
- phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
- txd->ccfg);
-
- writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
- writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
- writel(lli->lli, phychan->base + PL080_CH_LLI);
- writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
- writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);
+ pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg);
/* Enable the DMA channel */
/* Do not access config register until channel shows as disabled */
@@ -381,11 +412,11 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
cpu_relax();
/* Do not access config register until channel shows as inactive */
- val = readl(phychan->base + PL080_CH_CONFIG);
+ val = readl(phychan->reg_config);
while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
- val = readl(phychan->base + PL080_CH_CONFIG);
+ val = readl(phychan->reg_config);
- writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
+ writel(val | PL080_CONFIG_ENABLE, phychan->reg_config);
}
/*
@@ -404,9 +435,9 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
int timeout;
/* Set the HALT bit and wait for the FIFO to drain */
- val = readl(ch->base + PL080_CH_CONFIG);
+ val = readl(ch->reg_config);
val |= PL080_CONFIG_HALT;
- writel(val, ch->base + PL080_CH_CONFIG);
+ writel(val, ch->reg_config);
/* Wait for channel inactive */
for (timeout = 1000; timeout; timeout--) {
@@ -423,9 +454,9 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
u32 val;
/* Clear the HALT bit */
- val = readl(ch->base + PL080_CH_CONFIG);
+ val = readl(ch->reg_config);
val &= ~PL080_CONFIG_HALT;
- writel(val, ch->base + PL080_CH_CONFIG);
+ writel(val, ch->reg_config);
}
/*
@@ -437,12 +468,12 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
struct pl08x_phy_chan *ch)
{
- u32 val = readl(ch->base + PL080_CH_CONFIG);
+ u32 val = readl(ch->reg_config);
val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
PL080_CONFIG_TC_IRQ_MASK);
- writel(val, ch->base + PL080_CH_CONFIG);
+ writel(val, ch->reg_config);
writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
@@ -453,6 +484,28 @@ static inline u32 get_bytes_in_cctl(u32 cctl)
/* The source width defines the number of bytes */
u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;
+ cctl &= PL080_CONTROL_SWIDTH_MASK;
+
+ switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
+ case PL080_WIDTH_8BIT:
+ break;
+ case PL080_WIDTH_16BIT:
+ bytes *= 2;
+ break;
+ case PL080_WIDTH_32BIT:
+ bytes *= 4;
+ break;
+ }
+ return bytes;
+}
+
+static inline u32 get_bytes_in_cctl_pl080s(u32 cctl, u32 cctl1)
+{
+ /* The source width defines the number of bytes */
+ u32 bytes = cctl1 & PL080S_CONTROL_TRANSFER_SIZE_MASK;
+
+ cctl &= PL080_CONTROL_SWIDTH_MASK;
+
switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
case PL080_WIDTH_8BIT:
break;
@@ -469,47 +522,66 @@ static inline u32 get_bytes_in_cctl(u32 cctl)
/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
+ struct pl08x_driver_data *pl08x = plchan->host;
+ const u32 *llis_va, *llis_va_limit;
struct pl08x_phy_chan *ch;
+ dma_addr_t llis_bus;
struct pl08x_txd *txd;
- size_t bytes = 0;
+ u32 llis_max_words;
+ size_t bytes;
+ u32 clli;
ch = plchan->phychan;
txd = plchan->at;
+ if (!ch || !txd)
+ return 0;
+
/*
* Follow the LLIs to get the number of remaining
* bytes in the currently active transaction.
*/
- if (ch && txd) {
- u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
+ clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
- /* First get the remaining bytes in the active transfer */
+ /* First get the remaining bytes in the active transfer */
+ if (pl08x->vd->pl080s)
+ bytes = get_bytes_in_cctl_pl080s(
+ readl(ch->base + PL080_CH_CONTROL),
+ readl(ch->base + PL080S_CH_CONTROL2));
+ else
bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
- if (clli) {
- struct pl08x_lli *llis_va = txd->llis_va;
- dma_addr_t llis_bus = txd->llis_bus;
- int index;
+ if (!clli)
+ return bytes;
- BUG_ON(clli < llis_bus || clli >= llis_bus +
- sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);
+ llis_va = txd->llis_va;
+ llis_bus = txd->llis_bus;
- /*
- * Locate the next LLI - as this is an array,
- * it's simple maths to find.
- */
- index = (clli - llis_bus) / sizeof(struct pl08x_lli);
+ llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS;
+ BUG_ON(clli < llis_bus || clli >= llis_bus +
+ sizeof(u32) * llis_max_words);
- for (; index < MAX_NUM_TSFR_LLIS; index++) {
- bytes += get_bytes_in_cctl(llis_va[index].cctl);
+ /*
+ * Locate the next LLI - as this is an array,
+ * it's simple maths to find.
+ */
+ llis_va += (clli - llis_bus) / sizeof(u32);
- /*
- * A LLI pointer of 0 terminates the LLI list
- */
- if (!llis_va[index].lli)
- break;
- }
- }
+ llis_va_limit = llis_va + llis_max_words;
+
+ for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) {
+ if (pl08x->vd->pl080s)
+ bytes += get_bytes_in_cctl_pl080s(
+ llis_va[PL080_LLI_CCTL],
+ llis_va[PL080S_LLI_CCTL2]);
+ else
+ bytes += get_bytes_in_cctl(llis_va[PL080_LLI_CCTL]);
+
+ /*
+ * An LLI pointer going backward terminates the LLI list
+ */
+ if (llis_va[PL080_LLI_LLI] <= clli)
+ break;
}
return bytes;
@@ -720,6 +792,7 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
break;
}
+ tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK;
retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
return retbits;
}
@@ -764,20 +837,26 @@ static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
/*
* Fills in one LLI for a certain transfer descriptor and advance the counter
*/
-static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
- int num_llis, int len, u32 cctl)
+static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
+ struct pl08x_lli_build_data *bd,
+ int num_llis, int len, u32 cctl, u32 cctl2)
{
- struct pl08x_lli *llis_va = bd->txd->llis_va;
+ u32 offset = num_llis * pl08x->lli_words;
+ u32 *llis_va = bd->txd->llis_va + offset;
dma_addr_t llis_bus = bd->txd->llis_bus;
BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
- llis_va[num_llis].cctl = cctl;
- llis_va[num_llis].src = bd->srcbus.addr;
- llis_va[num_llis].dst = bd->dstbus.addr;
- llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
- sizeof(struct pl08x_lli);
- llis_va[num_llis].lli |= bd->lli_bus;
+ /* Advance the offset to next LLI. */
+ offset += pl08x->lli_words;
+
+ llis_va[PL080_LLI_SRC] = bd->srcbus.addr;
+ llis_va[PL080_LLI_DST] = bd->dstbus.addr;
+ llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset);
+ llis_va[PL080_LLI_LLI] |= bd->lli_bus;
+ llis_va[PL080_LLI_CCTL] = cctl;
+ if (pl08x->vd->pl080s)
+ llis_va[PL080S_LLI_CCTL2] = cctl2;
if (cctl & PL080_CONTROL_SRC_INCR)
bd->srcbus.addr += len;
@@ -789,14 +868,53 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
bd->remainder -= len;
}
-static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
- u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
+static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x,
+ struct pl08x_lli_build_data *bd, u32 *cctl, u32 len,
+ int num_llis, size_t *total_bytes)
{
*cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
- pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
+ pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len);
(*total_bytes) += len;
}
+#ifdef VERBOSE_DEBUG
+static void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
+ const u32 *llis_va, int num_llis)
+{
+ int i;
+
+ if (pl08x->vd->pl080s) {
+ dev_vdbg(&pl08x->adev->dev,
+ "%-3s %-9s %-10s %-10s %-10s %-10s %s\n",
+ "lli", "", "csrc", "cdst", "clli", "cctl", "cctl2");
+ for (i = 0; i < num_llis; i++) {
+ dev_vdbg(&pl08x->adev->dev,
+ "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, llis_va, llis_va[PL080_LLI_SRC],
+ llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
+ llis_va[PL080_LLI_CCTL],
+ llis_va[PL080S_LLI_CCTL2]);
+ llis_va += pl08x->lli_words;
+ }
+ } else {
+ dev_vdbg(&pl08x->adev->dev,
+ "%-3s %-9s %-10s %-10s %-10s %s\n",
+ "lli", "", "csrc", "cdst", "clli", "cctl");
+ for (i = 0; i < num_llis; i++) {
+ dev_vdbg(&pl08x->adev->dev,
+ "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, llis_va, llis_va[PL080_LLI_SRC],
+ llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
+ llis_va[PL080_LLI_CCTL]);
+ llis_va += pl08x->lli_words;
+ }
+ }
+}
+#else
+static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
+ const u32 *llis_va, int num_llis) {}
+#endif
+
/*
* This fills in the table of LLIs for the transfer descriptor
* Note that we assume we never have to change the burst sizes
@@ -810,7 +928,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
int num_llis = 0;
u32 cctl, early_bytes = 0;
size_t max_bytes_per_lli, total_bytes;
- struct pl08x_lli *llis_va;
+ u32 *llis_va, *last_lli;
struct pl08x_sg *dsg;
txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
@@ -845,10 +963,13 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
- dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
- bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
+ dev_vdbg(&pl08x->adev->dev,
+ "src=0x%08llx%s/%u dst=0x%08llx%s/%u len=%zu\n",
+ (u64)bd.srcbus.addr,
+ cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
bd.srcbus.buswidth,
- bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
+ (u64)bd.dstbus.addr,
+ cctl & PL080_CONTROL_DST_INCR ? "+" : "",
bd.dstbus.buswidth,
bd.remainder);
dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
@@ -886,8 +1007,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
return 0;
}
- if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
- (bd.dstbus.addr % bd.dstbus.buswidth)) {
+ if (!IS_BUS_ALIGNED(&bd.srcbus) ||
+ !IS_BUS_ALIGNED(&bd.dstbus)) {
dev_err(&pl08x->adev->dev,
"%s src & dst address must be aligned to src"
" & dst width if peripheral is flow controller",
@@ -897,7 +1018,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
bd.dstbus.buswidth, 0);
- pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
+ pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
+ 0, cctl, 0);
break;
}
@@ -908,9 +1030,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
*/
if (bd.remainder < mbus->buswidth)
early_bytes = bd.remainder;
- else if ((mbus->addr) % (mbus->buswidth)) {
- early_bytes = mbus->buswidth - (mbus->addr) %
- (mbus->buswidth);
+ else if (!IS_BUS_ALIGNED(mbus)) {
+ early_bytes = mbus->buswidth -
+ (mbus->addr & (mbus->buswidth - 1));
if ((bd.remainder - early_bytes) < mbus->buswidth)
early_bytes = bd.remainder;
}
@@ -919,8 +1041,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
dev_vdbg(&pl08x->adev->dev,
"%s byte width LLIs (remain 0x%08x)\n",
__func__, bd.remainder);
- prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
- &total_bytes);
+ prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes,
+ num_llis++, &total_bytes);
}
if (bd.remainder) {
@@ -928,7 +1050,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
* Master now aligned
* - if slave is not then we must set its width down
*/
- if (sbus->addr % sbus->buswidth) {
+ if (!IS_BUS_ALIGNED(sbus)) {
dev_dbg(&pl08x->adev->dev,
"%s set down bus width to one byte\n",
__func__);
@@ -941,7 +1063,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
* MIN(buswidths)
*/
max_bytes_per_lli = bd.srcbus.buswidth *
- PL080_CONTROL_TRANSFER_SIZE_MASK;
+ pl08x->vd->max_transfer_size;
dev_vdbg(&pl08x->adev->dev,
"%s max bytes per lli = %zu\n",
__func__, max_bytes_per_lli);
@@ -976,8 +1098,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
bd.dstbus.buswidth, tsize);
- pl08x_fill_lli_for_desc(&bd, num_llis++,
- lli_len, cctl);
+ pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
+ lli_len, cctl, tsize);
total_bytes += lli_len;
}
@@ -988,8 +1110,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
dev_vdbg(&pl08x->adev->dev,
"%s align with boundary, send odd bytes (remain %zu)\n",
__func__, bd.remainder);
- prep_byte_width_lli(&bd, &cctl, bd.remainder,
- num_llis++, &total_bytes);
+ prep_byte_width_lli(pl08x, &bd, &cctl,
+ bd.remainder, num_llis++, &total_bytes);
}
}
@@ -1003,33 +1125,25 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
if (num_llis >= MAX_NUM_TSFR_LLIS) {
dev_err(&pl08x->adev->dev,
"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
- __func__, (u32) MAX_NUM_TSFR_LLIS);
+ __func__, MAX_NUM_TSFR_LLIS);
return 0;
}
}
llis_va = txd->llis_va;
- /* The final LLI terminates the LLI. */
- llis_va[num_llis - 1].lli = 0;
- /* The final LLI element shall also fire an interrupt. */
- llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
+ last_lli = llis_va + (num_llis - 1) * pl08x->lli_words;
-#ifdef VERBOSE_DEBUG
- {
- int i;
-
- dev_vdbg(&pl08x->adev->dev,
- "%-3s %-9s %-10s %-10s %-10s %s\n",
- "lli", "", "csrc", "cdst", "clli", "cctl");
- for (i = 0; i < num_llis; i++) {
- dev_vdbg(&pl08x->adev->dev,
- "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i, &llis_va[i], llis_va[i].src,
- llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
- );
- }
+ if (txd->cyclic) {
+ /* Link back to the first LLI. */
+ last_lli[PL080_LLI_LLI] = txd->llis_bus | bd.lli_bus;
+ } else {
+ /* The final LLI terminates the chain. */
+ last_lli[PL080_LLI_LLI] = 0;
+ /* The final LLI element shall also fire an interrupt. */
+ last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN;
}
-#endif
+
+ pl08x_dump_lli(pl08x, llis_va, num_llis);
return num_llis;
}
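The cyclic/terminal fixup above comes down to one decision on the last entry; a compilable sketch under the same hypothetical flat layout (TC_IRQ_EN stands in for PL080_CONTROL_TC_IRQ_EN):

#include <stdint.h>

#define TC_IRQ_EN	(1u << 31)	/* assumed bit position */

enum { LLI_SRC, LLI_DST, LLI_LLI, LLI_CCTL };

static void finish_chain(uint32_t *llis, uint32_t llis_bus,
			 int num_llis, int lli_words, int cyclic)
{
	uint32_t *last = llis + (num_llis - 1) * lli_words;

	if (cyclic) {
		last[LLI_LLI] = llis_bus;	/* wrap to the first LLI */
	} else {
		last[LLI_LLI] = 0;		/* zero terminates the chain */
		last[LLI_CCTL] |= TC_IRQ_EN;	/* interrupt on completion */
	}
}

int main(void)
{
	uint32_t llis[2 * 4] = { 0 };

	finish_chain(llis, 0x80000000, 2, 4, 0);
	return 0;
}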
@@ -1305,6 +1419,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_driver_data *pl08x = plchan->host;
if (!plchan->slave)
return -EINVAL;
@@ -1314,6 +1429,13 @@ static int dma_set_runtime_config(struct dma_chan *chan,
config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
return -EINVAL;
+ if (config->device_fc && pl08x->vd->pl080s) {
+ dev_err(&pl08x->adev->dev,
+ "%s: PL080S does not support peripheral flow control\n",
+ __func__);
+ return -EINVAL;
+ }
+
plchan->cfg = *config;
return 0;
@@ -1404,25 +1526,19 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
-static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
- struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+static struct pl08x_txd *pl08x_init_txd(
+ struct dma_chan *chan,
+ enum dma_transfer_direction direction,
+ dma_addr_t *slave_addr)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_txd *txd;
- struct pl08x_sg *dsg;
- struct scatterlist *sg;
enum dma_slave_buswidth addr_width;
- dma_addr_t slave_addr;
int ret, tmp;
u8 src_buses, dst_buses;
u32 maxburst, cctl;
- dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
- __func__, sg_dma_len(sgl), plchan->name);
-
txd = pl08x_get_txd(plchan);
if (!txd) {
dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
@@ -1436,14 +1552,14 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
*/
if (direction == DMA_MEM_TO_DEV) {
cctl = PL080_CONTROL_SRC_INCR;
- slave_addr = plchan->cfg.dst_addr;
+ *slave_addr = plchan->cfg.dst_addr;
addr_width = plchan->cfg.dst_addr_width;
maxburst = plchan->cfg.dst_maxburst;
src_buses = pl08x->mem_buses;
dst_buses = plchan->cd->periph_buses;
} else if (direction == DMA_DEV_TO_MEM) {
cctl = PL080_CONTROL_DST_INCR;
- slave_addr = plchan->cfg.src_addr;
+ *slave_addr = plchan->cfg.src_addr;
addr_width = plchan->cfg.src_addr_width;
maxburst = plchan->cfg.src_maxburst;
src_buses = plchan->cd->periph_buses;
@@ -1492,24 +1608,107 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
else
txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
+ return txd;
+}
+
+static int pl08x_tx_add_sg(struct pl08x_txd *txd,
+ enum dma_transfer_direction direction,
+ dma_addr_t slave_addr,
+ dma_addr_t buf_addr,
+ unsigned int len)
+{
+ struct pl08x_sg *dsg;
+
+ dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
+ if (!dsg)
+ return -ENOMEM;
+
+ list_add_tail(&dsg->node, &txd->dsg_list);
+
+ dsg->len = len;
+ if (direction == DMA_MEM_TO_DEV) {
+ dsg->src_addr = buf_addr;
+ dsg->dst_addr = slave_addr;
+ } else {
+ dsg->src_addr = slave_addr;
+ dsg->dst_addr = buf_addr;
+ }
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_txd *txd;
+ struct scatterlist *sg;
+ int ret, tmp;
+ dma_addr_t slave_addr;
+
+ dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
+ __func__, sg_dma_len(sgl), plchan->name);
+
+ txd = pl08x_init_txd(chan, direction, &slave_addr);
+ if (!txd)
+ return NULL;
+
for_each_sg(sgl, sg, sg_len, tmp) {
- dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
- if (!dsg) {
+ ret = pl08x_tx_add_sg(txd, direction, slave_addr,
+ sg_dma_address(sg),
+ sg_dma_len(sg));
+ if (ret) {
pl08x_release_mux(plchan);
pl08x_free_txd(pl08x, txd);
dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
__func__);
return NULL;
}
- list_add_tail(&dsg->node, &txd->dsg_list);
+ }
- dsg->len = sg_dma_len(sg);
- if (direction == DMA_MEM_TO_DEV) {
- dsg->src_addr = sg_dma_address(sg);
- dsg->dst_addr = slave_addr;
- } else {
- dsg->src_addr = slave_addr;
- dsg->dst_addr = sg_dma_address(sg);
+ ret = pl08x_fill_llis_for_desc(plchan->host, txd);
+ if (!ret) {
+ pl08x_release_mux(plchan);
+ pl08x_free_txd(pl08x, txd);
+ return NULL;
+ }
+
+ return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_txd *txd;
+ int ret, tmp;
+ dma_addr_t slave_addr;
+
+ dev_dbg(&pl08x->adev->dev,
+ "%s prepare cyclic transaction of %d/%d bytes %s %s\n",
+ __func__, period_len, buf_len,
+ direction == DMA_MEM_TO_DEV ? "to" : "from",
+ plchan->name);
+
+ txd = pl08x_init_txd(chan, direction, &slave_addr);
+ if (!txd)
+ return NULL;
+
+ txd->cyclic = true;
+ txd->cctl |= PL080_CONTROL_TC_IRQ_EN;
+ for (tmp = 0; tmp < buf_len; tmp += period_len) {
+ ret = pl08x_tx_add_sg(txd, direction, slave_addr,
+ buf_addr + tmp, period_len);
+ if (ret) {
+ pl08x_release_mux(plchan);
+ pl08x_free_txd(pl08x, txd);
+ return NULL;
}
}
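The period loop above just slices the ring buffer into equal sg entries; the arithmetic in isolation (values hypothetical):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t buf_len = 4096, period_len = 1024, tmp;

	/* Each period becomes one sg entry; the LLI chain built from
	 * them then wraps from the last period back to the first. */
	for (tmp = 0; tmp < buf_len; tmp += period_len)
		printf("period at offset %zu, len %zu\n", tmp, period_len);
	return 0;
}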
@@ -1652,7 +1851,9 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
spin_lock(&plchan->vc.lock);
tx = plchan->at;
- if (tx) {
+ if (tx && tx->cyclic) {
+ vchan_cyclic_callback(&tx->vd);
+ } else if (tx) {
plchan->at = NULL;
/*
* This descriptor is done, release its mux
@@ -1846,6 +2047,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
{
struct pl08x_driver_data *pl08x;
const struct vendor_data *vd = id->data;
+ u32 tsfr_size;
int ret = 0;
int i;
@@ -1873,6 +2075,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
/* Initialize slave engine */
dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
+ dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
pl08x->slave.dev = &adev->dev;
pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
@@ -1880,6 +2083,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->slave.device_tx_status = pl08x_dma_tx_status;
pl08x->slave.device_issue_pending = pl08x_issue_pending;
pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
+ pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
pl08x->slave.device_control = pl08x_control;
/* Get the platform data */
@@ -1902,9 +2106,15 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->mem_buses = pl08x->pd->mem_buses;
}
+ if (vd->pl080s)
+ pl08x->lli_words = PL080S_LLI_WORDS;
+ else
+ pl08x->lli_words = PL080_LLI_WORDS;
+ tsfr_size = MAX_NUM_TSFR_LLIS * pl08x->lli_words * sizeof(u32);
+
/* A DMA memory pool for LLIs, align on 1-byte boundary */
pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
- PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
+ tsfr_size, PL08X_ALIGN, 0);
if (!pl08x->pool) {
ret = -ENOMEM;
goto out_no_lli_pool;
@@ -1947,6 +2157,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
ch->id = i;
ch->base = pl08x->base + PL080_Cx_BASE(i);
+ ch->reg_config = ch->base + vd->config_offset;
spin_lock_init(&ch->lock);
/*
@@ -1957,7 +2168,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
if (vd->nomadik) {
u32 val;
- val = readl(ch->base + PL080_CH_CONFIG);
+ val = readl(ch->reg_config);
if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
ch->locked = true;
@@ -2008,8 +2219,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
amba_set_drvdata(adev, pl08x);
init_pl08x_debugfs(pl08x);
- dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
- amba_part(adev), amba_rev(adev),
+ dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n",
+ amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev),
(unsigned long long)adev->res.start, adev->irq[0]);
return 0;
@@ -2038,22 +2249,41 @@ out_no_pl08x:
/* PL080 has 8 channels and the PL081 has just 2 */
static struct vendor_data vendor_pl080 = {
+ .config_offset = PL080_CH_CONFIG,
.channels = 8,
.dualmaster = true,
+ .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};
static struct vendor_data vendor_nomadik = {
+ .config_offset = PL080_CH_CONFIG,
.channels = 8,
.dualmaster = true,
.nomadik = true,
+ .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
+};
+
+static struct vendor_data vendor_pl080s = {
+ .config_offset = PL080S_CH_CONFIG,
+ .channels = 8,
+ .pl080s = true,
+ .max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK,
};
static struct vendor_data vendor_pl081 = {
+ .config_offset = PL080_CH_CONFIG,
.channels = 2,
.dualmaster = false,
+ .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};
static struct amba_id pl08x_ids[] = {
+ /* Samsung PL080S variant */
+ {
+ .id = 0x0a141080,
+ .mask = 0xffffffff,
+ .data = &vendor_pl080s,
+ },
/* PL080 */
{
.id = 0x00041080,
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 99af4db..9162ac8 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -382,20 +382,30 @@ void dma_issue_pending_all(void)
EXPORT_SYMBOL(dma_issue_pending_all);
/**
- * nth_chan - returns the nth channel of the given capability
+ * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
+ */
+static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
+{
+ int node = dev_to_node(chan->device->dev);
+ return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
+}
+
+/**
+ * min_chan - returns the channel with the lowest reference count in the same numa-node as the cpu
* @cap: capability to match
- * @n: nth channel desired
+ * @cpu: cpu index which the channel should be close to
*
- * Defaults to returning the channel with the desired capability and the
- * lowest reference count when 'n' cannot be satisfied. Must be called
- * under dma_list_mutex.
+ * If some channels are close to the given cpu, the one with the lowest
+ * reference count is returned. Otherwise, cpu is ignored and only the
+ * reference count is taken into account.
+ * Must be called under dma_list_mutex.
*/
-static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
+static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
struct dma_device *device;
struct dma_chan *chan;
- struct dma_chan *ret = NULL;
struct dma_chan *min = NULL;
+ struct dma_chan *localmin = NULL;
list_for_each_entry(device, &dma_device_list, global_node) {
if (!dma_has_cap(cap, device->cap_mask) ||
@@ -404,27 +414,22 @@ static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
list_for_each_entry(chan, &device->channels, device_node) {
if (!chan->client_count)
continue;
- if (!min)
- min = chan;
- else if (chan->table_count < min->table_count)
+ if (!min || chan->table_count < min->table_count)
min = chan;
- if (n-- == 0) {
- ret = chan;
- break; /* done */
- }
+ if (dma_chan_is_local(chan, cpu))
+ if (!localmin ||
+ chan->table_count < localmin->table_count)
+ localmin = chan;
}
- if (ret)
- break; /* done */
}
- if (!ret)
- ret = min;
+ chan = localmin ? localmin : min;
- if (ret)
- ret->table_count++;
+ if (chan)
+ chan->table_count++;
- return ret;
+ return chan;
}
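A standalone model of the selection rule implemented by min_chan() above; the struct and field names here are stand-ins for the real dma_chan bookkeeping:

#include <stdio.h>

struct chan { int table_count; int local; };

static struct chan *pick(struct chan *c, int n)
{
	struct chan *min = NULL, *localmin = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (!min || c[i].table_count < min->table_count)
			min = &c[i];
		if (c[i].local &&
		    (!localmin || c[i].table_count < localmin->table_count))
			localmin = &c[i];
	}
	return localmin ? localmin : min;	/* prefer NUMA-local */
}

int main(void)
{
	struct chan chans[] = { { 3, 0 }, { 1, 0 }, { 2, 1 } };
	struct chan *c = pick(chans, 3);

	/* Picks {2, 1}: locality beats the lower global count. */
	printf("picked count=%d local=%d\n", c->table_count, c->local);
	return 0;
}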
/**
@@ -441,7 +446,6 @@ static void dma_channel_rebalance(void)
struct dma_device *device;
int cpu;
int cap;
- int n;
/* undo the last distribution */
for_each_dma_cap_mask(cap, dma_cap_mask_all)
@@ -460,14 +464,9 @@ static void dma_channel_rebalance(void)
return;
/* redistribute available channels */
- n = 0;
for_each_dma_cap_mask(cap, dma_cap_mask_all)
for_each_online_cpu(cpu) {
- if (num_possible_cpus() > 1)
- chan = nth_chan(cap, n++);
- else
- chan = nth_chan(cap, -1);
-
+ chan = min_chan(cap, cpu);
per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
}
}
@@ -510,7 +509,33 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
}
/**
- * dma_request_channel - try to allocate an exclusive channel
+ * dma_get_slave_channel - try to get a specific channel exclusively
+ * @chan: target channel
+ */
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
+{
+ int err = -EBUSY;
+
+ /* lock against __dma_request_channel */
+ mutex_lock(&dma_list_mutex);
+
+ if (chan->client_count == 0) {
+ err = dma_chan_get(chan);
+ if (err)
+ pr_debug("%s: failed to get %s: (%d)\n",
+ __func__, dma_chan_name(chan), err);
+ } else
+ chan = NULL;
+
+ mutex_unlock(&dma_list_mutex);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(dma_get_slave_channel);
+
+/**
+ * __dma_request_channel - try to allocate an exclusive channel
* @mask: capabilities that the channel must satisfy
* @fn: optional callback to disposition available channels
* @fn_param: opaque parameter to pass to dma_filter_fn
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index e88ded2..92f796c 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -25,44 +25,46 @@
#include <linux/seq_file.h>
static unsigned int test_buf_size = 16384;
-module_param(test_buf_size, uint, S_IRUGO);
+module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
static char test_channel[20];
-module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
+module_param_string(channel, test_channel, sizeof(test_channel),
+ S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
static char test_device[20];
-module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
+module_param_string(device, test_device, sizeof(test_device),
+ S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
static unsigned int threads_per_chan = 1;
-module_param(threads_per_chan, uint, S_IRUGO);
+module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(threads_per_chan,
"Number of threads to start per channel (default: 1)");
static unsigned int max_channels;
-module_param(max_channels, uint, S_IRUGO);
+module_param(max_channels, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channels,
"Maximum number of channels to use (default: all)");
static unsigned int iterations;
-module_param(iterations, uint, S_IRUGO);
+module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
"Iterations before stopping test (default: infinite)");
static unsigned int xor_sources = 3;
-module_param(xor_sources, uint, S_IRUGO);
+module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xor_sources,
"Number of xor source buffers (default: 3)");
static unsigned int pq_sources = 3;
-module_param(pq_sources, uint, S_IRUGO);
+module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pq_sources,
"Number of p+q source buffers (default: 3)");
static int timeout = 3000;
-module_param(timeout, uint, S_IRUGO);
+module_param(timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000); "
"pass -1 for infinite timeout");
@@ -193,7 +195,6 @@ struct dmatest_info {
/* debugfs related stuff */
struct dentry *root;
- struct dmatest_params dbgfs_params;
/* Test results */
struct list_head results;
@@ -406,7 +407,11 @@ static int thread_result_add(struct dmatest_info *info,
list_add_tail(&tr->node, &r->results);
mutex_unlock(&info->results_lock);
- pr_warn("%s\n", thread_result_get(r->name, tr));
+ if (tr->type == DMATEST_ET_OK)
+ pr_debug("%s\n", thread_result_get(r->name, tr));
+ else
+ pr_warn("%s\n", thread_result_get(r->name, tr));
+
return 0;
}
@@ -1007,7 +1012,15 @@ static int __restart_threaded_test(struct dmatest_info *info, bool run)
result_free(info, NULL);
/* Copy test parameters */
- memcpy(params, &info->dbgfs_params, sizeof(*params));
+ params->buf_size = test_buf_size;
+ strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
+ strlcpy(params->device, strim(test_device), sizeof(params->device));
+ params->threads_per_chan = threads_per_chan;
+ params->max_channels = max_channels;
+ params->iterations = iterations;
+ params->xor_sources = xor_sources;
+ params->pq_sources = pq_sources;
+ params->timeout = timeout;
/* Run test with new parameters */
return __run_threaded_test(info);
@@ -1029,71 +1042,6 @@ static bool __is_threaded_test_run(struct dmatest_info *info)
return false;
}
-static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos,
- const void __user *from, size_t count)
-{
- char tmp[20];
- ssize_t len;
-
- len = simple_write_to_buffer(tmp, sizeof(tmp) - 1, ppos, from, count);
- if (len >= 0) {
- tmp[len] = '\0';
- strlcpy(to, strim(tmp), available);
- }
-
- return len;
-}
-
-static ssize_t dtf_read_channel(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct dmatest_info *info = file->private_data;
- return simple_read_from_buffer(buf, count, ppos,
- info->dbgfs_params.channel,
- strlen(info->dbgfs_params.channel));
-}
-
-static ssize_t dtf_write_channel(struct file *file, const char __user *buf,
- size_t size, loff_t *ppos)
-{
- struct dmatest_info *info = file->private_data;
- return dtf_write_string(info->dbgfs_params.channel,
- sizeof(info->dbgfs_params.channel),
- ppos, buf, size);
-}
-
-static const struct file_operations dtf_channel_fops = {
- .read = dtf_read_channel,
- .write = dtf_write_channel,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
-static ssize_t dtf_read_device(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct dmatest_info *info = file->private_data;
- return simple_read_from_buffer(buf, count, ppos,
- info->dbgfs_params.device,
- strlen(info->dbgfs_params.device));
-}
-
-static ssize_t dtf_write_device(struct file *file, const char __user *buf,
- size_t size, loff_t *ppos)
-{
- struct dmatest_info *info = file->private_data;
- return dtf_write_string(info->dbgfs_params.device,
- sizeof(info->dbgfs_params.device),
- ppos, buf, size);
-}
-
-static const struct file_operations dtf_device_fops = {
- .read = dtf_read_device,
- .write = dtf_write_device,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -1187,8 +1135,6 @@ static const struct file_operations dtf_results_fops = {
static int dmatest_register_dbgfs(struct dmatest_info *info)
{
struct dentry *d;
- struct dmatest_params *params = &info->dbgfs_params;
- int ret = -ENOMEM;
d = debugfs_create_dir("dmatest", NULL);
if (IS_ERR(d))
@@ -1198,81 +1144,24 @@ static int dmatest_register_dbgfs(struct dmatest_info *info)
info->root = d;
- /* Copy initial values */
- memcpy(params, &info->params, sizeof(*params));
-
- /* Test parameters */
-
- d = debugfs_create_u32("test_buf_size", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->buf_size);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_file("channel", S_IRUGO | S_IWUSR, info->root,
- info, &dtf_channel_fops);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_file("device", S_IRUGO | S_IWUSR, info->root,
- info, &dtf_device_fops);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_u32("threads_per_chan", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->threads_per_chan);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_u32("max_channels", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->max_channels);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_u32("iterations", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->iterations);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_u32("xor_sources", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->xor_sources);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_u32("pq_sources", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->pq_sources);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
- d = debugfs_create_u32("timeout", S_IWUSR | S_IRUGO, info->root,
- (u32 *)&params->timeout);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
-
/* Run or stop threaded test */
- d = debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root,
- info, &dtf_run_fops);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
+ debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info,
+ &dtf_run_fops);
/* Results of test in progress */
- d = debugfs_create_file("results", S_IRUGO, info->root, info,
- &dtf_results_fops);
- if (IS_ERR_OR_NULL(d))
- goto err_node;
+ debugfs_create_file("results", S_IRUGO, info->root, info,
+ &dtf_results_fops);
return 0;
-err_node:
- debugfs_remove_recursive(info->root);
err_root:
pr_err("dmatest: Failed to initialize debugfs\n");
- return ret;
+ return -ENOMEM;
}
static int __init dmatest_init(void)
{
struct dmatest_info *info = &test_info;
- struct dmatest_params *params = &info->params;
int ret;
memset(info, 0, sizeof(*info));
@@ -1283,17 +1172,6 @@ static int __init dmatest_init(void)
mutex_init(&info->results_lock);
INIT_LIST_HEAD(&info->results);
- /* Set default parameters */
- params->buf_size = test_buf_size;
- strlcpy(params->channel, test_channel, sizeof(params->channel));
- strlcpy(params->device, test_device, sizeof(params->device));
- params->threads_per_chan = threads_per_chan;
- params->max_channels = max_channels;
- params->iterations = iterations;
- params->xor_sources = xor_sources;
- params->pq_sources = pq_sources;
- params->timeout = timeout;
-
ret = dmatest_register_dbgfs(info);
if (ret)
return ret;
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index eea479c..89eb89f 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -37,16 +37,22 @@
* which does not support descriptor writeback.
*/
+static inline bool is_request_line_unset(struct dw_dma_chan *dwc)
+{
+ return dwc->request_line == (typeof(dwc->request_line))~0;
+}
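The cast inside is_request_line_unset() matters whenever request_line is narrower than int; a minimal demonstration of the integer-promotion pitfall it avoids:

#include <stdio.h>

int main(void)
{
	unsigned short request_line = ~0;	/* "unset" sentinel */

	/* Promotion: 0xffff == -1, which is false... */
	printf("without cast: %d\n", request_line == ~0);
	/* ...but comparing against the truncated sentinel works. */
	printf("with cast:    %d\n", request_line == (unsigned short)~0);
	return 0;
}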
+
static inline void dwc_set_masters(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
struct dw_dma_slave *dws = dwc->chan.private;
unsigned char mmax = dw->nr_masters - 1;
- if (dwc->request_line == ~0) {
- dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
- dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
- }
+ if (!is_request_line_unset(dwc))
+ return;
+
+ dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
+ dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
}
#define DWC_DEFAULT_CTLLO(_chan) ({ \
@@ -644,10 +650,13 @@ static void dw_dma_tasklet(unsigned long data)
static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
struct dw_dma *dw = dev_id;
- u32 status;
+ u32 status = dma_readl(dw, STATUS_INT);
+
+ dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);
- dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
- dma_readl(dw, STATUS_INT));
+ /* Check if we have any interrupt from the DMAC */
+ if (!status)
+ return IRQ_NONE;
/*
* Just disable the interrupts. We'll turn them back on in the
@@ -984,7 +993,7 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
dwc->direction = sconfig->direction;
/* Take the request line from slave_id member */
- if (dwc->request_line == ~0)
+ if (is_request_line_unset(dwc))
dwc->request_line = sconfig->slave_id;
convert_burst(&dwc->dma_sconfig.src_maxburst);
@@ -1089,16 +1098,16 @@ dwc_tx_status(struct dma_chan *chan,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_SUCCESS) {
- dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+ if (ret == DMA_SUCCESS)
+ return ret;
- ret = dma_cookie_status(chan, cookie, txstate);
- }
+ dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+ ret = dma_cookie_status(chan, cookie, txstate);
if (ret != DMA_SUCCESS)
dma_set_residue(txstate, dwc_get_residue(dwc));
- if (dwc->paused)
+ if (dwc->paused && ret == DMA_IN_PROGRESS)
return DMA_PAUSED;
return ret;
@@ -1560,8 +1569,8 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
/* Disable BLOCK interrupts as well */
channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
- err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt, 0,
- "dw_dmac", dw);
+ err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt,
+ IRQF_SHARED, "dw_dmac", dw);
if (err)
return err;
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 6c9449c..e35d975 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -253,6 +253,7 @@ static const struct acpi_device_id dw_dma_acpi_id_table[] = {
{ "INTL9C60", 0 },
{ }
};
+MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
#endif
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 5f3e532..ff50ff4 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -56,6 +56,7 @@ struct edma_desc {
struct list_head node;
int absync;
int pset_nr;
+ int processed;
struct edmacc_param pset[0];
};
@@ -69,6 +70,7 @@ struct edma_chan {
int ch_num;
bool alloced;
int slot[EDMA_MAX_SLOTS];
+ int missed;
struct dma_slave_config cfg;
};
@@ -104,22 +106,34 @@ static void edma_desc_free(struct virt_dma_desc *vdesc)
/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
- struct virt_dma_desc *vdesc = vchan_next_desc(&echan->vchan);
+ struct virt_dma_desc *vdesc;
struct edma_desc *edesc;
- int i;
-
- if (!vdesc) {
- echan->edesc = NULL;
- return;
+ struct device *dev = echan->vchan.chan.device->dev;
+ int i, j, left, nslots;
+
+ /* If we have processed all psets or have not started yet */
+ if (!echan->edesc ||
+ echan->edesc->pset_nr == echan->edesc->processed) {
+ /* Get next vdesc */
+ vdesc = vchan_next_desc(&echan->vchan);
+ if (!vdesc) {
+ echan->edesc = NULL;
+ return;
+ }
+ list_del(&vdesc->node);
+ echan->edesc = to_edma_desc(&vdesc->tx);
}
- list_del(&vdesc->node);
+ edesc = echan->edesc;
- echan->edesc = edesc = to_edma_desc(&vdesc->tx);
+ /* Find out how many psets are left */
+ left = edesc->pset_nr - edesc->processed;
+ nslots = min(MAX_NR_SG, left);
/* Write descriptor PaRAM set(s) */
- for (i = 0; i < edesc->pset_nr; i++) {
- edma_write_slot(echan->slot[i], &edesc->pset[i]);
+ for (i = 0; i < nslots; i++) {
+ j = i + edesc->processed;
+ edma_write_slot(echan->slot[i], &edesc->pset[j]);
dev_dbg(echan->vchan.chan.device->dev,
"\n pset[%d]:\n"
" chnum\t%d\n"
@@ -132,24 +146,50 @@ static void edma_execute(struct edma_chan *echan)
" bidx\t%08x\n"
" cidx\t%08x\n"
" lkrld\t%08x\n",
- i, echan->ch_num, echan->slot[i],
- edesc->pset[i].opt,
- edesc->pset[i].src,
- edesc->pset[i].dst,
- edesc->pset[i].a_b_cnt,
- edesc->pset[i].ccnt,
- edesc->pset[i].src_dst_bidx,
- edesc->pset[i].src_dst_cidx,
- edesc->pset[i].link_bcntrld);
+ j, echan->ch_num, echan->slot[i],
+ edesc->pset[j].opt,
+ edesc->pset[j].src,
+ edesc->pset[j].dst,
+ edesc->pset[j].a_b_cnt,
+ edesc->pset[j].ccnt,
+ edesc->pset[j].src_dst_bidx,
+ edesc->pset[j].src_dst_cidx,
+ edesc->pset[j].link_bcntrld);
/* Link to the previous slot if not the last set */
- if (i != (edesc->pset_nr - 1))
+ if (i != (nslots - 1))
edma_link(echan->slot[i], echan->slot[i+1]);
- /* Final pset links to the dummy pset */
- else
- edma_link(echan->slot[i], echan->ecc->dummy_slot);
}
- edma_start(echan->ch_num);
+ edesc->processed += nslots;
+
+ /*
+ * If this is the last set in the series of SG-list transactions,
+ * set up a link to the dummy slot; this results in all future
+ * events being absorbed, and that's OK because we're done.
+ */
+ if (edesc->processed == edesc->pset_nr)
+ edma_link(echan->slot[nslots-1], echan->ecc->dummy_slot);
+
+ edma_resume(echan->ch_num);
+
+ if (edesc->processed <= MAX_NR_SG) {
+ dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
+ edma_start(echan->ch_num);
+ }
+
+ /*
+ * Missed events can happen due to the setup time between
+ * intermediate transfers when a long SG list has to be broken
+ * up into batches of MAX_NR_SG.
+ */
+ if (echan->missed) {
+ dev_dbg(dev, "missed event in execute detected\n");
+ edma_clean_channel(echan->ch_num);
+ edma_stop(echan->ch_num);
+ edma_start(echan->ch_num);
+ edma_trigger_channel(echan->ch_num);
+ echan->missed = 0;
+ }
}
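A standalone model of the batching arithmetic introduced above; the MAX_NR_SG value is assumed for illustration:

#include <stdio.h>

#define MAX_NR_SG 10	/* assumption: mirrors the driver's slot limit */

int main(void)
{
	int pset_nr = 24, processed = 0;

	/* Each edma_execute() pass programs at most MAX_NR_SG PaRAM
	 * sets; the interrupt at the end of each batch triggers the
	 * next pass until the descriptor is consumed. */
	while (processed < pset_nr) {
		int left = pset_nr - processed;
		int nslots = left < MAX_NR_SG ? left : MAX_NR_SG;

		printf("batch: psets %d..%d\n",
		       processed, processed + nslots - 1);
		processed += nslots;
	}
	return 0;	/* 24 psets -> batches of 10, 10 and 4 */
}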
static int edma_terminate_all(struct edma_chan *echan)
@@ -222,9 +262,9 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
enum dma_slave_buswidth dev_width;
u32 burst;
struct scatterlist *sg;
- int i;
int acnt, bcnt, ccnt, src, dst, cidx;
int src_bidx, dst_bidx, src_cidx, dst_cidx;
+ int i, nslots;
if (unlikely(!echan || !sgl || !sg_len))
return NULL;
@@ -247,12 +287,6 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
return NULL;
}
- if (sg_len > MAX_NR_SG) {
- dev_err(dev, "Exceeded max SG segments %d > %d\n",
- sg_len, MAX_NR_SG);
- return NULL;
- }
-
edesc = kzalloc(sizeof(*edesc) + sg_len *
sizeof(edesc->pset[0]), GFP_ATOMIC);
if (!edesc) {
@@ -262,8 +296,10 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
edesc->pset_nr = sg_len;
- for_each_sg(sgl, sg, sg_len, i) {
- /* Allocate a PaRAM slot, if needed */
+ /* Allocate a PaRAM slot, if needed */
+ nslots = min_t(unsigned, MAX_NR_SG, sg_len);
+
+ for (i = 0; i < nslots; i++) {
if (echan->slot[i] < 0) {
echan->slot[i] =
edma_alloc_slot(EDMA_CTLR(echan->ch_num),
@@ -273,6 +309,10 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
return NULL;
}
}
+ }
+
+ /* Configure PaRAM sets for each SG */
+ for_each_sg(sgl, sg, sg_len, i) {
acnt = dev_width;
@@ -330,6 +370,12 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
/* Configure A or AB synchronized transfers */
if (edesc->absync)
edesc->pset[i].opt |= SYNCDIM;
+
+ /*
+ * If this is the last in the current batch of SG transactions,
+ * enable the interrupt so that the next batch is processed.
+ */
+ if (!((i+1) % MAX_NR_SG))
+ edesc->pset[i].opt |= TCINTEN;
+
/* If this is the last set, enable completion interrupt flag */
if (i == sg_len - 1)
edesc->pset[i].opt |= TCINTEN;
@@ -355,27 +401,65 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
struct device *dev = echan->vchan.chan.device->dev;
struct edma_desc *edesc;
unsigned long flags;
+ struct edmacc_param p;
- /* Stop the channel */
- edma_stop(echan->ch_num);
+ /* Pause the channel */
+ edma_pause(echan->ch_num);
switch (ch_status) {
case DMA_COMPLETE:
- dev_dbg(dev, "transfer complete on channel %d\n", ch_num);
-
spin_lock_irqsave(&echan->vchan.lock, flags);
edesc = echan->edesc;
if (edesc) {
+ if (edesc->processed == edesc->pset_nr) {
+ dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
+ edma_stop(echan->ch_num);
+ vchan_cookie_complete(&edesc->vdesc);
+ } else {
+ dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
+ }
+
edma_execute(echan);
- vchan_cookie_complete(&edesc->vdesc);
}
spin_unlock_irqrestore(&echan->vchan.lock, flags);
break;
case DMA_CC_ERROR:
- dev_dbg(dev, "transfer error on channel %d\n", ch_num);
+ spin_lock_irqsave(&echan->vchan.lock, flags);
+
+ edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);
+
+ /*
+ * Defer reissue via the missed flag; it is guaranteed to
+ * be acted upon because either:
+ * (1) we finished transmitting an intermediate slot and
+ * edma_execute is coming up, or
+ * (2) we finished the current transfer and issue_pending
+ * will call edma_execute.
+ *
+ * Important note: issuing directly here can be dangerous
+ * and lead to some nasty recursion when we are in a NULL
+ * slot, so we avoid doing so and only set the missed flag.
+ */
+ if (p.a_b_cnt == 0 && p.ccnt == 0) {
+ dev_dbg(dev, "Error occurred, looks like slot is null, just setting miss\n");
+ echan->missed = 1;
+ } else {
+ /*
+ * The slot is already programmed but the event got
+ * missed, so it's safe to issue it here.
+ */
+ dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n");
+ edma_clean_channel(echan->ch_num);
+ edma_stop(echan->ch_num);
+ edma_start(echan->ch_num);
+ edma_trigger_channel(echan->ch_num);
+ }
+
+ spin_unlock_irqrestore(&echan->vchan.lock, flags);
+
break;
default:
break;
@@ -502,8 +586,6 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
} else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
struct edma_desc *edesc = echan->edesc;
txstate->residue = edma_desc_size(edesc);
- } else {
- txstate->residue = 0;
}
spin_unlock_irqrestore(&echan->vchan.lock, flags);
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index f2bf8c0..591cd8c 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -1313,15 +1313,7 @@ static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
struct dma_tx_state *state)
{
- struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
- enum dma_status ret;
- unsigned long flags;
-
- spin_lock_irqsave(&edmac->lock, flags);
- ret = dma_cookie_status(chan, cookie, state);
- spin_unlock_irqrestore(&edmac->lock, flags);
-
- return ret;
+ return dma_cookie_status(chan, cookie, state);
}
/**
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 49e8fbd..b3f3e90 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -979,15 +979,7 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- struct fsldma_chan *chan = to_fsl_chan(dchan);
- enum dma_status ret;
- unsigned long flags;
-
- spin_lock_irqsave(&chan->desc_lock, flags);
- ret = dma_cookie_status(dchan, cookie, txstate);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
-
- return ret;
+ return dma_cookie_status(dchan, cookie, txstate);
}
/*----------------------------------------------------------------------------*/
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index ff2aab9..78f8ca5 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -805,10 +805,8 @@ static void imxdma_free_chan_resources(struct dma_chan *chan)
}
INIT_LIST_HEAD(&imxdmac->ld_free);
- if (imxdmac->sg_list) {
- kfree(imxdmac->sg_list);
- imxdmac->sg_list = NULL;
- }
+ kfree(imxdmac->sg_list);
+ imxdmac->sg_list = NULL;
}
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 1e44b8c..fc43603 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -243,7 +243,6 @@ struct sdma_engine;
* @event_id1 for channels that use 2 events
* @word_size peripheral access size
* @buf_tail ID of the buffer that was processed
- * @done channel completion
* @num_bd max NUM_BD. number of descriptors currently being handled
*/
struct sdma_channel {
@@ -255,7 +254,6 @@ struct sdma_channel {
unsigned int event_id1;
enum dma_slave_buswidth word_size;
unsigned int buf_tail;
- struct completion done;
unsigned int num_bd;
struct sdma_buffer_descriptor *bd;
dma_addr_t bd_phys;
@@ -307,9 +305,10 @@ struct sdma_firmware_header {
u32 ram_code_size;
};
-enum sdma_devtype {
- IMX31_SDMA, /* runs on i.mx31 */
- IMX35_SDMA, /* runs on i.mx35 and later */
+struct sdma_driver_data {
+ int chnenbl0;
+ int num_events;
+ struct sdma_script_start_addrs *script_addrs;
};
struct sdma_engine {
@@ -318,8 +317,6 @@ struct sdma_engine {
struct sdma_channel channel[MAX_DMA_CHANNELS];
struct sdma_channel_control *channel_control;
void __iomem *regs;
- enum sdma_devtype devtype;
- unsigned int num_events;
struct sdma_context_data *context;
dma_addr_t context_phys;
struct dma_device dma_device;
@@ -327,15 +324,118 @@ struct sdma_engine {
struct clk *clk_ahb;
spinlock_t channel_0_lock;
struct sdma_script_start_addrs *script_addrs;
+ const struct sdma_driver_data *drvdata;
+};
+
+static struct sdma_driver_data sdma_imx31 = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX31,
+ .num_events = 32,
+};
+
+static struct sdma_script_start_addrs sdma_script_imx25 = {
+ .ap_2_ap_addr = 729,
+ .uart_2_mcu_addr = 904,
+ .per_2_app_addr = 1255,
+ .mcu_2_app_addr = 834,
+ .uartsh_2_mcu_addr = 1120,
+ .per_2_shp_addr = 1329,
+ .mcu_2_shp_addr = 1048,
+ .ata_2_mcu_addr = 1560,
+ .mcu_2_ata_addr = 1479,
+ .app_2_per_addr = 1189,
+ .app_2_mcu_addr = 770,
+ .shp_2_per_addr = 1407,
+ .shp_2_mcu_addr = 979,
+};
+
+static struct sdma_driver_data sdma_imx25 = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+ .script_addrs = &sdma_script_imx25,
+};
+
+static struct sdma_driver_data sdma_imx35 = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+};
+
+static struct sdma_script_start_addrs sdma_script_imx51 = {
+ .ap_2_ap_addr = 642,
+ .uart_2_mcu_addr = 817,
+ .mcu_2_app_addr = 747,
+ .mcu_2_shp_addr = 961,
+ .ata_2_mcu_addr = 1473,
+ .mcu_2_ata_addr = 1392,
+ .app_2_per_addr = 1033,
+ .app_2_mcu_addr = 683,
+ .shp_2_per_addr = 1251,
+ .shp_2_mcu_addr = 892,
+};
+
+static struct sdma_driver_data sdma_imx51 = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+ .script_addrs = &sdma_script_imx51,
+};
+
+static struct sdma_script_start_addrs sdma_script_imx53 = {
+ .ap_2_ap_addr = 642,
+ .app_2_mcu_addr = 683,
+ .mcu_2_app_addr = 747,
+ .uart_2_mcu_addr = 817,
+ .shp_2_mcu_addr = 891,
+ .mcu_2_shp_addr = 960,
+ .uartsh_2_mcu_addr = 1032,
+ .spdif_2_mcu_addr = 1100,
+ .mcu_2_spdif_addr = 1134,
+ .firi_2_mcu_addr = 1193,
+ .mcu_2_firi_addr = 1290,
+};
+
+static struct sdma_driver_data sdma_imx53 = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+ .script_addrs = &sdma_script_imx53,
+};
+
+static struct sdma_script_start_addrs sdma_script_imx6q = {
+ .ap_2_ap_addr = 642,
+ .uart_2_mcu_addr = 817,
+ .mcu_2_app_addr = 747,
+ .per_2_per_addr = 6331,
+ .uartsh_2_mcu_addr = 1032,
+ .mcu_2_shp_addr = 960,
+ .app_2_mcu_addr = 683,
+ .shp_2_mcu_addr = 891,
+ .spdif_2_mcu_addr = 1100,
+ .mcu_2_spdif_addr = 1134,
+};
+
+static struct sdma_driver_data sdma_imx6q = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+ .script_addrs = &sdma_script_imx6q,
};
static struct platform_device_id sdma_devtypes[] = {
{
+ .name = "imx25-sdma",
+ .driver_data = (unsigned long)&sdma_imx25,
+ }, {
.name = "imx31-sdma",
- .driver_data = IMX31_SDMA,
+ .driver_data = (unsigned long)&sdma_imx31,
}, {
.name = "imx35-sdma",
- .driver_data = IMX35_SDMA,
+ .driver_data = (unsigned long)&sdma_imx35,
+ }, {
+ .name = "imx51-sdma",
+ .driver_data = (unsigned long)&sdma_imx51,
+ }, {
+ .name = "imx53-sdma",
+ .driver_data = (unsigned long)&sdma_imx53,
+ }, {
+ .name = "imx6q-sdma",
+ .driver_data = (unsigned long)&sdma_imx6q,
}, {
/* sentinel */
}
@@ -343,8 +443,11 @@ static struct platform_device_id sdma_devtypes[] = {
MODULE_DEVICE_TABLE(platform, sdma_devtypes);
static const struct of_device_id sdma_dt_ids[] = {
- { .compatible = "fsl,imx31-sdma", .data = &sdma_devtypes[IMX31_SDMA], },
- { .compatible = "fsl,imx35-sdma", .data = &sdma_devtypes[IMX35_SDMA], },
+ { .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
+ { .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
+ { .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
+ { .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
+ { .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);
@@ -356,8 +459,7 @@ MODULE_DEVICE_TABLE(of, sdma_dt_ids);
static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
{
- u32 chnenbl0 = (sdma->devtype == IMX31_SDMA ? SDMA_CHNENBL0_IMX31 :
- SDMA_CHNENBL0_IMX35);
+ u32 chnenbl0 = sdma->drvdata->chnenbl0;
return chnenbl0 + event * 4;
}
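The lookup above is a stride of one 32-bit enable register per event; a worked example (the base value here is hypothetical, the real one comes from the SDMA_CHNENBL0_* defines):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t chnenbl0 = 0x200;	/* hypothetical base offset */
	unsigned int event = 5;

	printf("CHNENBL for event %u at 0x%03x\n",
	       event, chnenbl0 + event * 4);	/* 0x214 */
	return 0;
}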
@@ -547,8 +649,6 @@ static void sdma_tasklet(unsigned long data)
{
struct sdma_channel *sdmac = (struct sdma_channel *) data;
- complete(&sdmac->done);
-
if (sdmac->flags & IMX_DMA_SG_LOOP)
sdma_handle_channel_loop(sdmac);
else
@@ -733,7 +833,7 @@ static int sdma_config_channel(struct sdma_channel *sdmac)
sdmac->per_addr = 0;
if (sdmac->event_id0) {
- if (sdmac->event_id0 >= sdmac->sdma->num_events)
+ if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
return -EINVAL;
sdma_event_enable(sdmac, sdmac->event_id0);
}
@@ -812,9 +912,6 @@ static int sdma_request_channel(struct sdma_channel *sdmac)
sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
-
- init_completion(&sdmac->done);
-
return 0;
out:
@@ -1120,15 +1217,12 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
}
static enum dma_status sdma_tx_status(struct dma_chan *chan,
- dma_cookie_t cookie,
- struct dma_tx_state *txstate)
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
- dma_cookie_t last_used;
-
- last_used = chan->cookie;
- dma_set_tx_state(txstate, chan->completed_cookie, last_used,
+ dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
sdmac->chn_count - sdmac->chn_real_count);
return sdmac->status;
@@ -1218,19 +1312,6 @@ static int __init sdma_init(struct sdma_engine *sdma)
int i, ret;
dma_addr_t ccb_phys;
- switch (sdma->devtype) {
- case IMX31_SDMA:
- sdma->num_events = 32;
- break;
- case IMX35_SDMA:
- sdma->num_events = 48;
- break;
- default:
- dev_err(sdma->dev, "Unknown sdma type %d. aborting\n",
- sdma->devtype);
- return -ENODEV;
- }
-
clk_enable(sdma->clk_ipg);
clk_enable(sdma->clk_ahb);
@@ -1257,7 +1338,7 @@ static int __init sdma_init(struct sdma_engine *sdma)
MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
/* disable all channels */
- for (i = 0; i < sdma->num_events; i++)
+ for (i = 0; i < sdma->drvdata->num_events; i++)
writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
/* All channels have priority 0 */
@@ -1335,10 +1416,21 @@ static int __init sdma_probe(struct platform_device *pdev)
int ret;
int irq;
struct resource *iores;
- struct sdma_platform_data *pdata = pdev->dev.platform_data;
+ struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
int i;
struct sdma_engine *sdma;
s32 *saddr_arr;
+ const struct sdma_driver_data *drvdata = NULL;
+
+ if (of_id)
+ drvdata = of_id->data;
+ else if (pdev->id_entry)
+ drvdata = (void *)pdev->id_entry->driver_data;
+
+ if (!drvdata) {
+ dev_err(&pdev->dev, "unable to find driver data\n");
+ return -EINVAL;
+ }
sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
if (!sdma)
@@ -1347,6 +1439,7 @@ static int __init sdma_probe(struct platform_device *pdev)
spin_lock_init(&sdma->channel_0_lock);
sdma->dev = &pdev->dev;
+ sdma->drvdata = drvdata;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
@@ -1396,10 +1489,6 @@ static int __init sdma_probe(struct platform_device *pdev)
for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
saddr_arr[i] = -EINVAL;
- if (of_id)
- pdev->id_entry = of_id->data;
- sdma->devtype = pdev->id_entry->driver_data;
-
dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
@@ -1431,6 +1520,8 @@ static int __init sdma_probe(struct platform_device *pdev)
if (ret)
goto err_init;
+ if (sdma->drvdata->script_addrs)
+ sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
if (pdata && pdata->script_addrs)
sdma_add_scripts(sdma, pdata->script_addrs);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index b642e03..d8ececa 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -251,7 +251,7 @@ static bool is_bwd_noraid(struct pci_dev *pdev)
}
static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
- dma_addr_t addr, u32 offset, u8 coef, int idx)
+ dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
{
struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
struct ioat_pq16a_descriptor *pq16 =
@@ -1775,15 +1775,12 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
dma->device_free_chan_resources = ioat2_free_chan_resources;
- if (is_xeon_cb32(pdev))
- dma->copy_align = 6;
-
dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
- if (is_bwd_noraid(pdev))
+ if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
/* dca is incompatible with raid operations */
@@ -1793,7 +1790,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (device->cap & IOAT_CAP_XOR) {
is_raid_device = true;
dma->max_xor = 8;
- dma->xor_align = 6;
dma_cap_set(DMA_XOR, dma->cap_mask);
dma->device_prep_dma_xor = ioat3_prep_xor;
@@ -1812,13 +1808,8 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (device->cap & IOAT_CAP_RAID16SS) {
dma_set_maxpq(dma, 16, 0);
- dma->pq_align = 0;
} else {
dma_set_maxpq(dma, 8, 0);
- if (is_xeon_cb32(pdev))
- dma->pq_align = 6;
- else
- dma->pq_align = 0;
}
if (!(device->cap & IOAT_CAP_XOR)) {
@@ -1829,13 +1820,8 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (device->cap & IOAT_CAP_RAID16SS) {
dma->max_xor = 16;
- dma->xor_align = 0;
} else {
dma->max_xor = 8;
- if (is_xeon_cb32(pdev))
- dma->xor_align = 6;
- else
- dma->xor_align = 0;
}
}
}
@@ -1844,14 +1830,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
device->cleanup_fn = ioat3_cleanup_event;
device->timer_fn = ioat3_timer_event;
- if (is_xeon_cb32(pdev)) {
- dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
- dma->device_prep_dma_xor_val = NULL;
-
- dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
- dma->device_prep_dma_pq_val = NULL;
- }
-
/* starting with CB3.3 super extended descriptors are supported */
if (device->cap & IOAT_CAP_RAID16SS) {
char pool_name[14];
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index cc727ec..dd8b44a 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -518,7 +518,7 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
struct iop_adma_desc_slot *slot = NULL;
int init = iop_chan->slots_allocated ? 0 : 1;
struct iop_adma_platform_data *plat_data =
- iop_chan->device->pdev->dev.platform_data;
+ dev_get_platdata(&iop_chan->device->pdev->dev);
int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;
/* Allocate descriptor slots */
@@ -1351,7 +1351,7 @@ static int iop_adma_remove(struct platform_device *dev)
struct iop_adma_device *device = platform_get_drvdata(dev);
struct dma_chan *chan, *_chan;
struct iop_adma_chan *iop_chan;
- struct iop_adma_platform_data *plat_data = dev->dev.platform_data;
+ struct iop_adma_platform_data *plat_data = dev_get_platdata(&dev->dev);
dma_async_device_unregister(&device->common);
@@ -1376,7 +1376,7 @@ static int iop_adma_probe(struct platform_device *pdev)
struct iop_adma_device *adev;
struct iop_adma_chan *iop_chan;
struct dma_device *dma_dev;
- struct iop_adma_platform_data *plat_data = pdev->dev.platform_data;
+ struct iop_adma_platform_data *plat_data = dev_get_platdata(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index d39c2cd..cb9c0bc 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1593,10 +1593,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
static enum dma_status idmac_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
- dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);
- if (cookie != chan->cookie)
- return DMA_ERROR;
- return DMA_SUCCESS;
+ return dma_cookie_status(chan, cookie, txstate);
}
static int __init ipu_idmac_init(struct ipu *ipu)
@@ -1767,7 +1764,6 @@ static int ipu_remove(struct platform_device *pdev)
iounmap(ipu->reg_ic);
iounmap(ipu->reg_ipu);
tasklet_kill(&ipu->tasklet);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
new file mode 100644
index 0000000..a2c330f
--- /dev/null
+++ b/drivers/dma/k3dma.c
@@ -0,0 +1,837 @@
+/*
+ * Copyright (c) 2013 Linaro Ltd.
+ * Copyright (c) 2013 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+#define DRIVER_NAME "k3-dma"
+#define DMA_ALIGN 3
+#define DMA_MAX_SIZE 0x1ffc
+
+#define INT_STAT 0x00
+#define INT_TC1 0x04
+#define INT_ERR1 0x0c
+#define INT_ERR2 0x10
+#define INT_TC1_MASK 0x18
+#define INT_ERR1_MASK 0x20
+#define INT_ERR2_MASK 0x24
+#define INT_TC1_RAW 0x600
+#define INT_ERR1_RAW 0x608
+#define INT_ERR2_RAW 0x610
+#define CH_PRI 0x688
+#define CH_STAT 0x690
+#define CX_CUR_CNT 0x704
+#define CX_LLI 0x800
+#define CX_CNT 0x810
+#define CX_SRC 0x814
+#define CX_DST 0x818
+#define CX_CFG 0x81c
+#define AXI_CFG 0x820
+#define AXI_CFG_DEFAULT 0x201201
+
+#define CX_LLI_CHAIN_EN 0x2
+#define CX_CFG_EN 0x1
+#define CX_CFG_MEM2PER (0x1 << 2)
+#define CX_CFG_PER2MEM (0x2 << 2)
+#define CX_CFG_SRCINCR (0x1 << 31)
+#define CX_CFG_DSTINCR (0x1 << 30)
+
+struct k3_desc_hw {
+ u32 lli;
+ u32 reserved[3];
+ u32 count;
+ u32 saddr;
+ u32 daddr;
+ u32 config;
+} __aligned(32);
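The hardware walks these 32-byte descriptors as a linked list through the lli word; a compilable sketch of chaining two of them (bus address hypothetical, CX_LLI_CHAIN_EN taken from the defines above):

#include <stdint.h>
#include <stdio.h>

struct desc_model {			/* mirrors k3_desc_hw above */
	uint32_t lli;
	uint32_t reserved[3];
	uint32_t count;
	uint32_t saddr;
	uint32_t daddr;
	uint32_t config;
} __attribute__((aligned(32)));

int main(void)
{
	struct desc_model d[2];
	uint32_t d_bus = 0x90000000;	/* hypothetical bus address */

	/* Point the first descriptor at the second and flag the link
	 * as valid so the controller keeps fetching. */
	d[0].lli = (d_bus + sizeof(d[0])) | 0x2 /* CX_LLI_CHAIN_EN */;
	d[1].lli = 0;			/* end of chain */

	printf("d[0].lli = 0x%08x\n", d[0].lli);	/* 0x90000022 */
	return 0;
}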
+
+struct k3_dma_desc_sw {
+ struct virt_dma_desc vd;
+ dma_addr_t desc_hw_lli;
+ size_t desc_num;
+ size_t size;
+ struct k3_desc_hw desc_hw[0];
+};
+
+struct k3_dma_phy;
+
+struct k3_dma_chan {
+ u32 ccfg;
+ struct virt_dma_chan vc;
+ struct k3_dma_phy *phy;
+ struct list_head node;
+ enum dma_transfer_direction dir;
+ dma_addr_t dev_addr;
+ enum dma_status status;
+};
+
+struct k3_dma_phy {
+ u32 idx;
+ void __iomem *base;
+ struct k3_dma_chan *vchan;
+ struct k3_dma_desc_sw *ds_run;
+ struct k3_dma_desc_sw *ds_done;
+};
+
+struct k3_dma_dev {
+ struct dma_device slave;
+ void __iomem *base;
+ struct tasklet_struct task;
+ spinlock_t lock;
+ struct list_head chan_pending;
+ struct k3_dma_phy *phy;
+ struct k3_dma_chan *chans;
+ struct clk *clk;
+ u32 dma_channels;
+ u32 dma_requests;
+};
+
+#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
+
+static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct k3_dma_chan, vc.chan);
+}
+
+static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
+{
+ u32 val = 0;
+
+ if (on) {
+ val = readl_relaxed(phy->base + CX_CFG);
+ val |= CX_CFG_EN;
+ writel_relaxed(val, phy->base + CX_CFG);
+ } else {
+ val = readl_relaxed(phy->base + CX_CFG);
+ val &= ~CX_CFG_EN;
+ writel_relaxed(val, phy->base + CX_CFG);
+ }
+}
+
+static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
+{
+ u32 val = 0;
+
+ k3_dma_pause_dma(phy, false);
+
+ val = 0x1 << phy->idx;
+ writel_relaxed(val, d->base + INT_TC1_RAW);
+ writel_relaxed(val, d->base + INT_ERR1_RAW);
+ writel_relaxed(val, d->base + INT_ERR2_RAW);
+}
+
+static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
+{
+ writel_relaxed(hw->lli, phy->base + CX_LLI);
+ writel_relaxed(hw->count, phy->base + CX_CNT);
+ writel_relaxed(hw->saddr, phy->base + CX_SRC);
+ writel_relaxed(hw->daddr, phy->base + CX_DST);
+ writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG);
+ writel_relaxed(hw->config, phy->base + CX_CFG);
+}
+
+static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
+{
+ u32 cnt = 0;
+
+ cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
+ cnt &= 0xffff;
+ return cnt;
+}
+
+static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
+{
+ return readl_relaxed(phy->base + CX_LLI);
+}
+
+static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
+{
+ return readl_relaxed(d->base + CH_STAT);
+}
+
+static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
+{
+ if (on) {
+ /* set same priority */
+ writel_relaxed(0x0, d->base + CH_PRI);
+
+ /* unmask irq */
+ writel_relaxed(0xffff, d->base + INT_TC1_MASK);
+ writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
+ writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
+ } else {
+ /* mask irq */
+ writel_relaxed(0x0, d->base + INT_TC1_MASK);
+ writel_relaxed(0x0, d->base + INT_ERR1_MASK);
+ writel_relaxed(0x0, d->base + INT_ERR2_MASK);
+ }
+}
+
+static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
+{
+ struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
+ struct k3_dma_phy *p;
+ struct k3_dma_chan *c;
+ u32 stat = readl_relaxed(d->base + INT_STAT);
+ u32 tc1 = readl_relaxed(d->base + INT_TC1);
+ u32 err1 = readl_relaxed(d->base + INT_ERR1);
+ u32 err2 = readl_relaxed(d->base + INT_ERR2);
+ u32 i, irq_chan = 0;
+
+ while (stat) {
+ i = __ffs(stat);
+ stat &= (stat - 1);
+ if (likely(tc1 & BIT(i))) {
+ p = &d->phy[i];
+ c = p->vchan;
+ if (c) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ vchan_cookie_complete(&p->ds_run->vd);
+ p->ds_done = p->ds_run;
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ }
+ irq_chan |= BIT(i);
+ }
+ if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
+ dev_warn(d->slave.dev, "DMA ERR\n");
+ }
+
+ writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
+ writel_relaxed(err1, d->base + INT_ERR1_RAW);
+ writel_relaxed(err2, d->base + INT_ERR2_RAW);
+
+ if (irq_chan) {
+ tasklet_schedule(&d->task);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
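The handler retires one pending channel per loop iteration: __ffs() returns the index of the lowest set bit and stat &= stat - 1 clears it. A self-contained illustration of the idiom, with a made-up status value:

#include <linux/bitops.h>
#include <linux/printk.h>

static void example_visit_channels(u32 stat)
{
	/* stat == 0x52 (bits 1, 4, 6) visits channels 1, 4 and 6 */
	while (stat) {
		unsigned int i = __ffs(stat);	/* lowest set bit */

		stat &= stat - 1;		/* clear that bit */
		pr_debug("channel %u raised an interrupt\n", i);
	}
}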
+
+static int k3_dma_start_txd(struct k3_dma_chan *c)
+{
+ struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
+ struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+
+ if (!c->phy)
+ return -EAGAIN;
+
+ if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
+ return -EAGAIN;
+
+ if (vd) {
+ struct k3_dma_desc_sw *ds =
+ container_of(vd, struct k3_dma_desc_sw, vd);
+ /*
+ * fetch and remove request from vc->desc_issued
+ * so vc->desc_issued only contains desc pending
+ */
+ list_del(&ds->vd.node);
+ c->phy->ds_run = ds;
+ c->phy->ds_done = NULL;
+ /* start dma */
+ k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
+ return 0;
+ }
+ c->phy->ds_done = NULL;
+ c->phy->ds_run = NULL;
+ return -EAGAIN;
+}
+
+static void k3_dma_tasklet(unsigned long arg)
+{
+ struct k3_dma_dev *d = (struct k3_dma_dev *)arg;
+ struct k3_dma_phy *p;
+ struct k3_dma_chan *c, *cn;
+ unsigned pch, pch_alloc = 0;
+
+ /* check new dma request of running channel in vc->desc_issued */
+ list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
+ spin_lock_irq(&c->vc.lock);
+ p = c->phy;
+ if (p && p->ds_done) {
+ if (k3_dma_start_txd(c)) {
+ /* No current txd associated with this channel */
+ dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
+ /* Mark this channel free */
+ c->phy = NULL;
+ p->vchan = NULL;
+ }
+ }
+ spin_unlock_irq(&c->vc.lock);
+ }
+
+ /* check new channel request in d->chan_pending */
+ spin_lock_irq(&d->lock);
+ for (pch = 0; pch < d->dma_channels; pch++) {
+ p = &d->phy[pch];
+
+ if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
+ c = list_first_entry(&d->chan_pending,
+ struct k3_dma_chan, node);
+ /* remove from d->chan_pending */
+ list_del_init(&c->node);
+ pch_alloc |= 1 << pch;
+ /* Mark this channel allocated */
+ p->vchan = c;
+ c->phy = p;
+ dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
+ }
+ }
+ spin_unlock_irq(&d->lock);
+
+ for (pch = 0; pch < d->dma_channels; pch++) {
+ if (pch_alloc & (1 << pch)) {
+ p = &d->phy[pch];
+ c = p->vchan;
+ if (c) {
+ spin_lock_irq(&c->vc.lock);
+ k3_dma_start_txd(c);
+ spin_unlock_irq(&c->vc.lock);
+ }
+ }
+ }
+}
+
+static int k3_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ return 0;
+}
+
+static void k3_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_dev *d = to_k3_dma(chan->device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d->lock, flags);
+ list_del_init(&c->node);
+ spin_unlock_irqrestore(&d->lock, flags);
+
+ vchan_free_chan_resources(&c->vc);
+ c->ccfg = 0;
+}
+
+static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_dev *d = to_k3_dma(chan->device);
+ struct k3_dma_phy *p;
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ enum dma_status ret;
+ size_t bytes = 0;
+
+ ret = dma_cookie_status(&c->vc.chan, cookie, state);
+ if (ret == DMA_SUCCESS)
+ return ret;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ p = c->phy;
+ ret = c->status;
+
+ /*
+ * If the cookie is on our issue queue, then the residue is
+ * its total size.
+ */
+ vd = vchan_find_desc(&c->vc, cookie);
+ if (vd) {
+ bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
+ } else if ((!p) || (!p->ds_run)) {
+ bytes = 0;
+ } else {
+ struct k3_dma_desc_sw *ds = p->ds_run;
+ u32 clli = 0, index = 0;
+
+ bytes = k3_dma_get_curr_cnt(d, p);
+ clli = k3_dma_get_curr_lli(p);
+ index = (clli - ds->desc_hw_lli) / sizeof(struct k3_desc_hw);
+ for (; index < ds->desc_num; index++) {
+ bytes += ds->desc_hw[index].count;
+ /* end of lli */
+ if (!ds->desc_hw[index].lli)
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ dma_set_residue(state, bytes);
+ return ret;
+}
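A worked example of the residue arithmetic, with invented numbers; it assumes CX_LLI holds the next entry to be fetched, which is what the separate CX_CUR_CNT term implies:

/*
 * Four 0x1000-byte entries, ds->desc_hw_lli == 0x80000000, and
 * CX_LLI reads back 0x80000040:
 *
 *   index = (0x80000040 - 0x80000000) / sizeof(struct k3_desc_hw) = 2
 *   bytes = CX_CUR_CNT remainder of the in-flight entry
 *         + count of entry 2 + count of entry 3 (entry 3 has lli == 0,
 *           which ends the walk)
 */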
+
+static void k3_dma_issue_pending(struct dma_chan *chan)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_dev *d = to_k3_dma(chan->device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ /* add request to vc->desc_issued */
+ if (vchan_issue_pending(&c->vc)) {
+ spin_lock(&d->lock);
+ if (!c->phy) {
+ if (list_empty(&c->node)) {
+ /* if new channel, add chan_pending */
+ list_add_tail(&c->node, &d->chan_pending);
+ /* check in tasklet */
+ tasklet_schedule(&d->task);
+ dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
+ }
+ }
+ spin_unlock(&d->lock);
+ } else {
+ dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
+ dma_addr_t src, size_t len, u32 num, u32 ccfg)
+{
+ if ((num + 1) < ds->desc_num)
+ ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
+ sizeof(struct k3_desc_hw);
+ ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
+ ds->desc_hw[num].count = len;
+ ds->desc_hw[num].saddr = src;
+ ds->desc_hw[num].daddr = dst;
+ ds->desc_hw[num].config = ccfg;
+}
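Each entry's lli field is the bus address of its successor with CX_LLI_CHAIN_EN or'ed in; the prep functions below overwrite the last entry's lli with 0 to terminate the chain. For a hypothetical three-entry chain whose first descriptor sits at bus address 0x1000:

/*
 *   desc_hw[0].lli = 0x1020 | CX_LLI_CHAIN_EN
 *   desc_hw[1].lli = 0x1040 | CX_LLI_CHAIN_EN
 *   desc_hw[2].lli = 0x0              (end of link, set by the caller)
 */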
+
+static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
+ struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_desc_sw *ds;
+ size_t copy = 0;
+ int num = 0;
+
+ if (!len)
+ return NULL;
+
+ num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
+ ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
+ if (!ds) {
+ dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
+ return NULL;
+ }
+ ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
+ ds->size = len;
+ ds->desc_num = num;
+ num = 0;
+
+ if (!c->ccfg) {
+ /* default is memtomem, without calling device_control */
+ c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
+ c->ccfg |= (0xf << 20) | (0xf << 24); /* burst = 16 */
+ c->ccfg |= (0x3 << 12) | (0x3 << 16); /* width = 64 bit */
+ }
+
+ do {
+ copy = min_t(size_t, len, DMA_MAX_SIZE);
+ k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
+
+ if (c->dir == DMA_MEM_TO_DEV) {
+ src += copy;
+ } else if (c->dir == DMA_DEV_TO_MEM) {
+ dst += copy;
+ } else {
+ src += copy;
+ dst += copy;
+ }
+ len -= copy;
+ } while (len);
+
+ ds->desc_hw[num-1].lli = 0; /* end of link */
+ return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
+ enum dma_transfer_direction dir, unsigned long flags, void *context)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_desc_sw *ds;
+ size_t len, avail, total = 0;
+ struct scatterlist *sg;
+ dma_addr_t addr, src = 0, dst = 0;
+ int num = sglen, i;
+
+ if (!sgl)
+ return NULL;
+
+ for_each_sg(sgl, sg, sglen, i) {
+ avail = sg_dma_len(sg);
+ if (avail > DMA_MAX_SIZE)
+ num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
+ }
+
+ ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
+ if (!ds) {
+ dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
+ return NULL;
+ }
+ ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
+ ds->desc_num = num;
+ num = 0;
+
+ for_each_sg(sgl, sg, sglen, i) {
+ addr = sg_dma_address(sg);
+ avail = sg_dma_len(sg);
+ total += avail;
+
+ do {
+ len = min_t(size_t, avail, DMA_MAX_SIZE);
+
+ if (dir == DMA_MEM_TO_DEV) {
+ src = addr;
+ dst = c->dev_addr;
+ } else if (dir == DMA_DEV_TO_MEM) {
+ src = c->dev_addr;
+ dst = addr;
+ }
+
+ k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);
+
+ addr += len;
+ avail -= len;
+ } while (avail);
+ }
+
+ ds->desc_hw[num-1].lli = 0; /* end of link */
+ ds->size = total;
+ return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+static int k3_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+ struct k3_dma_dev *d = to_k3_dma(chan->device);
+ struct dma_slave_config *cfg = (void *)arg;
+ struct k3_dma_phy *p = c->phy;
+ unsigned long flags;
+ u32 maxburst = 0, val = 0;
+ enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ LIST_HEAD(head);
+
+ switch (cmd) {
+ case DMA_SLAVE_CONFIG:
+ if (cfg == NULL)
+ return -EINVAL;
+ c->dir = cfg->direction;
+ if (c->dir == DMA_DEV_TO_MEM) {
+ c->ccfg = CX_CFG_DSTINCR;
+ c->dev_addr = cfg->src_addr;
+ maxburst = cfg->src_maxburst;
+ width = cfg->src_addr_width;
+ } else if (c->dir == DMA_MEM_TO_DEV) {
+ c->ccfg = CX_CFG_SRCINCR;
+ c->dev_addr = cfg->dst_addr;
+ maxburst = cfg->dst_maxburst;
+ width = cfg->dst_addr_width;
+ }
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ val = __ffs(width);
+ break;
+ default:
+ val = 3;
+ break;
+ }
+ c->ccfg |= (val << 12) | (val << 16);
+
+ if ((maxburst == 0) || (maxburst > 16))
+ val = 15; /* burst is encoded as length - 1, cap at 16 */
+ else
+ val = maxburst - 1;
+ c->ccfg |= (val << 20) | (val << 24);
+ c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
+
+ /* specific request line */
+ c->ccfg |= c->vc.chan.chan_id << 4;
+ break;
+
+ case DMA_TERMINATE_ALL:
+ dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
+
+ /* Prevent this channel being scheduled */
+ spin_lock(&d->lock);
+ list_del_init(&c->node);
+ spin_unlock(&d->lock);
+
+ /* Clear the tx descriptor lists */
+ spin_lock_irqsave(&c->vc.lock, flags);
+ vchan_get_all_descriptors(&c->vc, &head);
+ if (p) {
+ /* vchan is assigned to a pchan - stop the channel */
+ k3_dma_terminate_chan(p, d);
+ c->phy = NULL;
+ p->vchan = NULL;
+ p->ds_run = p->ds_done = NULL;
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ vchan_dma_desc_free_list(&c->vc, &head);
+ break;
+
+ case DMA_PAUSE:
+ dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
+ if (c->status == DMA_IN_PROGRESS) {
+ c->status = DMA_PAUSED;
+ if (p) {
+ k3_dma_pause_dma(p, false);
+ } else {
+ spin_lock(&d->lock);
+ list_del_init(&c->node);
+ spin_unlock(&d->lock);
+ }
+ }
+ break;
+
+ case DMA_RESUME:
+ dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
+ spin_lock_irqsave(&c->vc.lock, flags);
+ if (c->status == DMA_PAUSED) {
+ c->status = DMA_IN_PROGRESS;
+ if (p) {
+ k3_dma_pause_dma(p, true);
+ } else if (!list_empty(&c->vc.desc_issued)) {
+ spin_lock(&d->lock);
+ list_add_tail(&c->node, &d->chan_pending);
+ spin_unlock(&d->lock);
+ }
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ break;
+ default:
+ return -ENXIO;
+ }
+ return 0;
+}
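For context, a sketch of the client-side call that lands in the DMA_SLAVE_CONFIG branch above; the FIFO address and burst value are assumptions, not taken from the patch:

#include <linux/dmaengine.h>

static int example_cfg_rx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 8,
	};

	/* routed to k3_dma_control(chan, DMA_SLAVE_CONFIG, ...) */
	return dmaengine_slave_config(chan, &cfg);
}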
+
+static void k3_dma_free_desc(struct virt_dma_desc *vd)
+{
+ struct k3_dma_desc_sw *ds =
+ container_of(vd, struct k3_dma_desc_sw, vd);
+
+ kfree(ds);
+}
+
+static const struct of_device_id k3_pdma_dt_ids[] = {
+ { .compatible = "hisilicon,k3-dma-1.0", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);
+
+static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct k3_dma_dev *d = ofdma->of_dma_data;
+ unsigned int request = dma_spec->args[0];
+
+ if (request >= d->dma_requests)
+ return NULL;
+
+ return dma_get_slave_channel(&(d->chans[request].vc.chan));
+}
+
+static int k3_dma_probe(struct platform_device *op)
+{
+ struct k3_dma_dev *d;
+ const struct of_device_id *of_id;
+ struct resource *iores;
+ int i, ret, irq = 0;
+
+ iores = platform_get_resource(op, IORESOURCE_MEM, 0);
+ if (!iores)
+ return -EINVAL;
+
+ d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ d->base = devm_ioremap_resource(&op->dev, iores);
+ if (IS_ERR(d->base))
+ return PTR_ERR(d->base);
+
+ of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
+ if (of_id) {
+ of_property_read_u32(op->dev.of_node,
+ "dma-channels", &d->dma_channels);
+ of_property_read_u32(op->dev.of_node,
+ "dma-requests", &d->dma_requests);
+ }
+
+ d->clk = devm_clk_get(&op->dev, NULL);
+ if (IS_ERR(d->clk)) {
+ dev_err(&op->dev, "no dma clk\n");
+ return PTR_ERR(d->clk);
+ }
+
+ irq = platform_get_irq(op, 0);
+ ret = devm_request_irq(&op->dev, irq,
+ k3_dma_int_handler, 0, DRIVER_NAME, d);
+ if (ret)
+ return ret;
+
+ /* init phy channel */
+ d->phy = devm_kzalloc(&op->dev,
+ d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL);
+ if (d->phy == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < d->dma_channels; i++) {
+ struct k3_dma_phy *p = &d->phy[i];
+
+ p->idx = i;
+ p->base = d->base + i * 0x40;
+ }
+
+ INIT_LIST_HEAD(&d->slave.channels);
+ dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
+ dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
+ d->slave.dev = &op->dev;
+ d->slave.device_alloc_chan_resources = k3_dma_alloc_chan_resources;
+ d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
+ d->slave.device_tx_status = k3_dma_tx_status;
+ d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
+ d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
+ d->slave.device_issue_pending = k3_dma_issue_pending;
+ d->slave.device_control = k3_dma_control;
+ d->slave.copy_align = DMA_ALIGN;
+ d->slave.chancnt = d->dma_requests;
+
+ /* init virtual channel */
+ d->chans = devm_kzalloc(&op->dev,
+ d->dma_requests * sizeof(struct k3_dma_chan), GFP_KERNEL);
+ if (d->chans == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < d->dma_requests; i++) {
+ struct k3_dma_chan *c = &d->chans[i];
+
+ c->status = DMA_IN_PROGRESS;
+ INIT_LIST_HEAD(&c->node);
+ c->vc.desc_free = k3_dma_free_desc;
+ vchan_init(&c->vc, &d->slave);
+ }
+
+ spin_lock_init(&d->lock);
+ INIT_LIST_HEAD(&d->chan_pending);
+ tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d);
+
+ /* Enable clock before accessing registers */
+ ret = clk_prepare_enable(d->clk);
+ if (ret < 0) {
+ dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
+ return ret;
+ }
+
+ k3_dma_enable_dma(d, true);
+
+ ret = dma_async_device_register(&d->slave);
+ if (ret)
+ return ret;
+
+ ret = of_dma_controller_register(op->dev.of_node,
+ k3_of_dma_simple_xlate, d);
+ if (ret)
+ goto of_dma_register_fail;
+
+ platform_set_drvdata(op, d);
+ dev_info(&op->dev, "initialized\n");
+
+ return 0;
+
+of_dma_register_fail:
+ dma_async_device_unregister(&d->slave);
+ return ret;
+}
+
+static int k3_dma_remove(struct platform_device *op)
+{
+ struct k3_dma_chan *c, *cn;
+ struct k3_dma_dev *d = platform_get_drvdata(op);
+
+ dma_async_device_unregister(&d->slave);
+ of_dma_controller_free(op->dev.of_node);
+
+ list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
+ list_del(&c->vc.chan.device_node);
+ tasklet_kill(&c->vc.task);
+ }
+ tasklet_kill(&d->task);
+ clk_disable_unprepare(d->clk);
+ return 0;
+}
+
+static int k3_dma_suspend(struct device *dev)
+{
+ struct k3_dma_dev *d = dev_get_drvdata(dev);
+ u32 stat = 0;
+
+ stat = k3_dma_get_chan_stat(d);
+ if (stat) {
+ dev_warn(d->slave.dev,
+ "chan %d is running, failed to suspend\n", stat);
+ return -EBUSY;
+ }
+ k3_dma_enable_dma(d, false);
+ clk_disable_unprepare(d->clk);
+ return 0;
+}
+
+static int k3_dma_resume(struct device *dev)
+{
+ struct k3_dma_dev *d = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = clk_prepare_enable(d->clk);
+ if (ret < 0) {
+ dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
+ return ret;
+ }
+ k3_dma_enable_dma(d, true);
+ return 0;
+}
+
+SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
+
+static struct platform_driver k3_pdma_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &k3_dma_pmops,
+ .of_match_table = k3_pdma_dt_ids,
+ },
+ .probe = k3_dma_probe,
+ .remove = k3_dma_remove,
+};
+
+module_platform_driver(k3_pdma_driver);
+
+MODULE_DESCRIPTION("Hisilicon k3 DMA Driver");
+MODULE_ALIAS("platform:k3dma");
+MODULE_LICENSE("GPL v2");
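A minimal consumer flow against this driver, assuming a device-tree node with an "rx" dma-names entry (hypothetical); the channel lookup goes through k3_of_dma_simple_xlate() above:

#include <linux/dmaengine.h>

static int example_rx_once(struct device *dev, dma_addr_t buf, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	chan = dma_request_slave_channel(dev, "rx");
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);	/* k3_dma_issue_pending() runs */
	/* ... wait for the completion callback, then release the channel */
	return dma_submit_error(cookie);
}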
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index c26699f..ff8d7827 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -18,7 +18,9 @@
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
+#include <linux/of_dma.h>
#include <linux/of.h>
+#include <linux/dma/mmp-pdma.h>
#include "dmaengine.h"
@@ -47,6 +49,8 @@
#define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */
#define DCSR_EORINTR (1 << 9) /* The end of Receive */
+#define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + \
+ (((n) & 0x3f) << 2))
#define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */
#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */
@@ -69,7 +73,7 @@
#define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */
#define PDMA_ALIGNMENT 3
-#define PDMA_MAX_DESC_BYTES 0x1000
+#define PDMA_MAX_DESC_BYTES DCMD_LENGTH
struct mmp_pdma_desc_hw {
u32 ddadr; /* Points to the next descriptor + flags */
@@ -94,6 +98,9 @@ struct mmp_pdma_chan {
struct mmp_pdma_phy *phy;
enum dma_transfer_direction dir;
+ struct mmp_pdma_desc_sw *cyclic_first; /* first desc_sw if channel
+ * is in cyclic mode */
+
/* channel's basic info */
struct tasklet_struct tasklet;
u32 dcmd;
@@ -105,6 +112,7 @@ struct mmp_pdma_chan {
struct list_head chain_pending; /* Link descriptors queue for pending */
struct list_head chain_running; /* Link descriptors queue for running */
bool idle; /* channel state machine */
+ bool byte_align;
struct dma_pool *desc_pool; /* Descriptors pool */
};
@@ -121,6 +129,7 @@ struct mmp_pdma_device {
struct device *dev;
struct dma_device device;
struct mmp_pdma_phy *phy;
+ spinlock_t phy_lock; /* protect alloc/free phy channels */
};
#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
@@ -137,15 +146,21 @@ static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
static void enable_chan(struct mmp_pdma_phy *phy)
{
- u32 reg;
+ u32 reg, dalgn;
if (!phy->vchan)
return;
- reg = phy->vchan->drcmr;
- reg = (((reg) < 64) ? 0x0100 : 0x1100) + (((reg) & 0x3f) << 2);
+ reg = DRCMR(phy->vchan->drcmr);
writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+ dalgn = readl(phy->base + DALGN);
+ if (phy->vchan->byte_align)
+ dalgn |= 1 << phy->idx;
+ else
+ dalgn &= ~(1 << phy->idx);
+ writel(dalgn, phy->base + DALGN);
+
reg = (phy->idx << 2) + DCSR;
writel(readl(phy->base + reg) | DCSR_RUN,
phy->base + reg);
@@ -218,7 +233,8 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
int prio, i;
struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
- struct mmp_pdma_phy *phy;
+ struct mmp_pdma_phy *phy, *found = NULL;
+ unsigned long flags;
/*
* dma channel priorities
@@ -227,6 +243,8 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
* ch 8 - 11, 24 - 27 <--> (2)
* ch 12 - 15, 28 - 31 <--> (3)
*/
+
+ spin_lock_irqsave(&pdev->phy_lock, flags);
for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
for (i = 0; i < pdev->dma_channels; i++) {
if (prio != ((i & 0xf) >> 2))
@@ -234,31 +252,34 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
phy = &pdev->phy[i];
if (!phy->vchan) {
phy->vchan = pchan;
- return phy;
+ found = phy;
+ goto out_unlock;
}
}
}
- return NULL;
+out_unlock:
+ spin_unlock_irqrestore(&pdev->phy_lock, flags);
+ return found;
}
-/* desc->tx_list ==> pending list */
-static void append_pending_queue(struct mmp_pdma_chan *chan,
- struct mmp_pdma_desc_sw *desc)
+static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
{
- struct mmp_pdma_desc_sw *tail =
- to_mmp_pdma_desc(chan->chain_pending.prev);
+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
+ unsigned long flags;
+ u32 reg;
- if (list_empty(&chan->chain_pending))
- goto out_splice;
+ if (!pchan->phy)
+ return;
- /* one irq per queue, even appended */
- tail->desc.ddadr = desc->async_tx.phys;
- tail->desc.dcmd &= ~DCMD_ENDIRQEN;
+ /* clear the channel mapping in DRCMR */
+ reg = DRCMR(pchan->phy->vchan->drcmr);
+ writel(0, pchan->phy->base + reg);
- /* softly link to pending list */
-out_splice:
- list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
+ spin_lock_irqsave(&pdev->phy_lock, flags);
+ pchan->phy->vchan = NULL;
+ pchan->phy = NULL;
+ spin_unlock_irqrestore(&pdev->phy_lock, flags);
}
/**
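Worked values for the DRCMR() macro introduced above: request lines 0 to 63 map into a register bank at offset 0x0100, lines 64 and up into a second bank at 0x1100, four bytes per line:

/*
 *   DRCMR(5)  = 0x0100 + (5 << 2)           = 0x0114
 *   DRCMR(70) = 0x1100 + ((70 & 0x3f) << 2) = 0x1118
 */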
@@ -277,10 +298,7 @@ static void start_pending_queue(struct mmp_pdma_chan *chan)
if (list_empty(&chan->chain_pending)) {
/* chance to re-fetch phy channel with higher prio */
- if (chan->phy) {
- chan->phy->vchan = NULL;
- chan->phy = NULL;
- }
+ mmp_pdma_free_phy(chan);
dev_dbg(chan->dev, "no pending list\n");
return;
}
@@ -326,14 +344,16 @@ static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
cookie = dma_cookie_assign(&child->async_tx);
}
- append_pending_queue(chan, desc);
+ /* softly link to pending list - desc->tx_list ==> pending list */
+ list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
spin_unlock_irqrestore(&chan->desc_lock, flags);
return cookie;
}
-struct mmp_pdma_desc_sw *mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
+static struct mmp_pdma_desc_sw *
+mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
struct mmp_pdma_desc_sw *desc;
dma_addr_t pdesc;
@@ -377,10 +397,7 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
dev_err(chan->dev, "unable to allocate descriptor pool\n");
return -ENOMEM;
}
- if (chan->phy) {
- chan->phy->vchan = NULL;
- chan->phy = NULL;
- }
+ mmp_pdma_free_phy(chan);
chan->idle = true;
chan->dev_addr = 0;
return 1;
@@ -411,10 +428,7 @@ static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
chan->desc_pool = NULL;
chan->idle = true;
chan->dev_addr = 0;
- if (chan->phy) {
- chan->phy->vchan = NULL;
- chan->phy = NULL;
- }
+ mmp_pdma_free_phy(chan);
return;
}
@@ -434,6 +448,7 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
return NULL;
chan = to_mmp_pdma_chan(dchan);
+ chan->byte_align = false;
if (!chan->dir) {
chan->dir = DMA_MEM_TO_MEM;
@@ -450,6 +465,8 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
}
copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
+ if (dma_src & 0x7 || dma_dst & 0x7)
+ chan->byte_align = true;
new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
new->desc.dsadr = dma_src;
@@ -486,6 +503,8 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
new->desc.ddadr = DDADR_STOP;
new->desc.dcmd |= DCMD_ENDIRQEN;
+ chan->cyclic_first = NULL;
+
return &first->async_tx;
fail:
@@ -509,12 +528,16 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
if ((sgl == NULL) || (sg_len == 0))
return NULL;
+ chan->byte_align = false;
+
for_each_sg(sgl, sg, sg_len, i) {
addr = sg_dma_address(sg);
avail = sg_dma_len(sg);
do {
len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
+ if (addr & 0x7)
+ chan->byte_align = true;
/* allocate and populate the descriptor */
new = mmp_pdma_alloc_descriptor(chan);
@@ -557,6 +580,94 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
new->desc.ddadr = DDADR_STOP;
new->desc.dcmd |= DCMD_ENDIRQEN;
+ chan->dir = dir;
+ chan->cyclic_first = NULL;
+
+ return &first->async_tx;
+
+fail:
+ if (first)
+ mmp_pdma_free_desc_list(chan, &first->tx_list);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic(
+ struct dma_chan *dchan, dma_addr_t buf_addr, size_t len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct mmp_pdma_chan *chan;
+ struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
+ dma_addr_t dma_src, dma_dst;
+
+ if (!dchan || !len || !period_len)
+ return NULL;
+
+ /* the buffer length must be a multiple of period_len */
+ if (len % period_len != 0)
+ return NULL;
+
+ if (period_len > PDMA_MAX_DESC_BYTES)
+ return NULL;
+
+ chan = to_mmp_pdma_chan(dchan);
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ dma_src = buf_addr;
+ dma_dst = chan->dev_addr;
+ break;
+ case DMA_DEV_TO_MEM:
+ dma_dst = buf_addr;
+ dma_src = chan->dev_addr;
+ break;
+ default:
+ dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
+ return NULL;
+ }
+
+ chan->dir = direction;
+
+ do {
+ /* Allocate the link descriptor from DMA pool */
+ new = mmp_pdma_alloc_descriptor(chan);
+ if (!new) {
+ dev_err(chan->dev, "no memory for desc\n");
+ goto fail;
+ }
+
+ new->desc.dcmd = chan->dcmd | DCMD_ENDIRQEN |
+ (DCMD_LENGTH & period_len);
+ new->desc.dsadr = dma_src;
+ new->desc.dtadr = dma_dst;
+
+ if (!first)
+ first = new;
+ else
+ prev->desc.ddadr = new->async_tx.phys;
+
+ new->async_tx.cookie = 0;
+ async_tx_ack(&new->async_tx);
+
+ prev = new;
+ len -= period_len;
+
+ if (chan->dir == DMA_MEM_TO_DEV)
+ dma_src += period_len;
+ else
+ dma_dst += period_len;
+
+ /* Insert the link descriptor to the LD ring */
+ list_add_tail(&new->node, &first->tx_list);
+ } while (len);
+
+ first->async_tx.flags = flags; /* client is in control of this ack */
+ first->async_tx.cookie = -EBUSY;
+
+ /* make the cyclic link */
+ new->desc.ddadr = first->async_tx.phys;
+ chan->cyclic_first = first;
+
return &first->async_tx;
fail:
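A sketch of how a client would start the new cyclic mode (names and sizes are assumptions). Every period descriptor carries DCMD_ENDIRQEN, so the completion callback fires once per period while the ring keeps looping:

#include <linux/dmaengine.h>

static struct dma_async_tx_descriptor *
example_start_ring(struct dma_chan *chan, dma_addr_t ring,
		   size_t ring_len, size_t period)
{
	/*
	 * ring_len must be a multiple of period, and period may not
	 * exceed PDMA_MAX_DESC_BYTES, or the prep hook above bails out.
	 */
	return dmaengine_prep_dma_cyclic(chan, ring, ring_len, period,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
}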
@@ -581,10 +692,7 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
switch (cmd) {
case DMA_TERMINATE_ALL:
disable_chan(chan->phy);
- if (chan->phy) {
- chan->phy->vchan = NULL;
- chan->phy = NULL;
- }
+ mmp_pdma_free_phy(chan);
spin_lock_irqsave(&chan->desc_lock, flags);
mmp_pdma_free_desc_list(chan, &chan->chain_pending);
mmp_pdma_free_desc_list(chan, &chan->chain_running);
@@ -619,8 +727,13 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
chan->dcmd |= DCMD_BURST32;
chan->dir = cfg->direction;
- chan->drcmr = cfg->slave_id;
chan->dev_addr = addr;
+ /* FIXME: drivers should be ported over to use the filter
+ * function. Once that's done, the following two lines can
+ * be removed.
+ */
+ if (cfg->slave_id)
+ chan->drcmr = cfg->slave_id;
break;
default:
return -ENOSYS;
@@ -632,15 +745,7 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
- struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
- enum dma_status ret;
- unsigned long flags;
-
- spin_lock_irqsave(&chan->desc_lock, flags);
- ret = dma_cookie_status(dchan, cookie, txstate);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
-
- return ret;
+ return dma_cookie_status(dchan, cookie, txstate);
}
/**
@@ -669,29 +774,51 @@ static void dma_do_tasklet(unsigned long data)
LIST_HEAD(chain_cleanup);
unsigned long flags;
- /* submit pending list; callback for each desc; free desc */
+ if (chan->cyclic_first) {
+ dma_async_tx_callback cb = NULL;
+ void *cb_data = NULL;
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ desc = chan->cyclic_first;
+ cb = desc->async_tx.callback;
+ cb_data = desc->async_tx.callback_param;
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ if (cb)
+ cb(cb_data);
- /* update the cookie if we have some descriptors to cleanup */
- if (!list_empty(&chan->chain_running)) {
- dma_cookie_t cookie;
+ return;
+ }
- desc = to_mmp_pdma_desc(chan->chain_running.prev);
- cookie = desc->async_tx.cookie;
- dma_cookie_complete(&desc->async_tx);
+ /* submit pending list; callback for each desc; free desc */
+ spin_lock_irqsave(&chan->desc_lock, flags);
- dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
+ list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
+ /*
+ * move the descriptors to a temporary list so we can drop
+ * the lock during the entire cleanup operation
+ */
+ list_del(&desc->node);
+ list_add(&desc->node, &chain_cleanup);
+
+ /*
+ * Look for the first list entry which has the ENDIRQEN flag
+ * set. That is the descriptor we got an interrupt for, so
+ * complete that transaction and its cookie.
+ */
+ if (desc->desc.dcmd & DCMD_ENDIRQEN) {
+ dma_cookie_t cookie = desc->async_tx.cookie;
+ dma_cookie_complete(&desc->async_tx);
+ dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
+ break;
+ }
}
/*
- * move the descriptors to a temporary list so we can drop the lock
- * during the entire cleanup operation
+ * The hardware is idle and ready for more when the
+ * chain_running list is empty.
*/
- list_splice_tail_init(&chan->chain_running, &chain_cleanup);
-
- /* the hardware is now idle and ready for more */
- chan->idle = true;
+ chan->idle = list_empty(&chan->chain_running);
/* Start any pending transactions automatically */
start_pending_queue(chan);
@@ -763,6 +890,39 @@ static struct of_device_id mmp_pdma_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
+static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct mmp_pdma_device *d = ofdma->of_dma_data;
+ struct dma_chan *chan, *candidate;
+
+retry:
+ candidate = NULL;
+
+ /* walk the list of channels registered with the current instance and
+ * find one that is currently unused */
+ list_for_each_entry(chan, &d->device.channels, device_node)
+ if (chan->client_count == 0) {
+ candidate = chan;
+ break;
+ }
+
+ if (!candidate)
+ return NULL;
+
+ /* dma_get_slave_channel will return NULL if we lost a race between
+ * the lookup and the reservation */
+ chan = dma_get_slave_channel(candidate);
+
+ if (chan) {
+ struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
+ c->drcmr = dma_spec->args[0];
+ return chan;
+ }
+
+ goto retry;
+}
+
static int mmp_pdma_probe(struct platform_device *op)
{
struct mmp_pdma_device *pdev;
@@ -777,10 +937,9 @@ static int mmp_pdma_probe(struct platform_device *op)
return -ENOMEM;
pdev->dev = &op->dev;
- iores = platform_get_resource(op, IORESOURCE_MEM, 0);
- if (!iores)
- return -EINVAL;
+ spin_lock_init(&pdev->phy_lock);
+ iores = platform_get_resource(op, IORESOURCE_MEM, 0);
pdev->base = devm_ioremap_resource(pdev->dev, iores);
if (IS_ERR(pdev->base))
return PTR_ERR(pdev->base);
@@ -825,13 +984,15 @@ static int mmp_pdma_probe(struct platform_device *op)
dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
- dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
+ dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
+ dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask);
pdev->device.dev = &op->dev;
pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
pdev->device.device_tx_status = mmp_pdma_tx_status;
pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
+ pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
pdev->device.device_issue_pending = mmp_pdma_issue_pending;
pdev->device.device_control = mmp_pdma_control;
pdev->device.copy_align = PDMA_ALIGNMENT;
@@ -847,7 +1008,17 @@ static int mmp_pdma_probe(struct platform_device *op)
return ret;
}
- dev_info(pdev->device.dev, "initialized\n");
+ if (op->dev.of_node) {
+ /* Device-tree DMA controller registration */
+ ret = of_dma_controller_register(op->dev.of_node,
+ mmp_pdma_dma_xlate, pdev);
+ if (ret < 0) {
+ dev_err(&op->dev, "of_dma_controller_register failed\n");
+ return ret;
+ }
+ }
+
+ dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
return 0;
}
@@ -867,6 +1038,19 @@ static struct platform_driver mmp_pdma_driver = {
.remove = mmp_pdma_remove,
};
+bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
+
+ if (chan->device->dev->driver != &mmp_pdma_driver.driver)
+ return false;
+
+ c->drcmr = *(unsigned int *) param;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
+
module_platform_driver(mmp_pdma_driver);
MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 9b93665..38cb517 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -460,7 +460,8 @@ static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
{
struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
- dma_set_residue(txstate, tdmac->buf_len - tdmac->pos);
+ dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
+ tdmac->buf_len - tdmac->pos);
return tdmac->status;
}
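With dma_set_tx_state() the channel now reports the cookie bounds and a residue, so a client can poll the transfer position; a minimal sketch:

#include <linux/dmaengine.h>

static u32 example_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;

	dmaengine_tx_status(chan, cookie, &state);
	return state.residue;	/* buf_len - pos, per the hook above */
}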
@@ -549,9 +550,6 @@ static int mmp_tdma_probe(struct platform_device *pdev)
}
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iores)
- return -EINVAL;
-
tdev->base = devm_ioremap_resource(&pdev->dev, iores);
if (IS_ERR(tdev->base))
return PTR_ERR(tdev->base);
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 2d95673..2fe4353 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -556,15 +556,7 @@ static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
- enum dma_status ret;
- unsigned long flags;
-
- spin_lock_irqsave(&mchan->lock, flags);
- ret = dma_cookie_status(chan, cookie, txstate);
- spin_unlock_irqrestore(&mchan->lock, flags);
-
- return ret;
+ return dma_cookie_status(chan, cookie, txstate);
}
/* Prepare descriptor for memory to memory copy */
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 200f1a3..536dcb8 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -64,7 +64,7 @@ static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
int src_idx)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
- return hw_desc->phy_src_addr[src_idx];
+ return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)];
}
@@ -107,32 +107,32 @@ static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
int index, dma_addr_t addr)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
- hw_desc->phy_src_addr[index] = addr;
+ hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
if (desc->type == DMA_XOR)
hw_desc->desc_command |= (1 << index);
}
static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
- return __raw_readl(XOR_CURR_DESC(chan));
+ return readl_relaxed(XOR_CURR_DESC(chan));
}
static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
u32 next_desc_addr)
{
- __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
+ writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}
static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
- u32 val = __raw_readl(XOR_INTR_MASK(chan));
+ u32 val = readl_relaxed(XOR_INTR_MASK(chan));
val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
- __raw_writel(val, XOR_INTR_MASK(chan));
+ writel_relaxed(val, XOR_INTR_MASK(chan));
}
static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
- u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
+ u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
return intr_cause;
}
@@ -149,13 +149,13 @@ static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
u32 val = ~(1 << (chan->idx * 16));
dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
- __raw_writel(val, XOR_INTR_CAUSE(chan));
+ writel_relaxed(val, XOR_INTR_CAUSE(chan));
}
static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
u32 val = 0xFFFF0000 >> (chan->idx * 16);
- __raw_writel(val, XOR_INTR_CAUSE(chan));
+ writel_relaxed(val, XOR_INTR_CAUSE(chan));
}
static int mv_can_chain(struct mv_xor_desc_slot *desc)
@@ -173,7 +173,7 @@ static void mv_set_mode(struct mv_xor_chan *chan,
enum dma_transaction_type type)
{
u32 op_mode;
- u32 config = __raw_readl(XOR_CONFIG(chan));
+ u32 config = readl_relaxed(XOR_CONFIG(chan));
switch (type) {
case DMA_XOR:
@@ -192,7 +192,14 @@ static void mv_set_mode(struct mv_xor_chan *chan,
config &= ~0x7;
config |= op_mode;
- __raw_writel(config, XOR_CONFIG(chan));
+
+#if defined(__BIG_ENDIAN)
+ config |= XOR_DESCRIPTOR_SWAP;
+#else
+ config &= ~XOR_DESCRIPTOR_SWAP;
+#endif
+
+ writel_relaxed(config, XOR_CONFIG(chan));
chan->current_type = type;
}
@@ -201,14 +208,14 @@ static void mv_chan_activate(struct mv_xor_chan *chan)
u32 activation;
dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
- activation = __raw_readl(XOR_ACTIVATION(chan));
+ activation = readl_relaxed(XOR_ACTIVATION(chan));
activation |= 0x1;
- __raw_writel(activation, XOR_ACTIVATION(chan));
+ writel_relaxed(activation, XOR_ACTIVATION(chan));
}
static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
- u32 state = __raw_readl(XOR_ACTIVATION(chan));
+ u32 state = readl_relaxed(XOR_ACTIVATION(chan));
state = (state >> 4) & 0x3;
@@ -647,7 +654,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
dev_dbg(mv_chan_to_devp(mv_chan),
"%s sw_desc %p async_tx %p\n",
- __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);
+ __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
return sw_desc ? &sw_desc->async_tx : NULL;
}
@@ -755,22 +762,22 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
u32 val;
- val = __raw_readl(XOR_CONFIG(chan));
+ val = readl_relaxed(XOR_CONFIG(chan));
dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);
- val = __raw_readl(XOR_ACTIVATION(chan));
+ val = readl_relaxed(XOR_ACTIVATION(chan));
dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);
- val = __raw_readl(XOR_INTR_CAUSE(chan));
+ val = readl_relaxed(XOR_INTR_CAUSE(chan));
dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);
- val = __raw_readl(XOR_INTR_MASK(chan));
+ val = readl_relaxed(XOR_INTR_MASK(chan));
dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);
- val = __raw_readl(XOR_ERROR_CAUSE(chan));
+ val = readl_relaxed(XOR_ERROR_CAUSE(chan));
dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);
- val = __raw_readl(XOR_ERROR_ADDR(chan));
+ val = readl_relaxed(XOR_ERROR_ADDR(chan));
dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}
@@ -1029,10 +1036,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
struct dma_device *dma_dev;
mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
- if (!mv_chan) {
- ret = -ENOMEM;
- goto err_free_dma;
- }
+ if (!mv_chan)
+ return ERR_PTR(-ENOMEM);
mv_chan->idx = idx;
mv_chan->irq = irq;
@@ -1166,7 +1171,7 @@ static int mv_xor_probe(struct platform_device *pdev)
{
const struct mbus_dram_target_info *dram;
struct mv_xor_device *xordev;
- struct mv_xor_platform_data *pdata = pdev->dev.platform_data;
+ struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct resource *res;
int i, ret;
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index c619359..06b067f 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -29,8 +29,10 @@
#define MV_XOR_THRESHOLD 1
#define MV_XOR_MAX_CHANNELS 2
+/* Values for the XOR_CONFIG register */
#define XOR_OPERATION_MODE_XOR 0
#define XOR_OPERATION_MODE_MEMCPY 2
+#define XOR_DESCRIPTOR_SWAP BIT(14)
#define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4))
#define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4))
@@ -143,7 +145,16 @@ struct mv_xor_desc_slot {
#endif
};
-/* This structure describes XOR descriptor size 64bytes */
+/*
+ * This structure describes an XOR descriptor of size 64 bytes. The
+ * mv_phy_src_idx() macro must be used when indexing the values of the
+ * phy_src_addr[] array. This is due to the fact that the 'descriptor
+ * swap' feature, used on big-endian systems, swaps descriptor data
+ * within blocks of 8 bytes. So two consecutive values of the
+ * phy_src_addr[] array are actually swapped in big-endian mode, which
+ * explains the different mv_phy_src_idx() implementation.
+ */
+#if defined(__LITTLE_ENDIAN)
struct mv_xor_desc {
u32 status; /* descriptor execution status */
u32 crc32_result; /* result of CRC-32 calculation */
@@ -155,6 +166,21 @@ struct mv_xor_desc {
u32 reserved0;
u32 reserved1;
};
+#define mv_phy_src_idx(src_idx) (src_idx)
+#else
+struct mv_xor_desc {
+ u32 crc32_result; /* result of CRC-32 calculation */
+ u32 status; /* descriptor execution status */
+ u32 phy_next_desc; /* next descriptor address pointer */
+ u32 desc_command; /* type of operation to be carried out */
+ u32 phy_dest_addr; /* destination block address */
+ u32 byte_count; /* size of src/dst blocks in bytes */
+ u32 phy_src_addr[8]; /* source block addresses */
+ u32 reserved1;
+ u32 reserved0;
+};
+#define mv_phy_src_idx(src_idx) (src_idx ^ 1)
+#endif
#define to_mv_sw_desc(addr_hw_desc) \
container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)
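The index swap can be read straight off the two layouts: the hardware swaps each 8-byte pair of the descriptor on big-endian, so mv_phy_src_idx() pairs the indices as

/*
 *   0 <-> 1, 2 <-> 3, 4 <-> 5, 6 <-> 7
 *
 * i.e. phy_src_addr[mv_phy_src_idx(n)] is the u32 the engine actually
 * reads as source address n after its descriptor swap.
 */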
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 7195930..ccd13df 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -23,7 +23,6 @@
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/module.h>
-#include <linux/fsl/mxs-dma.h>
#include <linux/stmp_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -197,24 +196,6 @@ static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
return container_of(chan, struct mxs_dma_chan, chan);
}
-int mxs_dma_is_apbh(struct dma_chan *chan)
-{
- struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
- struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
-
- return dma_is_apbh(mxs_dma);
-}
-EXPORT_SYMBOL_GPL(mxs_dma_is_apbh);
-
-int mxs_dma_is_apbx(struct dma_chan *chan)
-{
- struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
- struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
-
- return !dma_is_apbh(mxs_dma);
-}
-EXPORT_SYMBOL_GPL(mxs_dma_is_apbx);
-
static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
{
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -349,13 +330,9 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
- struct mxs_dma_data *data = chan->private;
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int ret;
- if (data)
- mxs_chan->chan_irq = data->chan_irq;
-
mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
CCW_BLOCK_SIZE, &mxs_chan->ccw_phys,
GFP_KERNEL);
@@ -622,10 +599,8 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
- dma_cookie_t last_used;
- last_used = chan->cookie;
- dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0);
+ dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);
return mxs_chan->status;
}
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 75334bd..0b88dd3 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -160,7 +160,8 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
count = of_property_count_strings(np, "dma-names");
if (count < 0) {
- pr_err("%s: dma-names property missing or empty\n", __func__);
+ pr_err("%s: dma-names property of node '%s' missing or empty\n",
+ __func__, np->full_name);
return NULL;
}
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 0bbdea5..61fdc54 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -564,14 +564,7 @@ static void pd_free_chan_resources(struct dma_chan *chan)
static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- struct pch_dma_chan *pd_chan = to_pd_chan(chan);
- enum dma_status ret;
-
- spin_lock_irq(&pd_chan->lock);
- ret = dma_cookie_status(chan, cookie, txstate);
- spin_unlock_irq(&pd_chan->lock);
-
- return ret;
+ return dma_cookie_status(chan, cookie, txstate);
}
static void pd_issue_pending(struct dma_chan *chan)
@@ -1036,3 +1029,4 @@ MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
"DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, pch_dma_id_table);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index fa645d8..a562d24 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -545,6 +545,8 @@ struct dma_pl330_chan {
/* List of to be xfered descriptors */
struct list_head work_list;
+ /* List of completed descriptors */
+ struct list_head completed_list;
/* Pointer to the DMAC that manages this channel,
* NULL if the channel is available to be acquired.
@@ -2198,66 +2200,6 @@ to_desc(struct dma_async_tx_descriptor *tx)
return container_of(tx, struct dma_pl330_desc, txd);
}
-static inline void free_desc_list(struct list_head *list)
-{
- struct dma_pl330_dmac *pdmac;
- struct dma_pl330_desc *desc;
- struct dma_pl330_chan *pch = NULL;
- unsigned long flags;
-
- /* Finish off the work list */
- list_for_each_entry(desc, list, node) {
- dma_async_tx_callback callback;
- void *param;
-
- /* All desc in a list belong to same channel */
- pch = desc->pchan;
- callback = desc->txd.callback;
- param = desc->txd.callback_param;
-
- if (callback)
- callback(param);
-
- desc->pchan = NULL;
- }
-
- /* pch will be unset if list was empty */
- if (!pch)
- return;
-
- pdmac = pch->dmac;
-
- spin_lock_irqsave(&pdmac->pool_lock, flags);
- list_splice_tail_init(list, &pdmac->desc_pool);
- spin_unlock_irqrestore(&pdmac->pool_lock, flags);
-}
-
-static inline void handle_cyclic_desc_list(struct list_head *list)
-{
- struct dma_pl330_desc *desc;
- struct dma_pl330_chan *pch = NULL;
- unsigned long flags;
-
- list_for_each_entry(desc, list, node) {
- dma_async_tx_callback callback;
-
- /* Change status to reload it */
- desc->status = PREP;
- pch = desc->pchan;
- callback = desc->txd.callback;
- if (callback)
- callback(desc->txd.callback_param);
- }
-
- /* pch will be unset if list was empty */
- if (!pch)
- return;
-
- spin_lock_irqsave(&pch->lock, flags);
- list_splice_tail_init(list, &pch->work_list);
- spin_unlock_irqrestore(&pch->lock, flags);
-}
-
static inline void fill_queue(struct dma_pl330_chan *pch)
{
struct dma_pl330_desc *desc;
@@ -2291,7 +2233,6 @@ static void pl330_tasklet(unsigned long data)
struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
struct dma_pl330_desc *desc, *_dt;
unsigned long flags;
- LIST_HEAD(list);
spin_lock_irqsave(&pch->lock, flags);
@@ -2300,7 +2241,7 @@ static void pl330_tasklet(unsigned long data)
if (desc->status == DONE) {
if (!pch->cyclic)
dma_cookie_complete(&desc->txd);
- list_move_tail(&desc->node, &list);
+ list_move_tail(&desc->node, &pch->completed_list);
}
/* Try to submit a req imm. next to the last completed cookie */
@@ -2309,12 +2250,31 @@ static void pl330_tasklet(unsigned long data)
/* Make sure the PL330 Channel thread is active */
pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);
- spin_unlock_irqrestore(&pch->lock, flags);
+ while (!list_empty(&pch->completed_list)) {
+ dma_async_tx_callback callback;
+ void *callback_param;
- if (pch->cyclic)
- handle_cyclic_desc_list(&list);
- else
- free_desc_list(&list);
+ desc = list_first_entry(&pch->completed_list,
+ struct dma_pl330_desc, node);
+
+ callback = desc->txd.callback;
+ callback_param = desc->txd.callback_param;
+
+ if (pch->cyclic) {
+ desc->status = PREP;
+ list_move_tail(&desc->node, &pch->work_list);
+ } else {
+ desc->status = FREE;
+ list_move_tail(&desc->node, &pch->dmac->desc_pool);
+ }
+
+ if (callback) {
+ spin_unlock_irqrestore(&pch->lock, flags);
+ callback(callback_param);
+ spin_lock_irqsave(&pch->lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&pch->lock, flags);
}
static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
@@ -2409,7 +2369,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
struct dma_pl330_chan *pch = to_pchan(chan);
- struct dma_pl330_desc *desc, *_dt;
+ struct dma_pl330_desc *desc;
unsigned long flags;
struct dma_pl330_dmac *pdmac = pch->dmac;
struct dma_slave_config *slave_config;
@@ -2423,12 +2383,18 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
/* Mark all desc done */
- list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
- desc->status = DONE;
- list_move_tail(&desc->node, &list);
+ list_for_each_entry(desc, &pch->work_list , node) {
+ desc->status = FREE;
+ dma_cookie_complete(&desc->txd);
}
- list_splice_tail_init(&list, &pdmac->desc_pool);
+ list_for_each_entry(desc, &pch->completed_list , node) {
+ desc->status = FREE;
+ dma_cookie_complete(&desc->txd);
+ }
+
+ list_splice_tail_init(&pch->work_list, &pdmac->desc_pool);
+ list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool);
spin_unlock_irqrestore(&pch->lock, flags);
break;
case DMA_SLAVE_CONFIG:
@@ -2814,6 +2780,28 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
return &desc->txd;
}
+static void __pl330_giveback_desc(struct dma_pl330_dmac *pdmac,
+ struct dma_pl330_desc *first)
+{
+ unsigned long flags;
+ struct dma_pl330_desc *desc;
+
+ if (!first)
+ return;
+
+ spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+ while (!list_empty(&first->node)) {
+ desc = list_entry(first->node.next,
+ struct dma_pl330_desc, node);
+ list_move_tail(&desc->node, &pdmac->desc_pool);
+ }
+
+ list_move_tail(&first->node, &pdmac->desc_pool);
+
+ spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+}
+
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
@@ -2822,7 +2810,6 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct dma_pl330_desc *first, *desc = NULL;
struct dma_pl330_chan *pch = to_pchan(chan);
struct scatterlist *sg;
- unsigned long flags;
int i;
dma_addr_t addr;
@@ -2842,20 +2829,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
dev_err(pch->dmac->pif.dev,
"%s:%d Unable to fetch desc\n",
__func__, __LINE__);
- if (!first)
- return NULL;
-
- spin_lock_irqsave(&pdmac->pool_lock, flags);
-
- while (!list_empty(&first->node)) {
- desc = list_entry(first->node.next,
- struct dma_pl330_desc, node);
- list_move_tail(&desc->node, &pdmac->desc_pool);
- }
-
- list_move_tail(&first->node, &pdmac->desc_pool);
-
- spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+ __pl330_giveback_desc(pdmac, first);
return NULL;
}
@@ -2896,6 +2870,25 @@ static irqreturn_t pl330_irq_handler(int irq, void *data)
return IRQ_NONE;
}
+#define PL330_DMA_BUSWIDTHS \
+ BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+ BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
+
+static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
+ struct dma_slave_caps *caps)
+{
+ caps->src_addr_widths = PL330_DMA_BUSWIDTHS;
+ caps->dstn_addr_widths = PL330_DMA_BUSWIDTHS;
+ caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ caps->cmd_pause = false;
+ caps->cmd_terminate = true;
+
+ return 0;
+}
+
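A client can probe these capabilities through the matching helper that landed with the slave-caps API (a minimal sketch, assuming dma_get_slave_caps() is available):

#include <linux/dmaengine.h>

static bool example_can_pause(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps))
		return false;
	return caps.cmd_pause;	/* false for PL330, per the hook above */
}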
static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
@@ -2908,7 +2901,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
int i, ret, irq;
int num_chan;
- pdat = adev->dev.platform_data;
+ pdat = dev_get_platdata(&adev->dev);
/* Allocate a new DMAC and its Channels */
pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
@@ -2971,6 +2964,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pch->chan.private = adev->dev.of_node;
INIT_LIST_HEAD(&pch->work_list);
+ INIT_LIST_HEAD(&pch->completed_list);
spin_lock_init(&pch->lock);
pch->pl330_chid = NULL;
pch->chan.device = pd;
@@ -3000,6 +2994,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pd->device_prep_slave_sg = pl330_prep_slave_sg;
pd->device_control = pl330_control;
pd->device_issue_pending = pl330_issue_pending;
+ pd->device_slave_caps = pl330_dma_device_slave_caps;
ret = dma_async_device_register(pd);
if (ret) {
@@ -3015,6 +3010,14 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
"unable to register DMA to the generic DT DMA helpers\n");
}
}
+ /*
+ * This is the limit for transfers with a buswidth of 1, larger
+ * buswidths will have larger limits.
+ */
+ ret = dma_set_max_seg_size(&adev->dev, 1900800);
+ if (ret)
+ dev_err(&adev->dev, "unable to set the seg size\n");
+
dev_info(&adev->dev,
"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 5c1dee2..dadd9e01 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -22,3 +22,13 @@ config SUDMAC
depends on SH_DMAE_BASE
help
Enable support for the Renesas SUDMAC controllers.
+
+config RCAR_HPB_DMAE
+ tristate "Renesas R-Car HPB DMAC support"
+ depends on SH_DMAE_BASE
+ help
+ Enable support for the Renesas R-Car series DMA controllers.
+
+config SHDMA_R8A73A4
+ def_bool y
+ depends on ARCH_R8A73A4 && SH_DMAE != n
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index c962138..e856af2 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -1,3 +1,9 @@
obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o
obj-$(CONFIG_SH_DMAE) += shdma.o
+shdma-y := shdmac.o
+ifeq ($(CONFIG_OF),y)
+shdma-$(CONFIG_SHDMA_R8A73A4) += shdma-r8a73a4.o
+endif
+shdma-objs := $(shdma-y)
obj-$(CONFIG_SUDMAC) += sudmac.o
+obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
new file mode 100644
index 0000000..45a5202
--- /dev/null
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -0,0 +1,655 @@
+/*
+ * Copyright (C) 2011-2013 Renesas Electronics Corporation
+ * Copyright (C) 2013 Cogent Embedded, Inc.
+ *
+ * This file is based on the drivers/dma/sh/shdma.c
+ *
+ * Renesas SuperH DMA Engine support
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * - The SuperH DMA controller has no hardware DMA chain mode.
+ * - The maximum DMA transfer size is 16 MB.
+ *
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_data/dma-rcar-hpbdma.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/shdma-base.h>
+#include <linux/slab.h>
+
+/* DMA channel registers */
+#define HPB_DMAE_DSAR0 0x00
+#define HPB_DMAE_DDAR0 0x04
+#define HPB_DMAE_DTCR0 0x08
+#define HPB_DMAE_DSAR1 0x0C
+#define HPB_DMAE_DDAR1 0x10
+#define HPB_DMAE_DTCR1 0x14
+#define HPB_DMAE_DSASR 0x18
+#define HPB_DMAE_DDASR 0x1C
+#define HPB_DMAE_DTCSR 0x20
+#define HPB_DMAE_DPTR 0x24
+#define HPB_DMAE_DCR 0x28
+#define HPB_DMAE_DCMDR 0x2C
+#define HPB_DMAE_DSTPR 0x30
+#define HPB_DMAE_DSTSR 0x34
+#define HPB_DMAE_DDBGR 0x38
+#define HPB_DMAE_DDBGR2 0x3C
+#define HPB_DMAE_CHAN(n) (0x40 * (n))
+
+/* DMA command register (DCMDR) bits */
+#define HPB_DMAE_DCMDR_BDOUT BIT(7)
+#define HPB_DMAE_DCMDR_DQSPD BIT(6)
+#define HPB_DMAE_DCMDR_DQSPC BIT(5)
+#define HPB_DMAE_DCMDR_DMSPD BIT(4)
+#define HPB_DMAE_DCMDR_DMSPC BIT(3)
+#define HPB_DMAE_DCMDR_DQEND BIT(2)
+#define HPB_DMAE_DCMDR_DNXT BIT(1)
+#define HPB_DMAE_DCMDR_DMEN BIT(0)
+
+/* DMA forced stop register (DSTPR) bits */
+#define HPB_DMAE_DSTPR_DMSTP BIT(0)
+
+/* DMA status register (DSTSR) bits */
+#define HPB_DMAE_DSTSR_DMSTS BIT(0)
+
+/* DMA common registers */
+#define HPB_DMAE_DTIMR 0x00
+#define HPB_DMAE_DINTSR0 0x0C
+#define HPB_DMAE_DINTSR1 0x10
+#define HPB_DMAE_DINTCR0 0x14
+#define HPB_DMAE_DINTCR1 0x18
+#define HPB_DMAE_DINTMR0 0x1C
+#define HPB_DMAE_DINTMR1 0x20
+#define HPB_DMAE_DACTSR0 0x24
+#define HPB_DMAE_DACTSR1 0x28
+#define HPB_DMAE_HSRSTR(n) (0x40 + (n) * 4)
+#define HPB_DMAE_HPB_DMASPR(n) (0x140 + (n) * 4)
+#define HPB_DMAE_HPB_DMLVLR0 0x160
+#define HPB_DMAE_HPB_DMLVLR1 0x164
+#define HPB_DMAE_HPB_DMSHPT0 0x168
+#define HPB_DMAE_HPB_DMSHPT1 0x16C
+
+#define HPB_DMA_SLAVE_NUMBER 256
+#define HPB_DMA_TCR_MAX 0x01000000 /* 16 MiB */
+
+struct hpb_dmae_chan {
+ struct shdma_chan shdma_chan;
+ int xfer_mode; /* DMA transfer mode */
+#define XFER_SINGLE 1
+#define XFER_DOUBLE 2
+ unsigned plane_idx; /* current DMA information set */
+ bool first_desc; /* first/next transfer */
+ int xmit_shift; /* log_2(bytes_per_xfer) */
+ void __iomem *base;
+ const struct hpb_dmae_slave_config *cfg;
+ char dev_id[16]; /* unique name per channel of the DMAC */
+};
+
+struct hpb_dmae_device {
+ struct shdma_dev shdma_dev;
+ spinlock_t reg_lock; /* comm_reg operation lock */
+ struct hpb_dmae_pdata *pdata;
+ void __iomem *chan_reg;
+ void __iomem *comm_reg;
+ void __iomem *reset_reg;
+ void __iomem *mode_reg;
+};
+
+struct hpb_dmae_regs {
+ u32 sar; /* SAR / source address */
+ u32 dar; /* DAR / destination address */
+ u32 tcr; /* TCR / transfer count */
+};
+
+struct hpb_desc {
+ struct shdma_desc shdma_desc;
+ struct hpb_dmae_regs hw;
+ unsigned plane_idx;
+};
+
+#define to_chan(schan) container_of(schan, struct hpb_dmae_chan, shdma_chan)
+#define to_desc(sdesc) container_of(sdesc, struct hpb_desc, shdma_desc)
+#define to_dev(sc) container_of(sc->shdma_chan.dma_chan.device, \
+ struct hpb_dmae_device, shdma_dev.dma_dev)
+
+static void ch_reg_write(struct hpb_dmae_chan *hpb_dc, u32 data, u32 reg)
+{
+ iowrite32(data, hpb_dc->base + reg);
+}
+
+static u32 ch_reg_read(struct hpb_dmae_chan *hpb_dc, u32 reg)
+{
+ return ioread32(hpb_dc->base + reg);
+}
+
+static void dcmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
+{
+ iowrite32(data, hpbdev->chan_reg + HPB_DMAE_DCMDR);
+}
+
+static void hsrstr_write(struct hpb_dmae_device *hpbdev, u32 ch)
+{
+ iowrite32(0x1, hpbdev->comm_reg + HPB_DMAE_HSRSTR(ch));
+}
+
+static u32 dintsr_read(struct hpb_dmae_device *hpbdev, u32 ch)
+{
+ u32 v;
+
+ if (ch < 32)
+ v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR0) >> ch;
+ else
+ v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR1) >> (ch - 32);
+ return v & 0x1;
+}
+
+static void dintcr_write(struct hpb_dmae_device *hpbdev, u32 ch)
+{
+ if (ch < 32)
+ iowrite32((0x1 << ch), hpbdev->comm_reg + HPB_DMAE_DINTCR0);
+ else
+ iowrite32((0x1 << (ch - 32)),
+ hpbdev->comm_reg + HPB_DMAE_DINTCR1);
+}
+
+static void asyncmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
+{
+ iowrite32(data, hpbdev->mode_reg);
+}
+
+static u32 asyncmdr_read(struct hpb_dmae_device *hpbdev)
+{
+ return ioread32(hpbdev->mode_reg);
+}
+
+static void hpb_dmae_enable_int(struct hpb_dmae_device *hpbdev, u32 ch)
+{
+ u32 intreg;
+
+ spin_lock_irq(&hpbdev->reg_lock);
+ if (ch < 32) {
+ intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR0);
+ iowrite32(BIT(ch) | intreg,
+ hpbdev->comm_reg + HPB_DMAE_DINTMR0);
+ } else {
+ intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR1);
+ iowrite32(BIT(ch - 32) | intreg,
+ hpbdev->comm_reg + HPB_DMAE_DINTMR1);
+ }
+ spin_unlock_irq(&hpbdev->reg_lock);
+}
+
+static void hpb_dmae_async_reset(struct hpb_dmae_device *hpbdev, u32 data)
+{
+ u32 rstr;
+ int timeout = 10000; /* 100 ms */
+
+ spin_lock(&hpbdev->reg_lock);
+ rstr = ioread32(hpbdev->reset_reg);
+ rstr |= data;
+ iowrite32(rstr, hpbdev->reset_reg);
+ do {
+ rstr = ioread32(hpbdev->reset_reg);
+ if ((rstr & data) == data)
+ break;
+ udelay(10);
+ } while (timeout--);
+
+ if (timeout < 0)
+ dev_err(hpbdev->shdma_dev.dma_dev.dev,
+ "%s timeout\n", __func__);
+
+ rstr &= ~data;
+ iowrite32(rstr, hpbdev->reset_reg);
+ spin_unlock(&hpbdev->reg_lock);
+}
+
+static void hpb_dmae_set_async_mode(struct hpb_dmae_device *hpbdev,
+ u32 mask, u32 data)
+{
+ u32 mode;
+
+ spin_lock_irq(&hpbdev->reg_lock);
+ mode = asyncmdr_read(hpbdev);
+ mode &= ~mask;
+ mode |= data;
+ asyncmdr_write(hpbdev, mode);
+ spin_unlock_irq(&hpbdev->reg_lock);
+}
+
+static void hpb_dmae_ctl_stop(struct hpb_dmae_device *hpbdev)
+{
+ dcmdr_write(hpbdev, HPB_DMAE_DCMDR_DQSPD);
+}
+
+static void hpb_dmae_reset(struct hpb_dmae_device *hpbdev)
+{
+ u32 ch;
+
+ for (ch = 0; ch < hpbdev->pdata->num_hw_channels; ch++)
+ hsrstr_write(hpbdev, ch);
+}
+
+static unsigned int calc_xmit_shift(struct hpb_dmae_chan *hpb_chan)
+{
+ struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
+ struct hpb_dmae_pdata *pdata = hpbdev->pdata;
+ int width = ch_reg_read(hpb_chan, HPB_DMAE_DCR);
+ int i;
+
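+	/*
+	 * The DCR source and destination port-size fields must match;
+	 * any other combination falls back to 8-bit transfer units.
+	 */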
+ switch (width & (HPB_DMAE_DCR_SPDS_MASK | HPB_DMAE_DCR_DPDS_MASK)) {
+ case HPB_DMAE_DCR_SPDS_8BIT | HPB_DMAE_DCR_DPDS_8BIT:
+ default:
+ i = XMIT_SZ_8BIT;
+ break;
+ case HPB_DMAE_DCR_SPDS_16BIT | HPB_DMAE_DCR_DPDS_16BIT:
+ i = XMIT_SZ_16BIT;
+ break;
+ case HPB_DMAE_DCR_SPDS_32BIT | HPB_DMAE_DCR_DPDS_32BIT:
+ i = XMIT_SZ_32BIT;
+ break;
+ }
+ return pdata->ts_shift[i];
+}
+
+static void hpb_dmae_set_reg(struct hpb_dmae_chan *hpb_chan,
+ struct hpb_dmae_regs *hw, unsigned plane)
+{
+ ch_reg_write(hpb_chan, hw->sar,
+ plane ? HPB_DMAE_DSAR1 : HPB_DMAE_DSAR0);
+ ch_reg_write(hpb_chan, hw->dar,
+ plane ? HPB_DMAE_DDAR1 : HPB_DMAE_DDAR0);
+ ch_reg_write(hpb_chan, hw->tcr >> hpb_chan->xmit_shift,
+ plane ? HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
+}
+
+static void hpb_dmae_start(struct hpb_dmae_chan *hpb_chan, bool next)
+{
+ ch_reg_write(hpb_chan, (next ? HPB_DMAE_DCMDR_DNXT : 0) |
+ HPB_DMAE_DCMDR_DMEN, HPB_DMAE_DCMDR);
+}
+
+static void hpb_dmae_halt(struct shdma_chan *schan)
+{
+ struct hpb_dmae_chan *chan = to_chan(schan);
+
+ ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR);
+ ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR);
+}
+
+static const struct hpb_dmae_slave_config *
+hpb_dmae_find_slave(struct hpb_dmae_chan *hpb_chan, int slave_id)
+{
+ struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
+ struct hpb_dmae_pdata *pdata = hpbdev->pdata;
+ int i;
+
+ if (slave_id >= HPB_DMA_SLAVE_NUMBER)
+ return NULL;
+
+ for (i = 0; i < pdata->num_slaves; i++)
+ if (pdata->slaves[i].id == slave_id)
+ return pdata->slaves + i;
+
+ return NULL;
+}
+
+static void hpb_dmae_start_xfer(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct hpb_dmae_chan *chan = to_chan(schan);
+ struct hpb_dmae_device *hpbdev = to_dev(chan);
+ struct hpb_desc *desc = to_desc(sdesc);
+
+ if (chan->cfg->flags & HPB_DMAE_SET_ASYNC_RESET)
+ hpb_dmae_async_reset(hpbdev, chan->cfg->rstr);
+
+ desc->plane_idx = chan->plane_idx;
+ hpb_dmae_set_reg(chan, &desc->hw, chan->plane_idx);
+ hpb_dmae_start(chan, !chan->first_desc);
+
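+	/* Double-plane mode ping-pongs between the two register sets */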
+ if (chan->xfer_mode == XFER_DOUBLE) {
+ chan->plane_idx ^= 1;
+ chan->first_desc = false;
+ }
+}
+
+static bool hpb_dmae_desc_completed(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ /*
+	 * This is correct, since we always have at most a single
+	 * outstanding DMA transfer per channel, and by the time we
+	 * get the completion interrupt the transfer has completed.
+ * This will change if we ever use alternating DMA
+ * information sets and submit two descriptors at once.
+ */
+ return true;
+}
+
+static bool hpb_dmae_chan_irq(struct shdma_chan *schan, int irq)
+{
+ struct hpb_dmae_chan *chan = to_chan(schan);
+ struct hpb_dmae_device *hpbdev = to_dev(chan);
+ int ch = chan->cfg->dma_ch;
+
+ /* Check Complete DMA Transfer */
+ if (dintsr_read(hpbdev, ch)) {
+ /* Clear Interrupt status */
+ dintcr_write(hpbdev, ch);
+ return true;
+ }
+ return false;
+}
+
+static int hpb_dmae_desc_setup(struct shdma_chan *schan,
+ struct shdma_desc *sdesc,
+ dma_addr_t src, dma_addr_t dst, size_t *len)
+{
+ struct hpb_desc *desc = to_desc(sdesc);
+
+ if (*len > (size_t)HPB_DMA_TCR_MAX)
+ *len = (size_t)HPB_DMA_TCR_MAX;
+
+ desc->hw.sar = src;
+ desc->hw.dar = dst;
+ desc->hw.tcr = *len;
+
+ return 0;
+}
+
+static size_t hpb_dmae_get_partial(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct hpb_desc *desc = to_desc(sdesc);
+ struct hpb_dmae_chan *chan = to_chan(schan);
+ u32 tcr = ch_reg_read(chan, desc->plane_idx ?
+ HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
+
+ return (desc->hw.tcr - tcr) << chan->xmit_shift;
+}
+
+static bool hpb_dmae_channel_busy(struct shdma_chan *schan)
+{
+ struct hpb_dmae_chan *chan = to_chan(schan);
+ u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR);
+
+ return (dstsr & HPB_DMAE_DSTSR_DMSTS) == HPB_DMAE_DSTSR_DMSTS;
+}
+
+static int
+hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
+ const struct hpb_dmae_slave_config *cfg)
+{
+ struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
+ struct hpb_dmae_pdata *pdata = hpbdev->pdata;
+ const struct hpb_dmae_channel *channel = pdata->channels;
+ int slave_id = cfg->id;
+ int i, err;
+
+ for (i = 0; i < pdata->num_channels; i++, channel++) {
+ if (channel->s_id == slave_id) {
+ struct device *dev = hpb_chan->shdma_chan.dev;
+
+ hpb_chan->base = hpbdev->chan_reg +
+ HPB_DMAE_CHAN(cfg->dma_ch);
+
+ dev_dbg(dev, "Detected Slave device\n");
+ dev_dbg(dev, " -- slave_id : 0x%x\n", slave_id);
+ dev_dbg(dev, " -- cfg->dma_ch : %d\n", cfg->dma_ch);
+ dev_dbg(dev, " -- channel->ch_irq: %d\n",
+ channel->ch_irq);
+			break;
+		}
+	}
+
+	/* Fail cleanly if no channel entry matched this slave ID */
+	if (i == pdata->num_channels)
+		return -ENODEV;
+
+ err = shdma_request_irq(&hpb_chan->shdma_chan, channel->ch_irq,
+ IRQF_SHARED, hpb_chan->dev_id);
+ if (err) {
+ dev_err(hpb_chan->shdma_chan.dev,
+ "DMA channel request_irq %d failed with error %d\n",
+ channel->ch_irq, err);
+ return err;
+ }
+
+ hpb_chan->plane_idx = 0;
+ hpb_chan->first_desc = true;
+
+ if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) == 0) {
+ hpb_chan->xfer_mode = XFER_SINGLE;
+ } else if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) ==
+ (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) {
+ hpb_chan->xfer_mode = XFER_DOUBLE;
+ } else {
+		dev_err(hpb_chan->shdma_chan.dev, "DCR setting error\n");
+ shdma_free_irq(&hpb_chan->shdma_chan);
+ return -EINVAL;
+ }
+
+ if (cfg->flags & HPB_DMAE_SET_ASYNC_MODE)
+ hpb_dmae_set_async_mode(hpbdev, cfg->mdm, cfg->mdr);
+ ch_reg_write(hpb_chan, cfg->dcr, HPB_DMAE_DCR);
+ ch_reg_write(hpb_chan, cfg->port, HPB_DMAE_DPTR);
+ hpb_chan->xmit_shift = calc_xmit_shift(hpb_chan);
+ hpb_dmae_enable_int(hpbdev, cfg->dma_ch);
+
+ return 0;
+}
+
+static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try)
+{
+ struct hpb_dmae_chan *chan = to_chan(schan);
+ const struct hpb_dmae_slave_config *sc =
+ hpb_dmae_find_slave(chan, slave_id);
+
+ if (!sc)
+ return -ENODEV;
+ if (try)
+ return 0;
+ chan->cfg = sc;
+ return hpb_dmae_alloc_chan_resources(chan, sc);
+}
+
+static void hpb_dmae_setup_xfer(struct shdma_chan *schan, int slave_id)
+{
+}
+
+static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
+{
+ struct hpb_dmae_chan *chan = to_chan(schan);
+
+ return chan->cfg->addr;
+}
+
+static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
+{
+ return &((struct hpb_desc *)buf)[i].shdma_desc;
+}
+
+static const struct shdma_ops hpb_dmae_ops = {
+ .desc_completed = hpb_dmae_desc_completed,
+ .halt_channel = hpb_dmae_halt,
+ .channel_busy = hpb_dmae_channel_busy,
+ .slave_addr = hpb_dmae_slave_addr,
+ .desc_setup = hpb_dmae_desc_setup,
+ .set_slave = hpb_dmae_set_slave,
+ .setup_xfer = hpb_dmae_setup_xfer,
+ .start_xfer = hpb_dmae_start_xfer,
+ .embedded_desc = hpb_dmae_embedded_desc,
+ .chan_irq = hpb_dmae_chan_irq,
+ .get_partial = hpb_dmae_get_partial,
+};
+
+static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
+{
+ struct shdma_dev *sdev = &hpbdev->shdma_dev;
+ struct platform_device *pdev =
+ to_platform_device(hpbdev->shdma_dev.dma_dev.dev);
+ struct hpb_dmae_chan *new_hpb_chan;
+ struct shdma_chan *schan;
+
+ /* Alloc channel */
+ new_hpb_chan = devm_kzalloc(&pdev->dev,
+ sizeof(struct hpb_dmae_chan), GFP_KERNEL);
+ if (!new_hpb_chan) {
+ dev_err(hpbdev->shdma_dev.dma_dev.dev,
+ "No free memory for allocating DMA channels!\n");
+ return -ENOMEM;
+ }
+
+ schan = &new_hpb_chan->shdma_chan;
+ shdma_chan_probe(sdev, schan, id);
+
+ if (pdev->id >= 0)
+ snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
+ "hpb-dmae%d.%d", pdev->id, id);
+ else
+ snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
+ "hpb-dma.%d", id);
+
+ return 0;
+}
+
+static int hpb_dmae_probe(struct platform_device *pdev)
+{
+ struct hpb_dmae_pdata *pdata = pdev->dev.platform_data;
+ struct hpb_dmae_device *hpbdev;
+ struct dma_device *dma_dev;
+ struct resource *chan, *comm, *rest, *mode, *irq_res;
+ int err, i;
+
+ /* Get platform data */
+ if (!pdata || !pdata->num_channels)
+ return -ENODEV;
+
+ chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ comm = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ rest = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ mode = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq_res)
+ return -ENODEV;
+
+ hpbdev = devm_kzalloc(&pdev->dev, sizeof(struct hpb_dmae_device),
+ GFP_KERNEL);
+ if (!hpbdev) {
+ dev_err(&pdev->dev, "Not enough memory\n");
+ return -ENOMEM;
+ }
+
+ hpbdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
+ if (IS_ERR(hpbdev->chan_reg))
+ return PTR_ERR(hpbdev->chan_reg);
+
+ hpbdev->comm_reg = devm_ioremap_resource(&pdev->dev, comm);
+ if (IS_ERR(hpbdev->comm_reg))
+ return PTR_ERR(hpbdev->comm_reg);
+
+ hpbdev->reset_reg = devm_ioremap_resource(&pdev->dev, rest);
+ if (IS_ERR(hpbdev->reset_reg))
+ return PTR_ERR(hpbdev->reset_reg);
+
+ hpbdev->mode_reg = devm_ioremap_resource(&pdev->dev, mode);
+ if (IS_ERR(hpbdev->mode_reg))
+ return PTR_ERR(hpbdev->mode_reg);
+
+ dma_dev = &hpbdev->shdma_dev.dma_dev;
+
+ spin_lock_init(&hpbdev->reg_lock);
+
+ /* Platform data */
+ hpbdev->pdata = pdata;
+
+ pm_runtime_enable(&pdev->dev);
+ err = pm_runtime_get_sync(&pdev->dev);
+ if (err < 0)
+ dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
+
+ /* Reset DMA controller */
+ hpb_dmae_reset(hpbdev);
+
+ pm_runtime_put(&pdev->dev);
+
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+
+ hpbdev->shdma_dev.ops = &hpb_dmae_ops;
+ hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc);
+ err = shdma_init(&pdev->dev, &hpbdev->shdma_dev, pdata->num_channels);
+ if (err < 0)
+ goto error;
+
+ /* Create DMA channels */
+ for (i = 0; i < pdata->num_channels; i++)
+ hpb_dmae_chan_probe(hpbdev, i);
+
+ platform_set_drvdata(pdev, hpbdev);
+ err = dma_async_device_register(dma_dev);
+ if (!err)
+ return 0;
+
+ shdma_cleanup(&hpbdev->shdma_dev);
+error:
+ pm_runtime_disable(&pdev->dev);
+ return err;
+}
+
+static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
+{
+ struct dma_device *dma_dev = &hpbdev->shdma_dev.dma_dev;
+ struct shdma_chan *schan;
+ int i;
+
+ shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
+ BUG_ON(!schan);
+
+ shdma_free_irq(schan);
+ shdma_chan_remove(schan);
+ }
+ dma_dev->chancnt = 0;
+}
+
+static int hpb_dmae_remove(struct platform_device *pdev)
+{
+ struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&hpbdev->shdma_dev.dma_dev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ hpb_dmae_chan_remove(hpbdev);
+
+ return 0;
+}
+
+static void hpb_dmae_shutdown(struct platform_device *pdev)
+{
+ struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);
+ hpb_dmae_ctl_stop(hpbdev);
+}
+
+static struct platform_driver hpb_dmae_driver = {
+ .probe = hpb_dmae_probe,
+ .remove = hpb_dmae_remove,
+ .shutdown = hpb_dmae_shutdown,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "hpb-dma-engine",
+ },
+};
+module_platform_driver(hpb_dmae_driver);
+
+MODULE_AUTHOR("Max Filippov <max.filippov@cogentembedded.com>");
+MODULE_DESCRIPTION("Renesas HPB DMA Engine driver");
+MODULE_LICENSE("GPL");
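The controller topology comes from platform data rather than DT. As a rough illustration of how a board file might feed this driver — the field names below are exactly those the driver dereferences above, but every value is hypothetical and the ts_shift table is omitted:

static const struct hpb_dmae_slave_config hpb_dmae_slaves[] = {
	{
		.id	= 0x10,			/* hypothetical slave ID */
		.addr	= 0xffca0024,		/* hypothetical FIFO address */
		.dcr	= HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP, /* double plane */
		.port	= 0x0a0a,
		.dma_ch	= 21,			/* hypothetical HW channel */
	},
};

static const struct hpb_dmae_channel hpb_dmae_channels[] = {
	{ .s_id = 0x10, .ch_irq = 93 },		/* hypothetical IRQ */
};

static struct hpb_dmae_pdata hpb_dmae_platform_data = {
	.slaves		= hpb_dmae_slaves,
	.num_slaves	= ARRAY_SIZE(hpb_dmae_slaves),
	.channels	= hpb_dmae_channels,
	.num_channels	= ARRAY_SIZE(hpb_dmae_channels),
	/* .ts_shift: XMIT_SZ_* -> log2(bytes) table, omitted here */
	.num_hw_channels = 44,
};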
diff --git a/drivers/dma/sh/shdma-arm.h b/drivers/dma/sh/shdma-arm.h
new file mode 100644
index 0000000..a2b8258
--- /dev/null
+++ b/drivers/dma/sh/shdma-arm.h
@@ -0,0 +1,51 @@
+/*
+ * Renesas SuperH DMA Engine support
+ *
+ * Copyright (C) 2013 Renesas Electronics, Inc.
+ *
+ * This is free software; you can redistribute it and/or modify it under the
+ * terms of version 2 of the GNU General Public License as published by the Free
+ * Software Foundation.
+ */
+
+#ifndef SHDMA_ARM_H
+#define SHDMA_ARM_H
+
+#include "shdma.h"
+
+/* Transmit sizes and respective CHCR register values */
+enum {
+ XMIT_SZ_8BIT = 0,
+ XMIT_SZ_16BIT = 1,
+ XMIT_SZ_32BIT = 2,
+ XMIT_SZ_64BIT = 7,
+ XMIT_SZ_128BIT = 3,
+ XMIT_SZ_256BIT = 4,
+ XMIT_SZ_512BIT = 5,
+};
+
+/* log2(size / 8) - used to calculate number of transfers */
+#define SH_DMAE_TS_SHIFT { \
+ [XMIT_SZ_8BIT] = 0, \
+ [XMIT_SZ_16BIT] = 1, \
+ [XMIT_SZ_32BIT] = 2, \
+ [XMIT_SZ_64BIT] = 3, \
+ [XMIT_SZ_128BIT] = 4, \
+ [XMIT_SZ_256BIT] = 5, \
+ [XMIT_SZ_512BIT] = 6, \
+}
+
+#define TS_LOW_BIT 0x3 /* --xx */
+#define TS_HI_BIT 0xc /* xx-- */
+
+#define TS_LOW_SHIFT (3)
+#define TS_HI_SHIFT (20 - 2) /* 2 bits for shifted low TS */
+
+#define TS_INDEX2VAL(i) \
+ ((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) |\
+ (((i) & TS_HI_BIT) << TS_HI_SHIFT))
+
+#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL((xmit_sz)))
+#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL((xmit_sz)))
+
+#endif
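A quick sanity check of the TS_INDEX2VAL() arithmetic: the macro scatters a XMIT_SZ_* index across CHCR's two non-contiguous transfer-size fields, bits 4:3 and 21:20 given the shifts above. A standalone userspace check, assuming nothing beyond the macros just defined:

#include <stdio.h>

#define TS_LOW_BIT	0x3
#define TS_HI_BIT	0xc
#define TS_LOW_SHIFT	(3)
#define TS_HI_SHIFT	(20 - 2)

#define TS_INDEX2VAL(i)	((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) | \
			 (((i) & TS_HI_BIT) << TS_HI_SHIFT))

int main(void)
{
	/* XMIT_SZ_64BIT == 7: 0b11 lands in bits 4:3, 0b01 in bits 21:20 */
	printf("0x%06x\n", TS_INDEX2VAL(7));	/* prints 0x100018 */
	return 0;
}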
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 28ca361..d94ab59 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -171,7 +171,8 @@ static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
return NULL;
}
-static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
+static int shdma_setup_slave(struct shdma_chan *schan, int slave_id,
+ dma_addr_t slave_addr)
{
struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
const struct shdma_ops *ops = sdev->ops;
@@ -179,7 +180,7 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
if (schan->dev->of_node) {
match = schan->hw_req;
- ret = ops->set_slave(schan, match, true);
+ ret = ops->set_slave(schan, match, slave_addr, true);
if (ret < 0)
return ret;
@@ -194,7 +195,7 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
if (test_and_set_bit(slave_id, shdma_slave_used))
return -EBUSY;
- ret = ops->set_slave(schan, match, false);
+ ret = ops->set_slave(schan, match, slave_addr, false);
if (ret < 0) {
clear_bit(slave_id, shdma_slave_used);
return ret;
@@ -236,7 +237,7 @@ bool shdma_chan_filter(struct dma_chan *chan, void *arg)
if (!schan->dev->of_node && match >= slave_num)
return false;
- ret = ops->set_slave(schan, match, true);
+ ret = ops->set_slave(schan, match, 0, true);
if (ret < 0)
return false;
@@ -259,7 +260,7 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
*/
if (slave) {
/* Legacy mode: .private is set in filter */
- ret = shdma_setup_slave(schan, slave->slave_id);
+ ret = shdma_setup_slave(schan, slave->slave_id, 0);
if (ret < 0)
goto esetslave;
} else {
@@ -680,7 +681,9 @@ static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
* channel, while using it...
*/
config = (struct dma_slave_config *)arg;
- ret = shdma_setup_slave(schan, config->slave_id);
+ ret = shdma_setup_slave(schan, config->slave_id,
+ config->direction == DMA_DEV_TO_MEM ?
+ config->src_addr : config->dst_addr);
if (ret < 0)
return ret;
break;
@@ -831,8 +834,8 @@ static irqreturn_t chan_irqt(int irq, void *dev)
int shdma_request_irq(struct shdma_chan *schan, int irq,
unsigned long flags, const char *name)
{
- int ret = request_threaded_irq(irq, chan_irq, chan_irqt,
- flags, name, schan);
+ int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
+ chan_irqt, flags, name, schan);
schan->irq = ret < 0 ? ret : irq;
@@ -840,13 +843,6 @@ int shdma_request_irq(struct shdma_chan *schan, int irq,
}
EXPORT_SYMBOL(shdma_request_irq);
-void shdma_free_irq(struct shdma_chan *schan)
-{
- if (schan->irq >= 0)
- free_irq(schan->irq, schan);
-}
-EXPORT_SYMBOL(shdma_free_irq);
-
void shdma_chan_probe(struct shdma_dev *sdev,
struct shdma_chan *schan, int id)
{
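With the slave_addr plumbing above, a client can override the platform-data default address at runtime: dma_slave_config's src_addr or dst_addr (picked by direction) flows through shdma_setup_slave() into each driver's set_slave() hook, and a zero address falls back to the per-slave default. A minimal client-side sketch — the FIFO address and slave_id are hypothetical, the API is the stock dmaengine one from <linux/dmaengine.h>:

static int foo_setup_rx(struct dma_chan *chan)
{
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr  = 0xfe610024,	/* hypothetical device FIFO */
		.slave_id  = 42,		/* hypothetical request line */
	};

	/* DMA_SLAVE_CONFIG is routed to shdma_control(), which forwards
	 * src_addr (DEV_TO_MEM) or dst_addr (MEM_TO_DEV) down the stack */
	return dmaengine_slave_config(chan, &cfg);
}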
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c
index 11bcb05..06473a0 100644
--- a/drivers/dma/sh/shdma-of.c
+++ b/drivers/dma/sh/shdma-of.c
@@ -42,12 +42,9 @@ static struct dma_chan *shdma_of_xlate(struct of_phandle_args *dma_spec,
static int shdma_of_probe(struct platform_device *pdev)
{
- const struct of_dev_auxdata *lookup = pdev->dev.platform_data;
+ const struct of_dev_auxdata *lookup = dev_get_platdata(&pdev->dev);
int ret;
- if (!lookup)
- return -EINVAL;
-
ret = of_dma_controller_register(pdev->dev.of_node,
shdma_of_xlate, pdev);
if (ret < 0)
diff --git a/drivers/dma/sh/shdma-r8a73a4.c b/drivers/dma/sh/shdma-r8a73a4.c
new file mode 100644
index 0000000..4fb9997
--- /dev/null
+++ b/drivers/dma/sh/shdma-r8a73a4.c
@@ -0,0 +1,77 @@
+/*
+ * Renesas SuperH DMA Engine support for r8a73a4 (APE6) SoCs
+ *
+ * Copyright (C) 2013 Renesas Electronics, Inc.
+ *
+ * This is free software; you can redistribute it and/or modify it under the
+ * terms of version 2 of the GNU General Public License as published by the Free
+ * Software Foundation.
+ */
+#include <linux/sh_dma.h>
+
+#include "shdma-arm.h"
+
+const unsigned int dma_ts_shift[] = SH_DMAE_TS_SHIFT;
+
+static const struct sh_dmae_slave_config dma_slaves[] = {
+ {
+ .chcr = CHCR_TX(XMIT_SZ_32BIT),
+ .mid_rid = 0xd1, /* MMC0 Tx */
+ }, {
+ .chcr = CHCR_RX(XMIT_SZ_32BIT),
+ .mid_rid = 0xd2, /* MMC0 Rx */
+ }, {
+ .chcr = CHCR_TX(XMIT_SZ_32BIT),
+ .mid_rid = 0xe1, /* MMC1 Tx */
+ }, {
+ .chcr = CHCR_RX(XMIT_SZ_32BIT),
+ .mid_rid = 0xe2, /* MMC1 Rx */
+ },
+};
+
+#define DMAE_CHANNEL(a, b) \
+ { \
+ .offset = (a) - 0x20, \
+ .dmars = (a) - 0x20 + 0x40, \
+ .chclr_bit = (b), \
+ .chclr_offset = 0x80 - 0x20, \
+ }
+
+static const struct sh_dmae_channel dma_channels[] = {
+ DMAE_CHANNEL(0x8000, 0),
+ DMAE_CHANNEL(0x8080, 1),
+ DMAE_CHANNEL(0x8100, 2),
+ DMAE_CHANNEL(0x8180, 3),
+ DMAE_CHANNEL(0x8200, 4),
+ DMAE_CHANNEL(0x8280, 5),
+ DMAE_CHANNEL(0x8300, 6),
+ DMAE_CHANNEL(0x8380, 7),
+ DMAE_CHANNEL(0x8400, 8),
+ DMAE_CHANNEL(0x8480, 9),
+ DMAE_CHANNEL(0x8500, 10),
+ DMAE_CHANNEL(0x8580, 11),
+ DMAE_CHANNEL(0x8600, 12),
+ DMAE_CHANNEL(0x8680, 13),
+ DMAE_CHANNEL(0x8700, 14),
+ DMAE_CHANNEL(0x8780, 15),
+ DMAE_CHANNEL(0x8800, 16),
+ DMAE_CHANNEL(0x8880, 17),
+ DMAE_CHANNEL(0x8900, 18),
+ DMAE_CHANNEL(0x8980, 19),
+};
+
+const struct sh_dmae_pdata r8a73a4_dma_pdata = {
+ .slave = dma_slaves,
+ .slave_num = ARRAY_SIZE(dma_slaves),
+ .channel = dma_channels,
+ .channel_num = ARRAY_SIZE(dma_channels),
+ .ts_low_shift = TS_LOW_SHIFT,
+ .ts_low_mask = TS_LOW_BIT << TS_LOW_SHIFT,
+ .ts_high_shift = TS_HI_SHIFT,
+ .ts_high_mask = TS_HI_BIT << TS_HI_SHIFT,
+ .ts_shift = dma_ts_shift,
+ .ts_shift_num = ARRAY_SIZE(dma_ts_shift),
+ .dmaor_init = DMAOR_DME,
+ .chclr_present = 1,
+ .chclr_bitwise = 1,
+};
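For readability, one expansion of the helper macro: DMAE_CHANNEL(0x8000, 0) resolves to

	{ .offset = 0x7fe0, .dmars = 0x8020, .chclr_bit = 0, .chclr_offset = 0x60 }

i.e. every offset is taken relative to a register window starting 0x20 into the DMAC block, and all twenty channels share one bitwise CHCLR register at 0x60.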
diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h
index 9314e93..758a57b 100644
--- a/drivers/dma/sh/shdma.h
+++ b/drivers/dma/sh/shdma.h
@@ -28,18 +28,19 @@ struct sh_dmae_chan {
struct shdma_chan shdma_chan;
const struct sh_dmae_slave_config *config; /* Slave DMA configuration */
int xmit_shift; /* log_2(bytes_per_xfer) */
- u32 __iomem *base;
+ void __iomem *base;
char dev_id[16]; /* unique name per DMAC of channel */
int pm_error;
+ dma_addr_t slave_addr;
};
struct sh_dmae_device {
struct shdma_dev shdma_dev;
struct sh_dmae_chan *chan[SH_DMAE_MAX_CHANNELS];
- struct sh_dmae_pdata *pdata;
+ const struct sh_dmae_pdata *pdata;
struct list_head node;
- u32 __iomem *chan_reg;
- u16 __iomem *dmars;
+ void __iomem *chan_reg;
+ void __iomem *dmars;
unsigned int chcr_offset;
u32 chcr_ie_bit;
};
@@ -61,4 +62,11 @@ struct sh_dmae_desc {
#define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\
struct sh_dmae_device, shdma_dev.dma_dev)
+#ifdef CONFIG_SHDMA_R8A73A4
+extern const struct sh_dmae_pdata r8a73a4_dma_pdata;
+#define r8a73a4_shdma_devid (&r8a73a4_dma_pdata)
+#else
+#define r8a73a4_shdma_devid NULL
+#endif
+
#endif /* __DMA_SHDMA_H */
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdmac.c
index 5039fbc..1069e88 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdmac.c
@@ -20,6 +20,8 @@
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
@@ -35,6 +37,15 @@
#include "../dmaengine.h"
#include "shdma.h"
+/* DMA register */
+#define SAR 0x00
+#define DAR 0x04
+#define TCR 0x08
+#define CHCR 0x0C
+#define DMAOR 0x40
+
+#define TEND 0x18 /* USB-DMAC */
+
#define SH_DMAE_DRV_NAME "sh-dma-engine"
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
@@ -49,27 +60,37 @@
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);
-static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
+/*
+ * Different DMAC implementations provide different ways to clear DMA channels:
+ * (1) none - no CHCLR registers are available
+ * (2) one CHCLR register per channel - 0 has to be written to it to clear
+ * channel buffers
+ * (3) one CHCLR per several channels - 1 has to be written to the bit
+ *     corresponding to the specific channel to reset it
+ */
+static void channel_clear(struct sh_dmae_chan *sh_dc)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+ const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
+ sh_dc->shdma_chan.id;
+ u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;
- __raw_writel(data, shdev->chan_reg +
- shdev->pdata->channel[sh_dc->shdma_chan.id].chclr_offset);
+ __raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
}
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
- __raw_writel(data, sh_dc->base + reg / sizeof(u32));
+ __raw_writel(data, sh_dc->base + reg);
}
static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
- return __raw_readl(sh_dc->base + reg / sizeof(u32));
+ return __raw_readl(sh_dc->base + reg);
}
static u16 dmaor_read(struct sh_dmae_device *shdev)
{
- u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
+ void __iomem *addr = shdev->chan_reg + DMAOR;
if (shdev->pdata->dmaor_is_32bit)
return __raw_readl(addr);
@@ -79,7 +100,7 @@ static u16 dmaor_read(struct sh_dmae_device *shdev)
static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
- u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
+ void __iomem *addr = shdev->chan_reg + DMAOR;
if (shdev->pdata->dmaor_is_32bit)
__raw_writel(data, addr);
@@ -91,14 +112,14 @@ static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
- __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
+ __raw_writel(data, sh_dc->base + shdev->chcr_offset);
}
static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
- return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
+ return __raw_readl(sh_dc->base + shdev->chcr_offset);
}
/*
@@ -133,7 +154,7 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
for (i = 0; i < shdev->pdata->channel_num; i++) {
struct sh_dmae_chan *sh_chan = shdev->chan[i];
if (sh_chan)
- chclr_write(sh_chan, 0);
+ channel_clear(sh_chan);
}
}
@@ -167,7 +188,7 @@ static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
- struct sh_dmae_pdata *pdata = shdev->pdata;
+ const struct sh_dmae_pdata *pdata = shdev->pdata;
int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
@@ -180,7 +201,7 @@ static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
- struct sh_dmae_pdata *pdata = shdev->pdata;
+ const struct sh_dmae_pdata *pdata = shdev->pdata;
int i;
for (i = 0; i < pdata->ts_shift_num; i++)
@@ -240,9 +261,9 @@ static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
- struct sh_dmae_pdata *pdata = shdev->pdata;
+ const struct sh_dmae_pdata *pdata = shdev->pdata;
const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
- u16 __iomem *addr = shdev->dmars;
+ void __iomem *addr = shdev->dmars;
unsigned int shift = chan_pdata->dmars_bit;
if (dmae_is_busy(sh_chan))
@@ -253,8 +274,8 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
/* in the case of a missing DMARS resource use first memory window */
if (!addr)
- addr = (u16 __iomem *)shdev->chan_reg;
- addr += chan_pdata->dmars / sizeof(u16);
+ addr = shdev->chan_reg;
+ addr += chan_pdata->dmars;
__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
addr);
@@ -309,7 +330,7 @@ static const struct sh_dmae_slave_config *dmae_find_slave(
struct sh_dmae_chan *sh_chan, int match)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
- struct sh_dmae_pdata *pdata = shdev->pdata;
+ const struct sh_dmae_pdata *pdata = shdev->pdata;
const struct sh_dmae_slave_config *cfg;
int i;
@@ -323,7 +344,7 @@ static const struct sh_dmae_slave_config *dmae_find_slave(
} else {
for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
if (cfg->mid_rid == match) {
- sh_chan->shdma_chan.slave_id = cfg->slave_id;
+ sh_chan->shdma_chan.slave_id = i;
return cfg;
}
}
@@ -332,7 +353,7 @@ static const struct sh_dmae_slave_config *dmae_find_slave(
}
static int sh_dmae_set_slave(struct shdma_chan *schan,
- int slave_id, bool try)
+ int slave_id, dma_addr_t slave_addr, bool try)
{
struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
shdma_chan);
@@ -340,8 +361,10 @@ static int sh_dmae_set_slave(struct shdma_chan *schan,
if (!cfg)
return -ENXIO;
- if (!try)
+ if (!try) {
sh_chan->config = cfg;
+ sh_chan->slave_addr = slave_addr ? : cfg->addr;
+ }
return 0;
}
@@ -505,7 +528,8 @@ static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
struct shdma_chan *schan;
int err;
- sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
+ sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
+ GFP_KERNEL);
if (!sh_chan) {
dev_err(sdev->dma_dev.dev,
"No free memory for allocating dma channels!\n");
@@ -517,7 +541,7 @@ static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
shdma_chan_probe(sdev, schan, id);
- sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);
+ sh_chan->base = shdev->chan_reg + chan_pdata->offset;
/* set up channel irq */
if (pdev->id >= 0)
@@ -541,7 +565,6 @@ static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
err_no_irq:
/* remove from dmaengine device node */
shdma_chan_remove(schan);
- kfree(sh_chan);
return err;
}
@@ -552,14 +575,9 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
int i;
shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
- struct sh_dmae_chan *sh_chan = container_of(schan,
- struct sh_dmae_chan, shdma_chan);
BUG_ON(!schan);
- shdma_free_irq(&sh_chan->shdma_chan);
-
shdma_chan_remove(schan);
- kfree(sh_chan);
}
dma_dev->chancnt = 0;
}
@@ -636,7 +654,7 @@ static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
* This is an exclusive slave DMA operation, may only be called after a
* successful slave configuration.
*/
- return sh_chan->config->addr;
+ return sh_chan->slave_addr;
}
static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
@@ -658,9 +676,15 @@ static const struct shdma_ops sh_dmae_shdma_ops = {
.get_partial = sh_dmae_get_partial,
};
+static const struct of_device_id sh_dmae_of_match[] = {
+ {.compatible = "renesas,shdma-r8a73a4", .data = r8a73a4_shdma_devid,},
+ {}
+};
+MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
+
static int sh_dmae_probe(struct platform_device *pdev)
{
- struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
+ const struct sh_dmae_pdata *pdata;
unsigned long irqflags = IRQF_DISABLED,
chan_flag[SH_DMAE_MAX_CHANNELS] = {};
int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
@@ -669,6 +693,11 @@ static int sh_dmae_probe(struct platform_device *pdev)
struct dma_device *dma_dev;
struct resource *chan, *dmars, *errirq_res, *chanirq_res;
+ if (pdev->dev.of_node)
+ pdata = of_match_device(sh_dmae_of_match, &pdev->dev)->data;
+ else
+ pdata = dev_get_platdata(&pdev->dev);
+
/* get platform data */
if (!pdata || !pdata->channel_num)
return -ENODEV;
@@ -696,33 +725,22 @@ static int sh_dmae_probe(struct platform_device *pdev)
if (!chan || !errirq_res)
return -ENODEV;
- if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
- dev_err(&pdev->dev, "DMAC register region already claimed\n");
- return -EBUSY;
- }
-
- if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
- dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
- err = -EBUSY;
- goto ermrdmars;
- }
-
- err = -ENOMEM;
- shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
+ shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
+ GFP_KERNEL);
if (!shdev) {
dev_err(&pdev->dev, "Not enough memory\n");
- goto ealloc;
+ return -ENOMEM;
}
dma_dev = &shdev->shdma_dev.dma_dev;
- shdev->chan_reg = ioremap(chan->start, resource_size(chan));
- if (!shdev->chan_reg)
- goto emapchan;
+ shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
+ if (IS_ERR(shdev->chan_reg))
+ return PTR_ERR(shdev->chan_reg);
if (dmars) {
- shdev->dmars = ioremap(dmars->start, resource_size(dmars));
- if (!shdev->dmars)
- goto emapdmars;
+ shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
+ if (IS_ERR(shdev->dmars))
+ return PTR_ERR(shdev->dmars);
}
if (!pdata->slave_only)
@@ -783,8 +801,8 @@ static int sh_dmae_probe(struct platform_device *pdev)
errirq = errirq_res->start;
- err = request_irq(errirq, sh_dmae_err, irqflags,
- "DMAC Address Error", shdev);
+ err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags,
+ "DMAC Address Error", shdev);
if (err) {
dev_err(&pdev->dev,
"DMA failed requesting irq #%d, error %d\n",
@@ -862,7 +880,6 @@ chan_probe_err:
sh_dmae_chan_remove(shdev);
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
- free_irq(errirq, shdev);
eirq_err:
#endif
rst_err:
@@ -873,21 +890,9 @@ rst_err:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- platform_set_drvdata(pdev, NULL);
shdma_cleanup(&shdev->shdma_dev);
eshdma:
- if (dmars)
- iounmap(shdev->dmars);
-emapdmars:
- iounmap(shdev->chan_reg);
synchronize_rcu();
-emapchan:
- kfree(shdev);
-ealloc:
- if (dmars)
- release_mem_region(dmars->start, resource_size(dmars));
-ermrdmars:
- release_mem_region(chan->start, resource_size(chan));
return err;
}
@@ -896,14 +901,9 @@ static int sh_dmae_remove(struct platform_device *pdev)
{
struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
- struct resource *res;
- int errirq = platform_get_irq(pdev, 0);
dma_async_device_unregister(dma_dev);
- if (errirq > 0)
- free_irq(errirq, shdev);
-
spin_lock_irq(&sh_dmae_lock);
list_del_rcu(&shdev->node);
spin_unlock_irq(&sh_dmae_lock);
@@ -913,31 +913,11 @@ static int sh_dmae_remove(struct platform_device *pdev)
sh_dmae_chan_remove(shdev);
shdma_cleanup(&shdev->shdma_dev);
- if (shdev->dmars)
- iounmap(shdev->dmars);
- iounmap(shdev->chan_reg);
-
- platform_set_drvdata(pdev, NULL);
-
synchronize_rcu();
- kfree(shdev);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res)
- release_mem_region(res->start, resource_size(res));
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res)
- release_mem_region(res->start, resource_size(res));
return 0;
}
-static const struct of_device_id sh_dmae_of_match[] = {
- { .compatible = "renesas,shdma", },
- { }
-};
-MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
-
static struct platform_driver sh_dmae_driver = {
.driver = {
.owner = THIS_MODULE,
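The shrinking error paths in this file are the usual payoff of the devm conversion: devm_kzalloc(), devm_ioremap_resource() and devm_request_irq() tie each resource to the device, so it is released automatically, in reverse order, on probe failure or unbind, and the hand-rolled iounmap/release_mem_region/kfree unwind labels disappear. As a generic sketch of the resulting probe shape:

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	void __iomem *base;

	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* nothing to unwind by hand */

	/* further devm_* allocations are equally safe to bail out of */
	return 0;
}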
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c
index e7c94bb..c7e9cdf 100644
--- a/drivers/dma/sh/sudmac.c
+++ b/drivers/dma/sh/sudmac.c
@@ -150,7 +150,8 @@ static const struct sudmac_slave_config *sudmac_find_slave(
return NULL;
}
-static int sudmac_set_slave(struct shdma_chan *schan, int slave_id, bool try)
+static int sudmac_set_slave(struct shdma_chan *schan, int slave_id,
+ dma_addr_t slave_addr, bool try)
{
struct sudmac_chan *sc = to_chan(schan);
const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id);
@@ -298,11 +299,8 @@ static void sudmac_chan_remove(struct sudmac_device *su_dev)
int i;
shdma_for_each_chan(schan, &su_dev->shdma_dev, i) {
- struct sudmac_chan *sc = to_chan(schan);
-
BUG_ON(!schan);
- shdma_free_irq(&sc->shdma_chan);
shdma_chan_remove(schan);
}
dma_dev->chancnt = 0;
@@ -335,7 +333,7 @@ static const struct shdma_ops sudmac_shdma_ops = {
static int sudmac_probe(struct platform_device *pdev)
{
- struct sudmac_pdata *pdata = pdev->dev.platform_data;
+ struct sudmac_pdata *pdata = dev_get_platdata(&pdev->dev);
int err, i;
struct sudmac_device *su_dev;
struct dma_device *dma_dev;
@@ -345,9 +343,8 @@ static int sudmac_probe(struct platform_device *pdev)
if (!pdata)
return -ENODEV;
- chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!chan || !irq_res)
+ if (!irq_res)
return -ENODEV;
err = -ENOMEM;
@@ -360,9 +357,10 @@ static int sudmac_probe(struct platform_device *pdev)
dma_dev = &su_dev->shdma_dev.dma_dev;
- su_dev->chan_reg = devm_request_and_ioremap(&pdev->dev, chan);
- if (!su_dev->chan_reg)
- return err;
+ chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ su_dev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
+ if (IS_ERR(su_dev->chan_reg))
+ return PTR_ERR(su_dev->chan_reg);
dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
@@ -373,7 +371,7 @@ static int sudmac_probe(struct platform_device *pdev)
return err;
/* platform data */
- su_dev->pdata = pdev->dev.platform_data;
+ su_dev->pdata = dev_get_platdata(&pdev->dev);
platform_set_drvdata(pdev, su_dev);
@@ -393,7 +391,6 @@ static int sudmac_probe(struct platform_device *pdev)
chan_probe_err:
sudmac_chan_remove(su_dev);
- platform_set_drvdata(pdev, NULL);
shdma_cleanup(&su_dev->shdma_dev);
return err;
@@ -407,7 +404,6 @@ static int sudmac_remove(struct platform_device *pdev)
dma_async_device_unregister(dma_dev);
sudmac_chan_remove(su_dev);
shdma_cleanup(&su_dev->shdma_dev);
- platform_set_drvdata(pdev, NULL);
return 0;
}
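One detail of the sudmac reordering: the explicit !chan check can be dropped because devm_ioremap_resource() rejects a NULL resource itself. Roughly, the helper begins with:

	if (!res || resource_type(res) != IORESOURCE_MEM) {
		dev_err(dev, "invalid resource\n");
		return ERR_PTR(-EINVAL);
	}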
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 716b23e..6aec3ad 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -73,6 +74,11 @@ struct sirfsoc_dma_chan {
int mode;
};
+struct sirfsoc_dma_regs {
+ u32 ctrl[SIRFSOC_DMA_CHANNELS];
+ u32 interrupt_en;
+};
+
struct sirfsoc_dma {
struct dma_device dma;
struct tasklet_struct tasklet;
@@ -81,10 +87,13 @@ struct sirfsoc_dma {
int irq;
struct clk *clk;
bool is_marco;
+ struct sirfsoc_dma_regs regs_save;
};
#define DRV_NAME "sirfsoc_dma"
+static int sirfsoc_dma_runtime_suspend(struct device *dev);
+
/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
@@ -393,6 +402,8 @@ static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
LIST_HEAD(descs);
int i;
+ pm_runtime_get_sync(sdma->dma.dev);
+
/* Alloc descriptors for this channel */
for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
@@ -425,6 +436,7 @@ static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
struct sirfsoc_dma_desc *sdesc, *tmp;
unsigned long flags;
LIST_HEAD(descs);
@@ -445,6 +457,8 @@ static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
/* Free descriptors */
list_for_each_entry_safe(sdesc, tmp, &descs, node)
kfree(sdesc);
+
+ pm_runtime_put(sdma->dma.dev);
}
/* Send pending descriptor to hardware */
@@ -595,7 +609,7 @@ sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
spin_unlock_irqrestore(&schan->lock, iflags);
if (!sdesc)
- return 0;
+ return NULL;
/* Place descriptor in prepared list */
spin_lock_irqsave(&schan->lock, iflags);
@@ -723,14 +737,14 @@ static int sirfsoc_dma_probe(struct platform_device *op)
tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
- clk_prepare_enable(sdma->clk);
-
/* Register DMA engine */
dev_set_drvdata(dev, sdma);
+
ret = dma_async_device_register(dma);
if (ret)
goto free_irq;
+ pm_runtime_enable(&op->dev);
dev_info(dev, "initialized SIRFSOC DMAC driver\n");
return 0;
@@ -747,13 +761,124 @@ static int sirfsoc_dma_remove(struct platform_device *op)
struct device *dev = &op->dev;
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
- clk_disable_unprepare(sdma->clk);
dma_async_device_unregister(&sdma->dma);
free_irq(sdma->irq, sdma);
irq_dispose_mapping(sdma->irq);
+ pm_runtime_disable(&op->dev);
+ if (!pm_runtime_status_suspended(&op->dev))
+ sirfsoc_dma_runtime_suspend(&op->dev);
+
+ return 0;
+}
+
+static int sirfsoc_dma_runtime_suspend(struct device *dev)
+{
+ struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(sdma->clk);
+ return 0;
+}
+
+static int sirfsoc_dma_runtime_resume(struct device *dev)
+{
+ struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(sdma->clk);
+ if (ret < 0) {
+ dev_err(dev, "clk_enable failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int sirfsoc_dma_pm_suspend(struct device *dev)
+{
+ struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+ struct sirfsoc_dma_regs *save = &sdma->regs_save;
+ struct sirfsoc_dma_desc *sdesc;
+ struct sirfsoc_dma_chan *schan;
+ int ch;
+ int ret;
+
+ /*
+	 * If we were runtime-suspended, resume first so the clock is
+	 * enabled before we touch any registers.
+ */
+ if (pm_runtime_status_suspended(dev)) {
+ ret = sirfsoc_dma_runtime_resume(dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+	 * The DMA controller loses all register state across suspend,
+	 * so save the registers of every active channel.
+ */
+ for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) {
+ schan = &sdma->channels[ch];
+ if (list_empty(&schan->active))
+ continue;
+ sdesc = list_first_entry(&schan->active,
+ struct sirfsoc_dma_desc,
+ node);
+ save->ctrl[ch] = readl_relaxed(sdma->base +
+ ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
+ }
+ save->interrupt_en = readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN);
+
+ /* Disable clock */
+ sirfsoc_dma_runtime_suspend(dev);
+
+ return 0;
+}
+
+static int sirfsoc_dma_pm_resume(struct device *dev)
+{
+ struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+ struct sirfsoc_dma_regs *save = &sdma->regs_save;
+ struct sirfsoc_dma_desc *sdesc;
+ struct sirfsoc_dma_chan *schan;
+ int ch;
+ int ret;
+
+ /* Enable clock before accessing register */
+ ret = sirfsoc_dma_runtime_resume(dev);
+ if (ret < 0)
+ return ret;
+
+ writel_relaxed(save->interrupt_en, sdma->base + SIRFSOC_DMA_INT_EN);
+ for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) {
+ schan = &sdma->channels[ch];
+ if (list_empty(&schan->active))
+ continue;
+ sdesc = list_first_entry(&schan->active,
+ struct sirfsoc_dma_desc,
+ node);
+ writel_relaxed(sdesc->width,
+ sdma->base + SIRFSOC_DMA_WIDTH_0 + ch * 4);
+ writel_relaxed(sdesc->xlen,
+ sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN);
+ writel_relaxed(sdesc->ylen,
+ sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN);
+ writel_relaxed(save->ctrl[ch],
+ sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
+ writel_relaxed(sdesc->addr >> 2,
+ sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR);
+ }
+
+ /* if we were runtime-suspended before, suspend again */
+ if (pm_runtime_status_suspended(dev))
+ sirfsoc_dma_runtime_suspend(dev);
+
return 0;
}
+static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
+ SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
+};
+
static struct of_device_id sirfsoc_dma_match[] = {
{ .compatible = "sirf,prima2-dmac", },
{ .compatible = "sirf,marco-dmac", },
@@ -766,6 +891,7 @@ static struct platform_driver sirfsoc_dma_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .pm = &sirfsoc_dma_pm_ops,
.of_match_table = sirfsoc_dma_match,
},
};
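For reference, the two macros in sirfsoc_dma_pm_ops fill in the corresponding dev_pm_ops callbacks; the struct is roughly equivalent to this open-coded form (the sleep macro also aliases the freeze/thaw and poweroff/restore hooks to the same pair, omitted here):

static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
	.suspend	 = sirfsoc_dma_pm_suspend,
	.resume		 = sirfsoc_dma_pm_resume,
	.runtime_suspend = sirfsoc_dma_runtime_suspend,
	.runtime_resume	 = sirfsoc_dma_runtime_resume,
};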
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 5ab5880..82d2b97 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2591,6 +2591,9 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
int i;
sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
+ if (!sg)
+ return NULL;
+
for (i = 0; i < periods; i++) {
sg_dma_address(&sg[i]) = dma_addr;
sg_dma_len(&sg[i]) = period_len;
@@ -3139,7 +3142,7 @@ static int __init d40_phy_res_init(struct d40_base *base)
static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
- struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
+ struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
struct clk *clk = NULL;
void __iomem *virtbase = NULL;
struct resource *res = NULL;
@@ -3226,8 +3229,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
dev_info(&pdev->dev,
- "hardware rev: %d @ 0x%x with %d physical and %d logical channels\n",
- rev, res->start, num_phy_chans, num_log_chans);
+ "hardware rev: %d @ %pa with %d physical and %d logical channels\n",
+ rev, &res->start, num_phy_chans, num_log_chans);
base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
(num_phy_chans + num_log_chans + num_memcpy_chans) *
@@ -3485,7 +3488,7 @@ static int __init d40_of_probe(struct platform_device *pdev,
{
struct stedma40_platform_data *pdata;
int num_phy = 0, num_memcpy = 0, num_disabled = 0;
- const const __be32 *list;
+ const __be32 *list;
pdata = devm_kzalloc(&pdev->dev,
sizeof(struct stedma40_platform_data),
@@ -3516,7 +3519,7 @@ static int __init d40_of_probe(struct platform_device *pdev,
list = of_get_property(np, "disabled-channels", &num_disabled);
num_disabled /= sizeof(*list);
- if (num_disabled > STEDMA40_MAX_PHYS || num_disabled < 0) {
+ if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) {
d40_err(&pdev->dev,
"Invalid number of disabled channels specified (%d)\n",
num_disabled);
@@ -3535,7 +3538,7 @@ static int __init d40_of_probe(struct platform_device *pdev,
static int __init d40_probe(struct platform_device *pdev)
{
- struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
+ struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
struct device_node *np = pdev->dev.of_node;
int ret = -ENOENT;
struct d40_base *base = NULL;
@@ -3579,9 +3582,7 @@ static int __init d40_probe(struct platform_device *pdev)
if (request_mem_region(res->start, resource_size(res),
D40_NAME " I/O lcpa") == NULL) {
ret = -EBUSY;
- d40_err(&pdev->dev,
- "Failed to request LCPA region 0x%x-0x%x\n",
- res->start, res->end);
+ d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res);
goto failure;
}
@@ -3589,8 +3590,8 @@ static int __init d40_probe(struct platform_device *pdev)
val = readl(base->virtbase + D40_DREG_LCPA);
if (res->start != val && val != 0) {
dev_warn(&pdev->dev,
- "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
- __func__, val, res->start);
+ "[%s] Mismatch LCPA dma 0x%x, def %pa\n",
+ __func__, val, &res->start);
} else
writel(res->start, base->virtbase + D40_DREG_LCPA);
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index f137914..5d4986e 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -767,13 +767,11 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
unsigned long flags;
unsigned int residual;
- spin_lock_irqsave(&tdc->lock, flags);
-
ret = dma_cookie_status(dc, cookie, txstate);
- if (ret == DMA_SUCCESS) {
- spin_unlock_irqrestore(&tdc->lock, flags);
+ if (ret == DMA_SUCCESS)
return ret;
- }
+
+ spin_lock_irqsave(&tdc->lock, flags);
/* Check on wait_ack desc status */
list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 0ef43c1..28af214 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -669,7 +669,7 @@ static irqreturn_t td_irq(int irq, void *devid)
static int td_probe(struct platform_device *pdev)
{
- struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
+ struct timb_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct timb_dma *td;
struct resource *iomem;
int irq;
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index a59fb48..71e8e77 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -962,15 +962,14 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_SUCCESS) {
- spin_lock_bh(&dc->lock);
- txx9dmac_scan_descriptors(dc);
- spin_unlock_bh(&dc->lock);
+ if (ret == DMA_SUCCESS)
+ return DMA_SUCCESS;
- ret = dma_cookie_status(chan, cookie, txstate);
- }
+ spin_lock_bh(&dc->lock);
+ txx9dmac_scan_descriptors(dc);
+ spin_unlock_bh(&dc->lock);
- return ret;
+ return dma_cookie_status(chan, cookie, txstate);
}
static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
@@ -1118,9 +1117,10 @@ static void txx9dmac_off(struct txx9dmac_dev *ddev)
static int __init txx9dmac_chan_probe(struct platform_device *pdev)
{
- struct txx9dmac_chan_platform_data *cpdata = pdev->dev.platform_data;
+ struct txx9dmac_chan_platform_data *cpdata =
+ dev_get_platdata(&pdev->dev);
struct platform_device *dmac_dev = cpdata->dmac_dev;
- struct txx9dmac_platform_data *pdata = dmac_dev->dev.platform_data;
+ struct txx9dmac_platform_data *pdata = dev_get_platdata(&dmac_dev->dev);
struct txx9dmac_chan *dc;
int err;
int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS;
@@ -1203,7 +1203,7 @@ static int txx9dmac_chan_remove(struct platform_device *pdev)
static int __init txx9dmac_probe(struct platform_device *pdev)
{
- struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
+ struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct resource *io;
struct txx9dmac_dev *ddev;
u32 mcr;
@@ -1282,7 +1282,7 @@ static int txx9dmac_resume_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
- struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
+ struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev);
u32 mcr;
mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index ba9876f..0dfaf20 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -195,8 +195,8 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
return;
for (;; index++) {
- ret = of_parse_phandle_with_args(np, "gpio-ranges",
- "#gpio-range-cells", index, &pinspec);
+ ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
+ index, &pinspec);
if (ret)
break;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 55ab924..a6f4cb5 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -857,7 +857,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- u32 rpstat, cagf;
+ u32 rpstat, cagf, reqf;
u32 rpupei, rpcurup, rpprevup;
u32 rpdownei, rpcurdown, rpprevdown;
int max_freq;
@@ -869,6 +869,14 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
gen6_gt_force_wake_get(dev_priv);
+ reqf = I915_READ(GEN6_RPNSWREQ);
+ reqf &= ~GEN6_TURBO_DISABLE;
+ if (IS_HASWELL(dev))
+ reqf >>= 24;
+ else
+ reqf >>= 25;
+ reqf *= GT_FREQUENCY_MULTIPLIER;
+
rpstat = I915_READ(GEN6_RPSTAT1);
rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
rpcurup = I915_READ(GEN6_RP_CUR_UP);
@@ -893,6 +901,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
+ seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
seq_printf(m, "CAGF: %dMHz\n", cagf);
seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
GEN6_CURICONT_MASK);
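The new reqf read decodes the software-requested GT frequency: clear the turbo-disable flag, shift the frequency field down (bits 31:24 on Haswell, 31:25 on earlier GEN6+ parts), then scale by GT_FREQUENCY_MULTIPLIER. Assuming the era's 50 MHz multiplier, a worked example with a hypothetical register value:

	u32 reqf = 0x24000000;		/* hypothetical GEN6_RPNSWREQ value */

	reqf &= ~GEN6_TURBO_DISABLE;	/* drop the turbo-disable flag */
	reqf >>= 25;			/* non-Haswell field: 0x24000000 >> 25 = 18 */
	reqf *= GT_FREQUENCY_MULTIPLIER;	/* 18 * 50 = 900 MHz */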
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index fdaa091..9b265a4 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1290,9 +1290,12 @@ static int i915_load_modeset_init(struct drm_device *dev)
* then we do not take part in VGA arbitration and the
* vga_client_register() fails with -ENODEV.
*/
- ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
- if (ret && ret != -ENODEV)
- goto out;
+ if (!HAS_PCH_SPLIT(dev)) {
+ ret = vga_client_register(dev->pdev, dev, NULL,
+ i915_vga_set_decode);
+ if (ret && ret != -ENODEV)
+ goto out;
+ }
intel_register_dsm_handler();
@@ -1348,6 +1351,12 @@ static int i915_load_modeset_init(struct drm_device *dev)
*/
intel_fbdev_initial_config(dev);
+ /*
+ * Must do this after fbcon init so that
+ * vgacon_save_screen() works during the handover.
+ */
+ i915_disable_vga_mem(dev);
+
/* Only enable hotplug handling once the fbdev is fully set up. */
dev_priv->enable_hotplug_processing = true;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ccb28ea..69d8ed5 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -157,25 +157,6 @@ MODULE_PARM_DESC(prefault_disable,
static struct drm_driver driver;
extern int intel_agp_enabled;
-#define INTEL_VGA_DEVICE(id, info) { \
- .class = PCI_BASE_CLASS_DISPLAY << 16, \
- .class_mask = 0xff0000, \
- .vendor = 0x8086, \
- .device = id, \
- .subvendor = PCI_ANY_ID, \
- .subdevice = PCI_ANY_ID, \
- .driver_data = (unsigned long) info }
-
-#define INTEL_QUANTA_VGA_DEVICE(info) { \
- .class = PCI_BASE_CLASS_DISPLAY << 16, \
- .class_mask = 0xff0000, \
- .vendor = 0x8086, \
- .device = 0x16a, \
- .subvendor = 0x152d, \
- .subdevice = 0x8990, \
- .driver_data = (unsigned long) info }
-
-
static const struct intel_device_info intel_i830_info = {
.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
@@ -350,118 +331,41 @@ static const struct intel_device_info intel_haswell_m_info = {
.has_vebox_ring = 1,
};
+/*
+ * Make sure any device matches here are ordered from most specific to most
+ * general. For example, since the Quanta match is based on the subsystem
+ * and subvendor IDs, we need it to come before the more general IVB
+ * PCI ID matches, otherwise we'll use the wrong info struct above.
+ */
+#define INTEL_PCI_IDS \
+ INTEL_I830_IDS(&intel_i830_info), \
+ INTEL_I845G_IDS(&intel_845g_info), \
+ INTEL_I85X_IDS(&intel_i85x_info), \
+ INTEL_I865G_IDS(&intel_i865g_info), \
+ INTEL_I915G_IDS(&intel_i915g_info), \
+ INTEL_I915GM_IDS(&intel_i915gm_info), \
+ INTEL_I945G_IDS(&intel_i945g_info), \
+ INTEL_I945GM_IDS(&intel_i945gm_info), \
+ INTEL_I965G_IDS(&intel_i965g_info), \
+ INTEL_G33_IDS(&intel_g33_info), \
+ INTEL_I965GM_IDS(&intel_i965gm_info), \
+ INTEL_GM45_IDS(&intel_gm45_info), \
+ INTEL_G45_IDS(&intel_g45_info), \
+ INTEL_PINEVIEW_IDS(&intel_pineview_info), \
+ INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \
+ INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \
+ INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \
+ INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \
+ INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
+ INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \
+ INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \
+ INTEL_HSW_D_IDS(&intel_haswell_d_info), \
+ INTEL_HSW_M_IDS(&intel_haswell_m_info), \
+ INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
+ INTEL_VLV_D_IDS(&intel_valleyview_d_info)
+
static const struct pci_device_id pciidlist[] = { /* aka */
- INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */
- INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */
- INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), /* I855_GM */
- INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
- INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), /* I865_G */
- INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */
- INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */
- INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */
- INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */
- INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */
- INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */
- INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */
- INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */
- INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */
- INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */
- INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */
- INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */
- INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */
- INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */
- INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */
- INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */
- INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */
- INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */
- INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */
- INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */
- INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */
- INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */
- INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
- INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
- INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
- INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
- INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
- INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
- INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
- INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
- INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
- INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
- INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
- INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
- INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
- INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
- INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
- INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
- INTEL_QUANTA_VGA_DEVICE(&intel_ivybridge_q_info), /* Quanta transcode */
- INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
- INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
- INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
- INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */
- INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
- INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
- INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */
- INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
- INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
- INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
- INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */
- INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */
- INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */
- INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */
- INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */
- INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */
- INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
- INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
- INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */
- INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
- INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
- INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */
- INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
- INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
- INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */
- INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */
- INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */
- INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */
- INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */
- INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */
- INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */
- INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
- INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
- INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */
- INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
- INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
- INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */
- INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
- INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
- INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */
- INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */
- INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */
- INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */
- INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */
- INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */
- INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */
- INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
- INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
- INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */
- INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
- INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
- INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */
- INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
- INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
- INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */
- INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */
- INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */
- INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */
- INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */
- INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */
- INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */
- INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
- INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
- INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info),
- INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info),
- INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
- INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
+ INTEL_PCI_IDS,
{0, 0, 0}
};
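
Because the PCI core stops at the first matching table entry, the Quanta
entry must precede the generic Ivybridge IDs. A minimal sketch of that
first-match-wins walk, modelled on the core's pci_match_id()
(pci_match_one_device() lives in the PCI core's private header;
illustrative only, not part of this patch):

#include <linux/pci.h>

/* Sketch of first-match-wins matching over a sentinel-terminated
 * pci_device_id table; modelled on pci_match_id(), not a copy of it. */
static const struct pci_device_id *
match_first(const struct pci_device_id *ids, struct pci_dev *dev)
{
	while (ids->vendor || ids->subvendor || ids->class_mask) {
		if (pci_match_one_device(ids, dev))
			return ids;	/* e.g. the Quanta subsystem match */
		ids++;
	}
	return NULL;
}
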
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 52a3785..35874b3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1236,6 +1236,13 @@ typedef struct drm_i915_private {
unsigned int fsb_freq, mem_freq, is_ddr3;
+ /**
+ * wq - Driver workqueue for GEM.
+ *
+ * NOTE: Work items scheduled here are not allowed to grab any modeset
+ * locks, for otherwise the flushing done in the pageflip code will
+ * result in deadlocks.
+ */
struct workqueue_struct *wq;
/* Display functions */
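
To make the rule in this comment concrete, a hedged sketch of the
deadlock it guards against; bad_work_fn() is hypothetical and not part
of this patch:

#include <linux/workqueue.h>
#include <drm/drmP.h>

/* Hypothetical offender: a work item on dev_priv->wq that takes a
 * modeset lock. */
static void bad_work_fn(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);

	mutex_lock(&dev_priv->dev->mode_config.mutex);	/* blocks behind flip */
	/* ... touch modeset state ... */
	mutex_unlock(&dev_priv->dev->mode_config.mutex);
}

/* Pageflip side (simplified): the queue is flushed while the same lock
 * is held, so the flush waits on work that waits on the lock. */
static void pageflip_side(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->mode_config.mutex);
	flush_workqueue(dev_priv->wq);		/* deadlock */
	mutex_unlock(&dev->mode_config.mutex);
}
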
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2d1cb10..d9e337f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -212,7 +212,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
void *i915_gem_object_alloc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
+ return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
}
void i915_gem_object_free(struct drm_i915_gem_object *obj)
@@ -1695,6 +1695,7 @@ static long
__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
bool purgeable_only)
{
+ struct list_head still_bound_list;
struct drm_i915_gem_object *obj, *next;
long count = 0;
@@ -1709,23 +1710,55 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
}
}
- list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
- global_list) {
+ /*
+ * As we may completely rewrite the bound list whilst unbinding
+ * (due to retiring requests) we have to strictly process only
+ * one element of the list at the time, and recheck the list
+ * on every iteration.
+ */
+ INIT_LIST_HEAD(&still_bound_list);
+ while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
struct i915_vma *vma, *v;
+ obj = list_first_entry(&dev_priv->mm.bound_list,
+ typeof(*obj), global_list);
+ list_move_tail(&obj->global_list, &still_bound_list);
+
if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
continue;
+ /*
+ * Hold a reference whilst we unbind this object, as we may
+ * end up waiting for and retiring requests. This might
+ * release the final reference (held by the active list)
+ * and result in the object being freed from under us.
+ *
+ * Note 1: Shrinking the bound list is special since only active
+ * (and hence bound) objects can contain such limbo objects, so
+ * we don't need special tricks for shrinking the unbound list.
+ * The only other place where we have to be careful with active
+ * objects suddenly disappearing due to retiring requests is the
+ * eviction code.
+ *
+ * Note 2: Even though the bound list doesn't hold a reference
+ * to the object we can safely grab one here: The final object
+ * unreferencing and the bound_list are both protected by the
+ * dev->struct_mutex and so we won't ever be able to observe an
+ * object on the bound_list with a reference count equal to zero.
+ */
+ drm_gem_object_reference(&obj->base);
+
list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
if (i915_vma_unbind(vma))
break;
- if (!i915_gem_object_put_pages(obj)) {
+ if (i915_gem_object_put_pages(obj) == 0)
count += obj->base.size >> PAGE_SHIFT;
- if (count >= target)
- return count;
- }
+
+ drm_gem_object_unreference(&obj->base);
}
+ list_splice(&still_bound_list, &dev_priv->mm.bound_list);
return count;
}
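
The shrink loop above follows a general pattern for walking a list that
can be rewritten underneath the walker; a generic sketch, with struct
item and process() as illustrative stand-ins:

#include <linux/list.h>

struct item {
	struct list_head link;
	/* ... payload ... */
};

static void process(struct item *it);	/* may rewrite the source list */

/* Generic form of the walk above: detach exactly one element onto a
 * private list before processing it, so concurrent rewrites of @src
 * (here: request retirement during unbind) never invalidate the
 * iterator; splice the survivors back afterwards. */
static void drain_one_at_a_time(struct list_head *src)
{
	LIST_HEAD(still_pending);

	while (!list_empty(src)) {
		struct item *it = list_first_entry(src, struct item, link);

		list_move_tail(&it->link, &still_pending);
		process(it);
	}
	list_splice(&still_pending, src);
}
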
@@ -1774,7 +1807,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
page_count = obj->base.size / PAGE_SIZE;
if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
- sg_free_table(st);
kfree(st);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index e918b05..7d5752f 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -42,27 +42,24 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
ret = i915_mutex_lock_interruptible(obj->base.dev);
if (ret)
- return ERR_PTR(ret);
+ goto err;
ret = i915_gem_object_get_pages(obj);
- if (ret) {
- st = ERR_PTR(ret);
- goto out;
- }
+ if (ret)
+ goto err_unlock;
+
+ i915_gem_object_pin_pages(obj);
/* Copy sg so that we make an independent mapping */
st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (st == NULL) {
- st = ERR_PTR(-ENOMEM);
- goto out;
+ ret = -ENOMEM;
+ goto err_unpin;
}
ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
- if (ret) {
- kfree(st);
- st = ERR_PTR(ret);
- goto out;
- }
+ if (ret)
+ goto err_free;
src = obj->pages->sgl;
dst = st->sgl;
@@ -73,17 +70,23 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
}
if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
- sg_free_table(st);
- kfree(st);
- st = ERR_PTR(-ENOMEM);
- goto out;
+ ret = -ENOMEM;
+ goto err_free_sg;
}
- i915_gem_object_pin_pages(obj);
-
-out:
mutex_unlock(&obj->base.dev->struct_mutex);
return st;
+
+err_free_sg:
+ sg_free_table(st);
+err_free:
+ kfree(st);
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+err_unlock:
+ mutex_unlock(&obj->base.dev->struct_mutex);
+err:
+ return ERR_PTR(ret);
}
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
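
The reworked error handling above is the kernel's standard unwind
ladder; a minimal template of the idiom, with hypothetical
acquire/release pairs:

static int acquire_a(void);
static int acquire_b(void);
static void release_a(void);

/* Canonical shape of the ladder used above: each err_* label undoes
 * only the steps that succeeded before the failure, in reverse order. */
static int setup_both(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		goto err;

	ret = acquire_b();
	if (ret)
		goto err_release_a;

	return 0;

err_release_a:
	release_a();
err:
	return ret;
}
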
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 792c52a..bf34577 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -310,6 +310,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
else
ret = relocate_entry_gtt(obj, reloc);
+ if (ret)
+ return ret;
+
/* and update the user's relocation entry */
reloc->presumed_offset = target_offset;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 9969d10b..e15a1d9 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -201,6 +201,9 @@ int i915_gem_init_stolen(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int bios_reserved = 0;
+ if (dev_priv->gtt.stolen_size == 0)
+ return 0;
+
dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
if (dev_priv->mm.stolen_base == 0)
return 0;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 558e568..aba9d74 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -641,7 +641,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
if (WARN_ON(ring->id != RCS))
return NULL;
- obj = ring->private;
+ obj = ring->scratch.obj;
if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
return i915_error_object_create(dev_priv, obj);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a03b445..83cce0c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1027,8 +1027,13 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
dev_priv->display.hpd_irq_setup(dev);
spin_unlock(&dev_priv->irq_lock);
- queue_work(dev_priv->wq,
- &dev_priv->hotplug_work);
+ /*
+ * Our hotplug handler can grab modeset locks (by calling down into the
+ * fb helpers). Hence it must not be run on our own dev_priv->wq work
+ * queue for otherwise the flush_work in the pageflip code will
+ * deadlock.
+ */
+ schedule_work(&dev_priv->hotplug_work);
}
static void gmbus_irq_handler(struct drm_device *dev)
@@ -1655,7 +1660,13 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
wake_up_all(&ring->irq_queue);
}
- queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
+ /*
+ * Our reset work can grab modeset locks (since it needs to reset the
+ * state of outstanding pageflips). Hence it must not be run on our own
+ * dev_priv->wq work queue for otherwise the flush_work in the pageflip
+ * code will deadlock.
+ */
+ schedule_work(&dev_priv->gpu_error.work);
}
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
@@ -2027,9 +2038,9 @@ static void i915_hangcheck_elapsed(unsigned long data)
for_each_ring(ring, dev_priv, i) {
if (ring->hangcheck.score > FIRE) {
- DRM_ERROR("%s on %s\n",
- stuck[i] ? "stuck" : "no progress",
- ring->name);
+ DRM_INFO("%s on %s\n",
+ stuck[i] ? "stuck" : "no progress",
+ ring->name);
rings_hung++;
}
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b6a58f7..c159e1a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -33,21 +33,6 @@
#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a) ((a) << 16)
-/*
- * The Bridge device's PCI config space has information about the
- * fb aperture size and the amount of pre-reserved memory.
- * This is all handled in the intel-gtt.ko module. i915.ko only
- * cares about the vga bit for the vga arbiter.
- */
-#define INTEL_GMCH_CTRL 0x52
-#define INTEL_GMCH_VGA_DISABLE (1 << 1)
-#define SNB_GMCH_CTRL 0x50
-#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
-#define SNB_GMCH_GGMS_MASK 0x3
-#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
-#define SNB_GMCH_GMS_MASK 0x1f
-
-
/* PCI config space */
#define HPLLCC 0xc0 /* 855 only */
@@ -245,6 +230,7 @@
* address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
+#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
#define MI_FLUSH_DW_STORE_INDEX (1<<21)
#define MI_INVALIDATE_TLB (1<<18)
@@ -693,6 +679,23 @@
#define FPGA_DBG_RM_NOCLAIM (1<<31)
#define DERRMR 0x44050
+#define DERRMR_PIPEA_SCANLINE (1<<0)
+#define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1)
+#define DERRMR_PIPEA_SPR_FLIP_DONE (1<<2)
+#define DERRMR_PIPEA_VBLANK (1<<3)
+#define DERRMR_PIPEA_HBLANK (1<<5)
+#define DERRMR_PIPEB_SCANLINE (1<<8)
+#define DERRMR_PIPEB_PRI_FLIP_DONE (1<<9)
+#define DERRMR_PIPEB_SPR_FLIP_DONE (1<<10)
+#define DERRMR_PIPEB_VBLANK (1<<11)
+#define DERRMR_PIPEB_HBLANK (1<<13)
+/* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
+#define DERRMR_PIPEC_SCANLINE (1<<14)
+#define DERRMR_PIPEC_PRI_FLIP_DONE (1<<15)
+#define DERRMR_PIPEC_SPR_FLIP_DONE (1<<20)
+#define DERRMR_PIPEC_VBLANK (1<<21)
+#define DERRMR_PIPEC_HBLANK (1<<22)
+
/* GM45+ chicken bits -- debug workaround bits that may be required
* for various sorts of correct behavior. The top 16 bits of each are
@@ -3310,6 +3313,7 @@
#define MCURSOR_PIPE_A 0x00
#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
+#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
#define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084)
#define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088)
#define CURSOR_POS_MASK 0x007FF
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index a777e7f..c8c4112 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -224,6 +224,18 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
+static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ vlv_gpu_freq(dev_priv->mem_freq,
+ dev_priv->rps.rpe_delay));
+}
+
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
@@ -366,6 +378,7 @@ static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
+static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
@@ -409,6 +422,14 @@ static const struct attribute *gen6_attrs[] = {
NULL,
};
+static const struct attribute *vlv_attrs[] = {
+ &dev_attr_gt_cur_freq_mhz.attr,
+ &dev_attr_gt_max_freq_mhz.attr,
+ &dev_attr_gt_min_freq_mhz.attr,
+ &dev_attr_vlv_rpe_freq_mhz.attr,
+ NULL,
+};
+
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
@@ -492,11 +513,13 @@ void i915_setup_sysfs(struct drm_device *dev)
DRM_ERROR("l3 parity sysfs setup failed\n");
}
- if (INTEL_INFO(dev)->gen >= 6) {
+ ret = 0;
+ if (IS_VALLEYVIEW(dev))
+ ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs);
+ else if (INTEL_INFO(dev)->gen >= 6)
ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
- if (ret)
- DRM_ERROR("gen6 sysfs setup failed\n");
- }
+ if (ret)
+ DRM_ERROR("RPS sysfs setup failed\n");
ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
&error_state_attr);
@@ -507,7 +530,10 @@ void i915_setup_sysfs(struct drm_device *dev)
void i915_teardown_sysfs(struct drm_device *dev)
{
sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
- sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
+ if (IS_VALLEYVIEW(dev))
+ sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
+ else
+ sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index b5a3875..ea9022e 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -688,7 +688,7 @@ static void intel_crt_reset(struct drm_connector *connector)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_attached_crt(connector);
- if (HAS_PCH_SPLIT(dev)) {
+ if (INTEL_INFO(dev)->gen >= 5) {
u32 adpa;
adpa = I915_READ(crt->adpa_reg);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 38452d8..2489d0b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2077,8 +2077,10 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
else
dspcntr &= ~DISPPLANE_TILED;
- /* must disable */
- dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+ if (IS_HASWELL(dev))
+ dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
+ else
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
I915_WRITE(reg, dspcntr);
@@ -6762,8 +6764,10 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
cntl |= CURSOR_MODE_DISABLE;
}
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev)) {
cntl |= CURSOR_PIPE_CSC_ENABLE;
+ cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
+ }
I915_WRITE(CURCNTR_IVB(pipe), cntl);
intel_crtc->cursor_visible = visible;
@@ -7309,8 +7313,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
}
}
- pipe_config->adjusted_mode.clock = clock.dot *
- pipe_config->pixel_multiplier;
+ pipe_config->adjusted_mode.clock = clock.dot;
}
static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
@@ -7828,12 +7831,6 @@ err:
return ret;
}
-/*
- * On gen7 we currently use the blit ring because (in early silicon at least)
- * the render ring doesn't give us interrpts for page flip completion, which
- * means clients will hang after the first flip is queued. Fortunately the
- * blit ring generates interrupts properly, so use it instead.
- */
static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -7842,9 +7839,13 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+ struct intel_ring_buffer *ring;
uint32_t plane_bit = 0;
- int ret;
+ int len, ret;
+
+ ring = obj->ring;
+ if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS)
+ ring = &dev_priv->ring[BCS];
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
@@ -7866,10 +7867,34 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
goto err_unpin;
}
- ret = intel_ring_begin(ring, 4);
+ len = 4;
+ if (ring->id == RCS)
+ len += 6;
+
+ ret = intel_ring_begin(ring, len);
if (ret)
goto err_unpin;
+ /* Unmask the flip-done completion message. Note that the bspec says that
+ * we should do this for both the BCS and RCS, and that we must not unmask
+ * more than one flip event at any time (or ensure that one flip message
+ * can be sent by waiting for flip-done prior to queueing new flips).
+ * Experimentation says that BCS works despite DERRMR masking all
+ * flip-done completion events and that unmasking all planes at once
+ * for the RCS also doesn't appear to drop events. Setting the DERRMR
+ * to zero does lead to lockups within MI_DISPLAY_FLIP.
+ */
+ if (ring->id == RCS) {
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, DERRMR);
+ intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+ DERRMR_PIPEB_PRI_FLIP_DONE |
+ DERRMR_PIPEC_PRI_FLIP_DONE));
+ intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1));
+ intel_ring_emit(ring, DERRMR);
+ intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
+ }
+
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
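
To make the reservation arithmetic concrete, a sketch of the dword
budget behind the len computation above: the "+6" covers exactly the
six RCS-only emits shown (three dwords for the MI_LOAD_REGISTER_IMM,
three for the MI_STORE_REGISTER_MEM), while the base figure of 4 is
taken on trust from "len = 4" since only three of those emits are
visible in this hunk:

/* Illustrative only; mirrors the len computation in the hunk above. */
static int gen7_flip_ring_len(bool on_render_ring)
{
	int len = 4;		/* MI_DISPLAY_FLIP sequence, per "len = 4" */

	if (on_render_ring)
		len += 3 + 3;	/* LRI(1): op,reg,val + SRM(1): op,reg,addr */

	return len;
}
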
@@ -10022,6 +10047,33 @@ static void i915_disable_vga(struct drm_device *dev)
POSTING_READ(vga_reg);
}
+static void i915_enable_vga_mem(struct drm_device *dev)
+{
+ /* Enable VGA memory on Intel HD */
+ if (HAS_PCH_SPLIT(dev)) {
+ vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+ outb(inb(VGA_MSR_READ) | VGA_MSR_MEM_EN, VGA_MSR_WRITE);
+ vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
+ VGA_RSRC_LEGACY_MEM |
+ VGA_RSRC_NORMAL_IO |
+ VGA_RSRC_NORMAL_MEM);
+ vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+ }
+}
+
+void i915_disable_vga_mem(struct drm_device *dev)
+{
+ /* Disable VGA memory on Intel HD */
+ if (HAS_PCH_SPLIT(dev)) {
+ vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+ outb(inb(VGA_MSR_READ) & ~VGA_MSR_MEM_EN, VGA_MSR_WRITE);
+ vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
+ VGA_RSRC_NORMAL_IO |
+ VGA_RSRC_NORMAL_MEM);
+ vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+ }
+}
+
void intel_modeset_init_hw(struct drm_device *dev)
{
intel_init_power_well(dev);
@@ -10300,6 +10352,7 @@ void i915_redisable_vga(struct drm_device *dev)
if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
i915_disable_vga(dev);
+ i915_disable_vga_mem(dev);
}
}
@@ -10513,6 +10566,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_disable_fbc(dev);
+ i915_enable_vga_mem(dev);
+
intel_disable_gt_powersave(dev);
ironlake_teardown_rc6(dev);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1760808..a47799e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -551,7 +551,7 @@ extern int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode);
extern void intel_panel_fini(struct intel_panel *panel);
-extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
@@ -792,5 +792,6 @@ extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+extern void i915_disable_vga_mem(struct drm_device *dev);
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 4d33278..831a5c0 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -128,8 +128,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- struct drm_display_mode *fixed_mode =
- lvds_encoder->attached_connector->base.panel.fixed_mode;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc->config.adjusted_mode;
int pipe = crtc->pipe;
u32 temp;
@@ -183,9 +183,9 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
temp &= ~LVDS_ENABLE_DITHER;
}
temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
- if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
temp |= LVDS_HSYNC_POLARITY;
- if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
temp |= LVDS_VSYNC_POLARITY;
I915_WRITE(lvds_encoder->reg, temp);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index cfb8fb6..119771f 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -173,7 +173,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
return ASLE_BACKLIGHT_FAILED;
intel_panel_set_backlight(dev, bclp, 255);
- iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv);
+ iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
return 0;
}
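
A quick worked check of the two backlight formulas above (standalone
userspace program, illustrative): the old expression truncates small
non-zero requests to 0%, while DIV_ROUND_UP guarantees at least 1%, and
the two agree at full scale:

#include <assert.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	assert((1 * 0x64) / 0xff == 0);		   /* old: bclp=1 -> 0% */
	assert(DIV_ROUND_UP(1 * 100, 255) == 1);   /* new: bclp=1 -> 1% */
	assert((255 * 0x64) / 0xff == 100);	   /* both: 255 -> 100% */
	assert(DIV_ROUND_UP(255 * 100, 255) == 100);
	return 0;
}
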
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index a43c33b..42114ec 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -36,20 +36,12 @@
#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
void
-intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode)
{
- adjusted_mode->hdisplay = fixed_mode->hdisplay;
- adjusted_mode->hsync_start = fixed_mode->hsync_start;
- adjusted_mode->hsync_end = fixed_mode->hsync_end;
- adjusted_mode->htotal = fixed_mode->htotal;
+ drm_mode_copy(adjusted_mode, fixed_mode);
- adjusted_mode->vdisplay = fixed_mode->vdisplay;
- adjusted_mode->vsync_start = fixed_mode->vsync_start;
- adjusted_mode->vsync_end = fixed_mode->vsync_end;
- adjusted_mode->vtotal = fixed_mode->vtotal;
-
- adjusted_mode->clock = fixed_mode->clock;
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
}
/* adjusted_mode has been preset to be the panel's fixed mode */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 4605682..0c115cc 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3447,14 +3447,24 @@ int intel_enable_rc6(const struct drm_device *dev)
static void gen6_enable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 enabled_intrs;
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON(dev_priv->rps.pm_iir);
snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
spin_unlock_irq(&dev_priv->irq_lock);
+
/* only unmask PM interrupts we need. Mask all others. */
- I915_WRITE(GEN6_PMINTRMSK, ~GEN6_PM_RPS_EVENTS);
+ enabled_intrs = GEN6_PM_RPS_EVENTS;
+
+ /* IVB and SNB hang hard on a looping batchbuffer
+ * if GEN6_PM_RP_UP_EI_EXPIRED is masked.
+ */
+ if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+ enabled_intrs |= GEN6_PM_RP_UP_EI_EXPIRED;
+
+ I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
}
static void gen6_enable_rps(struct drm_device *dev)
@@ -4950,8 +4960,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
- g4x_disable_trickle_feed(dev);
-
/* WaVSRefCountFullforceMissDisable:hsw */
gen7_setup_fixed_func_scheduler(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f05ccea..460ee10 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,16 +33,6 @@
#include "i915_trace.h"
#include "intel_drv.h"
-/*
- * 965+ support PIPE_CONTROL commands, which provide finer grained control
- * over cache flushing.
- */
-struct pipe_control {
- struct drm_i915_gem_object *obj;
- volatile u32 *cpu_page;
- u32 gtt_offset;
-};
-
static inline int ring_space(struct intel_ring_buffer *ring)
{
int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
@@ -175,8 +165,7 @@ gen4_render_ring_flush(struct intel_ring_buffer *ring,
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
- struct pipe_control *pc = ring->private;
- u32 scratch_addr = pc->gtt_offset + 128;
+ u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
@@ -213,8 +202,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
- struct pipe_control *pc = ring->private;
- u32 scratch_addr = pc->gtt_offset + 128;
+ u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -306,8 +294,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
- struct pipe_control *pc = ring->private;
- u32 scratch_addr = pc->gtt_offset + 128;
+ u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
/*
@@ -481,68 +468,43 @@ out:
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
- struct pipe_control *pc;
- struct drm_i915_gem_object *obj;
int ret;
- if (ring->private)
+ if (ring->scratch.obj)
return 0;
- pc = kmalloc(sizeof(*pc), GFP_KERNEL);
- if (!pc)
- return -ENOMEM;
-
- obj = i915_gem_alloc_object(ring->dev, 4096);
- if (obj == NULL) {
+ ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
+ if (ring->scratch.obj == NULL) {
DRM_ERROR("Failed to allocate seqno page\n");
ret = -ENOMEM;
goto err;
}
- i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
- ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
+ ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
if (ret)
goto err_unref;
- pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
- pc->cpu_page = kmap(sg_page(obj->pages->sgl));
- if (pc->cpu_page == NULL) {
+ ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
+ ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
+ if (ring->scratch.cpu_page == NULL) {
ret = -ENOMEM;
goto err_unpin;
}
DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
- ring->name, pc->gtt_offset);
-
- pc->obj = obj;
- ring->private = pc;
+ ring->name, ring->scratch.gtt_offset);
return 0;
err_unpin:
- i915_gem_object_unpin(obj);
+ i915_gem_object_unpin(ring->scratch.obj);
err_unref:
- drm_gem_object_unreference(&obj->base);
+ drm_gem_object_unreference(&ring->scratch.obj->base);
err:
- kfree(pc);
return ret;
}
-static void
-cleanup_pipe_control(struct intel_ring_buffer *ring)
-{
- struct pipe_control *pc = ring->private;
- struct drm_i915_gem_object *obj;
-
- obj = pc->obj;
-
- kunmap(sg_page(obj->pages->sgl));
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(&obj->base);
-
- kfree(pc);
-}
-
static int init_render_ring(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
@@ -607,16 +569,16 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
- if (!ring->private)
+ if (ring->scratch.obj == NULL)
return;
- if (HAS_BROKEN_CS_TLB(dev))
- drm_gem_object_unreference(to_gem_object(ring->private));
-
- if (INTEL_INFO(dev)->gen >= 5)
- cleanup_pipe_control(ring);
+ if (INTEL_INFO(dev)->gen >= 5) {
+ kunmap(sg_page(ring->scratch.obj->pages->sgl));
+ i915_gem_object_unpin(ring->scratch.obj);
+ }
- ring->private = NULL;
+ drm_gem_object_unreference(&ring->scratch.obj->base);
+ ring->scratch.obj = NULL;
}
static void
@@ -742,8 +704,7 @@ do { \
static int
pc_render_add_request(struct intel_ring_buffer *ring)
{
- struct pipe_control *pc = ring->private;
- u32 scratch_addr = pc->gtt_offset + 128;
+ u32 scratch_addr = ring->scratch.gtt_offset + 128;
int ret;
/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
@@ -761,7 +722,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
- intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
@@ -780,7 +741,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
- intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
@@ -814,15 +775,13 @@ ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
static u32
pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
- struct pipe_control *pc = ring->private;
- return pc->cpu_page[0];
+ return ring->scratch.cpu_page[0];
}
static void
pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
- struct pipe_control *pc = ring->private;
- pc->cpu_page[0] = seqno;
+ ring->scratch.cpu_page[0] = seqno;
}
static bool
@@ -1141,8 +1100,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
} else {
- struct drm_i915_gem_object *obj = ring->private;
- u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
+ u32 cs_offset = ring->scratch.gtt_offset;
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
@@ -1835,7 +1793,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
return ret;
}
- ring->private = obj;
+ ring->scratch.obj = obj;
+ ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
}
return intel_init_ring_buffer(dev, ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 432ad53..68b1ca974 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -155,7 +155,11 @@ struct intel_ring_buffer {
struct intel_ring_hangcheck hangcheck;
- void *private;
+ struct {
+ struct drm_i915_gem_object *obj;
+ u32 gtt_offset;
+ volatile u32 *cpu_page;
+ } scratch;
};
static inline bool
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 317e058..85037b9 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1151,11 +1151,10 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = intel_encoder->base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
- &intel_crtc->config.adjusted_mode;
- struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
+ &crtc->config.adjusted_mode;
+ struct drm_display_mode *mode = &crtc->config.requested_mode;
struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
u32 sdvox;
struct intel_sdvo_in_out_map in_out;
@@ -1213,13 +1212,15 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
* adjusted_mode.
*/
intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+ input_dtd.part1.clock /= crtc->config.pixel_multiplier;
+
if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
DRM_INFO("Setting input timings on %s failed\n",
SDVO_NAME(intel_sdvo));
- switch (intel_crtc->config.pixel_multiplier) {
+ switch (crtc->config.pixel_multiplier) {
default:
WARN(1, "unknown pixel multiplier specified\n");
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
@@ -1252,9 +1253,9 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
}
if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
- sdvox |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
+ sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else
- sdvox |= SDVO_PIPE_SEL(intel_crtc->pipe);
+ sdvox |= SDVO_PIPE_SEL(crtc->pipe);
if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
@@ -1264,7 +1265,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
/* done in crtc_mode_set as it lives inside the dpll register */
} else {
- sdvox |= (intel_crtc->config.pixel_multiplier - 1)
+ sdvox |= (crtc->config.pixel_multiplier - 1)
<< SDVO_PORT_MULTIPLY_SHIFT;
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 78b621c..ad6ec4b 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -260,8 +260,11 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (obj->tiling_mode != I915_TILING_NONE)
sprctl |= SPRITE_TILED;
- /* must disable */
- sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+ if (IS_HASWELL(dev))
+ sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
+ else
+ sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+
sprctl |= SPRITE_ENABLE;
if (IS_HASWELL(dev))
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8f5bc86..8649f1c 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -261,7 +261,7 @@ void intel_uncore_init(struct drm_device *dev)
}
}
-void intel_uncore_sanitize(struct drm_device *dev)
+static void intel_uncore_forcewake_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -272,6 +272,11 @@ void intel_uncore_sanitize(struct drm_device *dev)
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
__gen6_gt_force_wake_mt_reset(dev_priv);
}
+}
+
+void intel_uncore_sanitize(struct drm_device *dev)
+{
+ intel_uncore_forcewake_reset(dev);
/* BIOS often leaves RC6 enabled, but disable it for hw init */
intel_disable_gt_powersave(dev);
@@ -549,6 +554,8 @@ static int gen6_do_reset(struct drm_device *dev)
/* Spin waiting for the device to ack the reset request */
ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+ intel_uncore_forcewake_reset(dev);
+
/* If reset with a user forcewake, try to restore, otherwise turn it off */
if (dev_priv->uncore.forcewake_count)
dev_priv->uncore.funcs.force_wake_get(dev_priv);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8863644..e893c53 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -636,7 +636,8 @@ int nouveau_pmops_resume(struct device *dev)
nouveau_fbcon_set_suspend(drm_dev, 0);
nouveau_fbcon_zfill_all(drm_dev);
- nouveau_display_resume(drm_dev);
+ if (drm_dev->mode_config.num_crtc)
+ nouveau_display_resume(drm_dev);
nv_suspend_set_printk_level(NV_DBG_DEBUG);
return 0;
}
@@ -671,7 +672,8 @@ static int nouveau_pmops_thaw(struct device *dev)
if (drm_dev->mode_config.num_crtc)
nouveau_fbcon_set_suspend(drm_dev, 0);
nouveau_fbcon_zfill_all(drm_dev);
- nouveau_display_resume(drm_dev);
+ if (drm_dev->mode_config.num_crtc)
+ nouveau_display_resume(drm_dev);
nv_suspend_set_printk_level(NV_DBG_DEBUG);
return 0;
}
@@ -906,7 +908,8 @@ static int nouveau_pmops_runtime_resume(struct device *dev)
pci_set_master(pdev);
ret = nouveau_do_resume(drm_dev);
- nouveau_display_resume(drm_dev);
+ if (drm_dev->mode_config.num_crtc)
+ nouveau_display_resume(drm_dev);
drm_kms_helper_poll_enable(drm_dev);
/* do magic */
nv_mask(device, 0x88488, (1 << 25), (1 << 25));
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index e893f6e..af02597 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -257,9 +257,9 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
if (!conflict->bridge_has_one_vga) {
vga_irq_set_state(conflict, false);
flags |= PCI_VGA_STATE_CHANGE_DECODES;
- if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
+ if (match & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
pci_bits |= PCI_COMMAND_MEMORY;
- if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
+ if (match & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
pci_bits |= PCI_COMMAND_IO;
}
@@ -267,11 +267,11 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
flags |= PCI_VGA_STATE_CHANGE_BRIDGE;
pci_set_vga_state(conflict->pdev, false, pci_bits, flags);
- conflict->owns &= ~lwants;
+ conflict->owns &= ~match;
/* If he also owned non-legacy, that is no longer the case */
- if (lwants & VGA_RSRC_LEGACY_MEM)
+ if (match & VGA_RSRC_LEGACY_MEM)
conflict->owns &= ~VGA_RSRC_NORMAL_MEM;
- if (lwants & VGA_RSRC_LEGACY_IO)
+ if (match & VGA_RSRC_LEGACY_IO)
conflict->owns &= ~VGA_RSRC_NORMAL_IO;
}
@@ -644,10 +644,12 @@ bail:
static inline void vga_update_device_decodes(struct vga_device *vgadev,
int new_decodes)
{
- int old_decodes;
- struct vga_device *new_vgadev, *conflict;
+ int old_decodes, decodes_removed, decodes_unlocked;
old_decodes = vgadev->decodes;
+ decodes_removed = ~new_decodes & old_decodes;
+ decodes_unlocked = vgadev->locks & decodes_removed;
+ vgadev->owns &= ~decodes_removed;
vgadev->decodes = new_decodes;
pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
@@ -656,31 +658,22 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
vga_iostate_to_str(vgadev->decodes),
vga_iostate_to_str(vgadev->owns));
-
- /* if we own the decodes we should move them along to
- another card */
- if ((vgadev->owns & old_decodes) && (vga_count > 1)) {
- /* set us to own nothing */
- vgadev->owns &= ~old_decodes;
- list_for_each_entry(new_vgadev, &vga_list, list) {
- if ((new_vgadev != vgadev) &&
- (new_vgadev->decodes & VGA_RSRC_LEGACY_MASK)) {
- pr_info("vgaarb: transferring owner from PCI:%s to PCI:%s\n", pci_name(vgadev->pdev), pci_name(new_vgadev->pdev));
- conflict = __vga_tryget(new_vgadev, VGA_RSRC_LEGACY_MASK);
- if (!conflict)
- __vga_put(new_vgadev, VGA_RSRC_LEGACY_MASK);
- break;
- }
- }
+ /* if we removed locked decodes, lock count goes to zero, and release */
+ if (decodes_unlocked) {
+ if (decodes_unlocked & VGA_RSRC_LEGACY_IO)
+ vgadev->io_lock_cnt = 0;
+ if (decodes_unlocked & VGA_RSRC_LEGACY_MEM)
+ vgadev->mem_lock_cnt = 0;
+ __vga_put(vgadev, decodes_unlocked);
}
/* change decodes counter */
- if (old_decodes != new_decodes) {
- if (new_decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
- vga_decode_count++;
- else
- vga_decode_count--;
- }
+ if (old_decodes & VGA_RSRC_LEGACY_MASK &&
+ !(new_decodes & VGA_RSRC_LEGACY_MASK))
+ vga_decode_count--;
+ if (!(old_decodes & VGA_RSRC_LEGACY_MASK) &&
+ new_decodes & VGA_RSRC_LEGACY_MASK)
+ vga_decode_count++;
pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
}
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index 89cfd64..ef91b8a 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -246,7 +246,7 @@ static struct vrm_model vrm_models[] = {
*/
static u8 get_via_model_d_vrm(void)
{
- unsigned int vid, brand, dummy;
+ unsigned int vid, brand, __maybe_unused dummy;
static const char *brands[4] = {
"C7-M", "C7", "Eden", "C7-D"
};
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 18c0623..70a39a8 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -233,8 +233,7 @@ static int ina2xx_probe(struct i2c_client *client,
return -ENOMEM;
if (dev_get_platdata(&client->dev)) {
- pdata =
- (struct ina2xx_platform_data *)dev_get_platdata(&client->dev);
+ pdata = dev_get_platdata(&client->dev);
shunt = pdata->shunt_uohms;
} else if (!of_property_read_u32(client->dev.of_node,
"shunt-resistor", &val)) {
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 81e8cd4..c60b901 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_IRQCHIP) += irqchip.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o
+obj-$(CONFIG_ARCH_MMP) += irq-mmp.o
obj-$(CONFIG_ARCH_MVEBU) += irq-armada-370-xp.o
obj-$(CONFIG_ARCH_MXS) += irq-mxs.o
obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index ee7c503..d0e9480 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -453,6 +453,12 @@ static void gic_cpu_init(struct gic_chip_data *gic)
writel_relaxed(1, base + GIC_CPU_CTRL);
}
+void gic_cpu_if_down(void)
+{
+ void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
+ writel_relaxed(0, cpu_base + GIC_CPU_CTRL);
+}
+
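
No caller of gic_cpu_if_down() appears in this diff; a hypothetical
platform-side sketch of the intended use, assuming a CPU-hotplug-style
hook (names illustrative only):

/* Hypothetical platform hook: quiesce the GIC CPU interface before
 * this core is powered down, so no interrupt is taken on a dying CPU. */
static void example_cpu_die(unsigned int cpu)
{
	gic_cpu_if_down();
	/* ... enter platform-specific low-power state ... */
}
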
#ifdef CONFIG_CPU_PM
/*
* Saves the GIC distributor registers during suspend or idle. Must be called
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
new file mode 100644
index 0000000..2cb7cd0
--- /dev/null
+++ b/drivers/irqchip/irq-mmp.c
@@ -0,0 +1,495 @@
+/*
+ * linux/arch/arm/mach-mmp/irq.c
+ *
+ * Generic IRQ handling, GPIO IRQ demultiplexing, etc.
+ * Copyright (C) 2008 - 2012 Marvell Technology Group Ltd.
+ *
+ * Author: Bin Yang <bin.yang@marvell.com>
+ * Haojian Zhuang <haojian.zhuang@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#include "irqchip.h"
+
+#define MAX_ICU_NR 16
+
+#define PJ1_INT_SEL 0x10c
+#define PJ4_INT_SEL 0x104
+
+/* bit fields in PJ1_INT_SEL and PJ4_INT_SEL */
+#define SEL_INT_PENDING (1 << 6)
+#define SEL_INT_NUM_MASK 0x3f
+
+struct icu_chip_data {
+ int nr_irqs;
+ unsigned int virq_base;
+ unsigned int cascade_irq;
+ void __iomem *reg_status;
+ void __iomem *reg_mask;
+ unsigned int conf_enable;
+ unsigned int conf_disable;
+ unsigned int conf_mask;
+ unsigned int clr_mfp_irq_base;
+ unsigned int clr_mfp_hwirq;
+ struct irq_domain *domain;
+};
+
+struct mmp_intc_conf {
+ unsigned int conf_enable;
+ unsigned int conf_disable;
+ unsigned int conf_mask;
+};
+
+static void __iomem *mmp_icu_base;
+static struct icu_chip_data icu_data[MAX_ICU_NR];
+static int max_icu_nr;
+
+extern void mmp2_clear_pmic_int(void);
+
+static void icu_mask_ack_irq(struct irq_data *d)
+{
+ struct irq_domain *domain = d->domain;
+ struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
+ int hwirq;
+ u32 r;
+
+ hwirq = d->irq - data->virq_base;
+ if (data == &icu_data[0]) {
+ r = readl_relaxed(mmp_icu_base + (hwirq << 2));
+ r &= ~data->conf_mask;
+ r |= data->conf_disable;
+ writel_relaxed(r, mmp_icu_base + (hwirq << 2));
+ } else {
+#ifdef CONFIG_CPU_MMP2
+ if ((data->virq_base == data->clr_mfp_irq_base)
+ && (hwirq == data->clr_mfp_hwirq))
+ mmp2_clear_pmic_int();
+#endif
+ r = readl_relaxed(data->reg_mask) | (1 << hwirq);
+ writel_relaxed(r, data->reg_mask);
+ }
+}
+
+static void icu_mask_irq(struct irq_data *d)
+{
+ struct irq_domain *domain = d->domain;
+ struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
+ int hwirq;
+ u32 r;
+
+ hwirq = d->irq - data->virq_base;
+ if (data == &icu_data[0]) {
+ r = readl_relaxed(mmp_icu_base + (hwirq << 2));
+ r &= ~data->conf_mask;
+ r |= data->conf_disable;
+ writel_relaxed(r, mmp_icu_base + (hwirq << 2));
+ } else {
+ r = readl_relaxed(data->reg_mask) | (1 << hwirq);
+ writel_relaxed(r, data->reg_mask);
+ }
+}
+
+static void icu_unmask_irq(struct irq_data *d)
+{
+ struct irq_domain *domain = d->domain;
+ struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
+ int hwirq;
+ u32 r;
+
+ hwirq = d->irq - data->virq_base;
+ if (data == &icu_data[0]) {
+ r = readl_relaxed(mmp_icu_base + (hwirq << 2));
+ r &= ~data->conf_mask;
+ r |= data->conf_enable;
+ writel_relaxed(r, mmp_icu_base + (hwirq << 2));
+ } else {
+ r = readl_relaxed(data->reg_mask) & ~(1 << hwirq);
+ writel_relaxed(r, data->reg_mask);
+ }
+}
+
+struct irq_chip icu_irq_chip = {
+ .name = "icu_irq",
+ .irq_mask = icu_mask_irq,
+ .irq_mask_ack = icu_mask_ack_irq,
+ .irq_unmask = icu_unmask_irq,
+};
+
+static void icu_mux_irq_demux(unsigned int irq, struct irq_desc *desc)
+{
+ struct irq_domain *domain;
+ struct icu_chip_data *data;
+ int i;
+ unsigned long mask, status, n;
+
+ for (i = 1; i < max_icu_nr; i++) {
+ if (irq == icu_data[i].cascade_irq) {
+ domain = icu_data[i].domain;
+ data = (struct icu_chip_data *)domain->host_data;
+ break;
+ }
+ }
+ if (i >= max_icu_nr) {
+ pr_err("Spurious irq %d in MMP INTC\n", irq);
+ return;
+ }
+
+ mask = readl_relaxed(data->reg_mask);
+ while (1) {
+ status = readl_relaxed(data->reg_status) & ~mask;
+ if (status == 0)
+ break;
+ for_each_set_bit(n, &status, BITS_PER_LONG) {
+ generic_handle_irq(icu_data[i].virq_base + n);
+ }
+ }
+}
+
+static int mmp_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
+ set_irq_flags(irq, IRQF_VALID);
+ return 0;
+}
+
+static int mmp_irq_domain_xlate(struct irq_domain *d, struct device_node *node,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ *out_hwirq = intspec[0];
+ return 0;
+}
+
+const struct irq_domain_ops mmp_irq_domain_ops = {
+ .map = mmp_irq_domain_map,
+ .xlate = mmp_irq_domain_xlate,
+};
+
+static struct mmp_intc_conf mmp_conf = {
+ .conf_enable = 0x51,
+ .conf_disable = 0x0,
+ .conf_mask = 0x7f,
+};
+
+static struct mmp_intc_conf mmp2_conf = {
+ .conf_enable = 0x20,
+ .conf_disable = 0x0,
+ .conf_mask = 0x7f,
+};
+
+static asmlinkage void __exception_irq_entry
+mmp_handle_irq(struct pt_regs *regs)
+{
+ int irq, hwirq;
+
+ hwirq = readl_relaxed(mmp_icu_base + PJ1_INT_SEL);
+ if (!(hwirq & SEL_INT_PENDING))
+ return;
+ hwirq &= SEL_INT_NUM_MASK;
+ irq = irq_find_mapping(icu_data[0].domain, hwirq);
+ handle_IRQ(irq, regs);
+}
+
+static asmlinkage void __exception_irq_entry
+mmp2_handle_irq(struct pt_regs *regs)
+{
+ int irq, hwirq;
+
+ hwirq = readl_relaxed(mmp_icu_base + PJ4_INT_SEL);
+ if (!(hwirq & SEL_INT_PENDING))
+ return;
+ hwirq &= SEL_INT_NUM_MASK;
+ irq = irq_find_mapping(icu_data[0].domain, hwirq);
+ handle_IRQ(irq, regs);
+}
+
+/* MMP (ARMv5) */
+void __init icu_init_irq(void)
+{
+ int irq;
+
+ max_icu_nr = 1;
+ mmp_icu_base = ioremap(0xd4282000, 0x1000);
+ icu_data[0].conf_enable = mmp_conf.conf_enable;
+ icu_data[0].conf_disable = mmp_conf.conf_disable;
+ icu_data[0].conf_mask = mmp_conf.conf_mask;
+ icu_data[0].nr_irqs = 64;
+ icu_data[0].virq_base = 0;
+ icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
+ &irq_domain_simple_ops,
+ &icu_data[0]);
+ for (irq = 0; irq < 64; irq++) {
+ icu_mask_irq(irq_get_irq_data(irq));
+ irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
+ set_irq_flags(irq, IRQF_VALID);
+ }
+ irq_set_default_host(icu_data[0].domain);
+ set_handle_irq(mmp_handle_irq);
+}
+
+/* MMP2 (ARMv7) */
+void __init mmp2_init_icu(void)
+{
+ int irq, end;
+
+ max_icu_nr = 8;
+ mmp_icu_base = ioremap(0xd4282000, 0x1000);
+ icu_data[0].conf_enable = mmp2_conf.conf_enable;
+ icu_data[0].conf_disable = mmp2_conf.conf_disable;
+ icu_data[0].conf_mask = mmp2_conf.conf_mask;
+ icu_data[0].nr_irqs = 64;
+ icu_data[0].virq_base = 0;
+ icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
+ &irq_domain_simple_ops,
+ &icu_data[0]);
+ icu_data[1].reg_status = mmp_icu_base + 0x150;
+ icu_data[1].reg_mask = mmp_icu_base + 0x168;
+ icu_data[1].clr_mfp_irq_base = icu_data[0].virq_base +
+ icu_data[0].nr_irqs;
+ icu_data[1].clr_mfp_hwirq = 1; /* offset to IRQ_MMP2_PMIC_BASE */
+ icu_data[1].nr_irqs = 2;
+ icu_data[1].cascade_irq = 4;
+ icu_data[1].virq_base = icu_data[0].virq_base + icu_data[0].nr_irqs;
+ icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs,
+ icu_data[1].virq_base, 0,
+ &irq_domain_simple_ops,
+ &icu_data[1]);
+ icu_data[2].reg_status = mmp_icu_base + 0x154;
+ icu_data[2].reg_mask = mmp_icu_base + 0x16c;
+ icu_data[2].nr_irqs = 2;
+ icu_data[2].cascade_irq = 5;
+ icu_data[2].virq_base = icu_data[1].virq_base + icu_data[1].nr_irqs;
+ icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs,
+ icu_data[2].virq_base, 0,
+ &irq_domain_simple_ops,
+ &icu_data[2]);
+ icu_data[3].reg_status = mmp_icu_base + 0x180;
+ icu_data[3].reg_mask = mmp_icu_base + 0x17c;
+ icu_data[3].nr_irqs = 3;
+ icu_data[3].cascade_irq = 9;
+ icu_data[3].virq_base = icu_data[2].virq_base + icu_data[2].nr_irqs;
+ icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs,
+ icu_data[3].virq_base, 0,
+ &irq_domain_simple_ops,
+ &icu_data[3]);
+ icu_data[4].reg_status = mmp_icu_base + 0x158;
+ icu_data[4].reg_mask = mmp_icu_base + 0x170;
+ icu_data[4].nr_irqs = 5;
+ icu_data[4].cascade_irq = 17;
+ icu_data[4].virq_base = icu_data[3].virq_base + icu_data[3].nr_irqs;
+ icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs,
+ icu_data[4].virq_base, 0,
+ &irq_domain_simple_ops,
+ &icu_data[4]);
+ icu_data[5].reg_status = mmp_icu_base + 0x15c;
+ icu_data[5].reg_mask = mmp_icu_base + 0x174;
+ icu_data[5].nr_irqs = 15;
+ icu_data[5].cascade_irq = 35;
+ icu_data[5].virq_base = icu_data[4].virq_base + icu_data[4].nr_irqs;
+ icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs,
+ icu_data[5].virq_base, 0,
+ &irq_domain_simple_ops,
+ &icu_data[5]);
+ icu_data[6].reg_status = mmp_icu_base + 0x160;
+ icu_data[6].reg_mask = mmp_icu_base + 0x178;
+ icu_data[6].nr_irqs = 2;
+ icu_data[6].cascade_irq = 51;
+ icu_data[6].virq_base = icu_data[5].virq_base + icu_data[5].nr_irqs;
+ icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs,
+ icu_data[6].virq_base, 0,
+ &irq_domain_simple_ops,
+ &icu_data[6]);
+ icu_data[7].reg_status = mmp_icu_base + 0x188;
+ icu_data[7].reg_mask = mmp_icu_base + 0x184;
+ icu_data[7].nr_irqs = 2;
+ icu_data[7].cascade_irq = 55;
+ icu_data[7].virq_base = icu_data[6].virq_base + icu_data[6].nr_irqs;
+ icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs,
+ icu_data[7].virq_base, 0,
+ &irq_domain_simple_ops,
+ &icu_data[7]);
+ end = icu_data[7].virq_base + icu_data[7].nr_irqs;
+ for (irq = 0; irq < end; irq++) {
+ icu_mask_irq(irq_get_irq_data(irq));
+ if (irq == icu_data[1].cascade_irq ||
+ irq == icu_data[2].cascade_irq ||
+ irq == icu_data[3].cascade_irq ||
+ irq == icu_data[4].cascade_irq ||
+ irq == icu_data[5].cascade_irq ||
+ irq == icu_data[6].cascade_irq ||
+ irq == icu_data[7].cascade_irq) {
+ irq_set_chip(irq, &icu_irq_chip);
+ irq_set_chained_handler(irq, icu_mux_irq_demux);
+ } else {
+ irq_set_chip_and_handler(irq, &icu_irq_chip,
+ handle_level_irq);
+ }
+ set_irq_flags(irq, IRQF_VALID);
+ }
+ irq_set_default_host(icu_data[0].domain);
+ set_handle_irq(mmp2_handle_irq);
+}
+
+#ifdef CONFIG_OF
+static int __init mmp_init_bases(struct device_node *node)
+{
+ int ret, nr_irqs, irq, i = 0;
+
+ ret = of_property_read_u32(node, "mrvl,intc-nr-irqs", &nr_irqs);
+ if (ret) {
+ pr_err("Not found mrvl,intc-nr-irqs property\n");
+ return ret;
+ }
+
+ mmp_icu_base = of_iomap(node, 0);
+ if (!mmp_icu_base) {
+ pr_err("Failed to get interrupt controller register\n");
+ return -ENOMEM;
+ }
+
+ icu_data[0].virq_base = 0;
+ icu_data[0].domain = irq_domain_add_linear(node, nr_irqs,
+ &mmp_irq_domain_ops,
+ &icu_data[0]);
+ for (irq = 0; irq < nr_irqs; irq++) {
+ ret = irq_create_mapping(icu_data[0].domain, irq);
+ if (!ret) {
+ pr_err("Failed to mapping hwirq\n");
+ goto err;
+ }
+ if (!irq)
+ icu_data[0].virq_base = ret;
+ }
+ icu_data[0].nr_irqs = nr_irqs;
+ return 0;
+err:
+ if (icu_data[0].virq_base) {
+ for (i = 0; i < irq; i++)
+ irq_dispose_mapping(icu_data[0].virq_base + i);
+ }
+ irq_domain_remove(icu_data[0].domain);
+ iounmap(mmp_icu_base);
+ return -EINVAL;
+}
+
+static int __init mmp_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ int ret;
+
+ ret = mmp_init_bases(node);
+ if (ret < 0)
+ return ret;
+
+ icu_data[0].conf_enable = mmp_conf.conf_enable;
+ icu_data[0].conf_disable = mmp_conf.conf_disable;
+ icu_data[0].conf_mask = mmp_conf.conf_mask;
+ irq_set_default_host(icu_data[0].domain);
+ set_handle_irq(mmp_handle_irq);
+ max_icu_nr = 1;
+ return 0;
+}
+IRQCHIP_DECLARE(mmp_intc, "mrvl,mmp-intc", mmp_of_init);
+
+static int __init mmp2_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ int ret;
+
+ ret = mmp_init_bases(node);
+ if (ret < 0)
+ return ret;
+
+ icu_data[0].conf_enable = mmp2_conf.conf_enable;
+ icu_data[0].conf_disable = mmp2_conf.conf_disable;
+ icu_data[0].conf_mask = mmp2_conf.conf_mask;
+ irq_set_default_host(icu_data[0].domain);
+ set_handle_irq(mmp2_handle_irq);
+ max_icu_nr = 1;
+ return 0;
+}
+IRQCHIP_DECLARE(mmp2_intc, "mrvl,mmp2-intc", mmp2_of_init);
+
+static int __init mmp2_mux_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct resource res;
+ int i, ret, irq, j = 0;
+ u32 nr_irqs, mfp_irq;
+
+ if (!parent)
+ return -ENODEV;
+
+ i = max_icu_nr;
+ ret = of_property_read_u32(node, "mrvl,intc-nr-irqs",
+ &nr_irqs);
+ if (ret) {
+ pr_err("Not found mrvl,intc-nr-irqs property\n");
+ return -EINVAL;
+ }
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret < 0) {
+ pr_err("Not found reg property\n");
+ return -EINVAL;
+ }
+ icu_data[i].reg_status = mmp_icu_base + res.start;
+ ret = of_address_to_resource(node, 1, &res);
+ if (ret < 0) {
+ pr_err("Not found reg property\n");
+ return -EINVAL;
+ }
+ icu_data[i].reg_mask = mmp_icu_base + res.start;
+ icu_data[i].cascade_irq = irq_of_parse_and_map(node, 0);
+ if (!icu_data[i].cascade_irq)
+ return -EINVAL;
+
+ icu_data[i].virq_base = 0;
+ icu_data[i].domain = irq_domain_add_linear(node, nr_irqs,
+ &mmp_irq_domain_ops,
+ &icu_data[i]);
+ for (irq = 0; irq < nr_irqs; irq++) {
+ ret = irq_create_mapping(icu_data[i].domain, irq);
+ if (!ret) {
+ pr_err("Failed to mapping hwirq\n");
+ goto err;
+ }
+ if (!irq)
+ icu_data[i].virq_base = ret;
+ }
+ icu_data[i].nr_irqs = nr_irqs;
+ if (!of_property_read_u32(node, "mrvl,clr-mfp-irq",
+ &mfp_irq)) {
+ icu_data[i].clr_mfp_irq_base = icu_data[i].virq_base;
+ icu_data[i].clr_mfp_hwirq = mfp_irq;
+ }
+ irq_set_chained_handler(icu_data[i].cascade_irq,
+ icu_mux_irq_demux);
+ max_icu_nr++;
+ return 0;
+err:
+ if (icu_data[i].virq_base) {
+ for (j = 0; j < irq; j++)
+ irq_dispose_mapping(icu_data[i].virq_base + j);
+ }
+ irq_domain_remove(icu_data[i].domain);
+ return -EINVAL;
+}
+IRQCHIP_DECLARE(mmp2_mux_intc, "mrvl,mmp2-mux-intc", mmp2_mux_of_init);
+#endif
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 5ef78ef..2acc43f 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -3,7 +3,7 @@
#
dm-mod-y += dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
- dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o
+ dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o dm-stats.o
dm-multipath-y += dm-path-selector.o dm-mpath.o
dm-snapshot-y += dm-snap.o dm-exception-store.o dm-snap-transient.o \
dm-snap-persistent.o
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 0df3ec0..2956976 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -67,9 +67,11 @@ static void free_bitset(unsigned long *bits)
#define MIGRATION_COUNT_WINDOW 10
/*
- * The block size of the device holding cache data must be >= 32KB
+ * The block size of the device holding cache data must be
+ * between 32KB and 1GB.
*/
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
+#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
/*
* FIXME: the cache is read/write for the time being.
@@ -101,6 +103,8 @@ struct cache {
struct dm_target *ti;
struct dm_target_callbacks callbacks;
+ struct dm_cache_metadata *cmd;
+
/*
* Metadata is written to this device.
*/
@@ -117,11 +121,6 @@ struct cache {
struct dm_dev *cache_dev;
/*
- * Cache features such as write-through.
- */
- struct cache_features features;
-
- /*
* Size of the origin device in _complete_ blocks and native sectors.
*/
dm_oblock_t origin_blocks;
@@ -138,8 +137,6 @@ struct cache {
uint32_t sectors_per_block;
int sectors_per_block_shift;
- struct dm_cache_metadata *cmd;
-
spinlock_t lock;
struct bio_list deferred_bios;
struct bio_list deferred_flush_bios;
@@ -148,8 +145,8 @@ struct cache {
struct list_head completed_migrations;
struct list_head need_commit_migrations;
sector_t migration_threshold;
- atomic_t nr_migrations;
wait_queue_head_t migration_wait;
+ atomic_t nr_migrations;
/*
* cache_size entries, dirty if set
@@ -160,9 +157,16 @@ struct cache {
/*
* origin_blocks entries, discarded if set.
*/
- uint32_t discard_block_size; /* a power of 2 times sectors per block */
dm_dblock_t discard_nr_blocks;
unsigned long *discard_bitset;
+ uint32_t discard_block_size; /* a power of 2 times sectors per block */
+
+ /*
+ * Rather than reconstructing the table line for the status we just
+ * save it and regurgitate.
+ */
+ unsigned nr_ctr_args;
+ const char **ctr_args;
struct dm_kcopyd_client *copier;
struct workqueue_struct *wq;
@@ -187,14 +191,12 @@ struct cache {
bool loaded_mappings:1;
bool loaded_discards:1;
- struct cache_stats stats;
-
/*
- * Rather than reconstructing the table line for the status we just
- * save it and regurgitate.
+ * Cache features such as write-through.
*/
- unsigned nr_ctr_args;
- const char **ctr_args;
+ struct cache_features features;
+
+ struct cache_stats stats;
};
struct per_bio_data {
@@ -1687,24 +1689,25 @@ static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
char **error)
{
- unsigned long tmp;
+ unsigned long block_size;
if (!at_least_one_arg(as, error))
return -EINVAL;
- if (kstrtoul(dm_shift_arg(as), 10, &tmp) || !tmp ||
- tmp < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
- tmp & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
+ if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
+ block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
+ block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
+ block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
*error = "Invalid data block size";
return -EINVAL;
}
- if (tmp > ca->cache_sectors) {
+ if (block_size > ca->cache_sectors) {
*error = "Data block size is larger than the cache device";
return -EINVAL;
}
- ca->block_size = tmp;
+ ca->block_size = block_size;
return 0;
}
@@ -2609,9 +2612,17 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct cache *cache = ti->private;
+ uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
- blk_limits_io_min(limits, 0);
- blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
+ /*
+ * If the system-determined stacked limits are compatible with the
+ * cache's blocksize (io_opt is a factor) do not override them.
+ */
+ if (io_opt_sectors < cache->sectors_per_block ||
+ do_div(io_opt_sectors, cache->sectors_per_block)) {
+ blk_limits_io_min(limits, 0);
+ blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
+ }
set_discard_limits(cache, limits);
}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 6d2d41a..0fce0bc 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1645,20 +1645,14 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ret = -ENOMEM;
- cc->io_queue = alloc_workqueue("kcryptd_io",
- WQ_NON_REENTRANT|
- WQ_MEM_RECLAIM,
- 1);
+ cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
if (!cc->io_queue) {
ti->error = "Couldn't create kcryptd io queue";
goto bad;
}
cc->crypt_queue = alloc_workqueue("kcryptd",
- WQ_NON_REENTRANT|
- WQ_CPU_INTENSIVE|
- WQ_MEM_RECLAIM,
- 1);
+ WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
if (!cc->crypt_queue) {
ti->error = "Couldn't create kcryptd queue";
goto bad;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index f1b7586..afe0814 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -877,7 +877,7 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
if (new_data < param->data ||
- invalid_str(new_data, (void *) param + param_size) ||
+ invalid_str(new_data, (void *) param + param_size) || !*new_data ||
strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) {
DMWARN("Invalid new mapped device name or uuid string supplied.");
return -EINVAL;
@@ -1262,44 +1262,37 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
r = dm_table_create(&t, get_mode(param), param->target_count, md);
if (r)
- goto out;
+ goto err;
+ /* Protect md->type and md->queue against concurrent table loads. */
+ dm_lock_md_type(md);
r = populate_table(t, param, param_size);
- if (r) {
- dm_table_destroy(t);
- goto out;
- }
+ if (r)
+ goto err_unlock_md_type;
immutable_target_type = dm_get_immutable_target_type(md);
if (immutable_target_type &&
(immutable_target_type != dm_table_get_immutable_target_type(t))) {
DMWARN("can't replace immutable target type %s",
immutable_target_type->name);
- dm_table_destroy(t);
r = -EINVAL;
- goto out;
+ goto err_unlock_md_type;
}
- /* Protect md->type and md->queue against concurrent table loads. */
- dm_lock_md_type(md);
if (dm_get_md_type(md) == DM_TYPE_NONE)
/* Initial table load: acquire type of table. */
dm_set_md_type(md, dm_table_get_type(t));
else if (dm_get_md_type(md) != dm_table_get_type(t)) {
DMWARN("can't change device type after initial table load.");
- dm_table_destroy(t);
- dm_unlock_md_type(md);
r = -EINVAL;
- goto out;
+ goto err_unlock_md_type;
}
/* setup md->queue to reflect md's type (may block) */
r = dm_setup_md_queue(md);
if (r) {
DMWARN("unable to set up device queue for new table.");
- dm_table_destroy(t);
- dm_unlock_md_type(md);
- goto out;
+ goto err_unlock_md_type;
}
dm_unlock_md_type(md);
@@ -1309,9 +1302,8 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
if (!hc || hc->md != md) {
DMWARN("device has been removed from the dev hash table.");
up_write(&_hash_lock);
- dm_table_destroy(t);
r = -ENXIO;
- goto out;
+ goto err_destroy_table;
}
if (hc->new_map)
@@ -1322,7 +1314,6 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
param->flags |= DM_INACTIVE_PRESENT_FLAG;
__dev_status(md, param);
-out:
if (old_map) {
dm_sync_table(md);
dm_table_destroy(old_map);
@@ -1330,6 +1321,15 @@ out:
dm_put(md);
+ return 0;
+
+err_unlock_md_type:
+ dm_unlock_md_type(md);
+err_destroy_table:
+ dm_table_destroy(t);
+err:
+ dm_put(md);
+
return r;
}
@@ -1455,20 +1455,26 @@ static int table_status(struct dm_ioctl *param, size_t param_size)
return 0;
}
-static bool buffer_test_overflow(char *result, unsigned maxlen)
-{
- return !maxlen || strlen(result) + 1 >= maxlen;
-}
-
/*
- * Process device-mapper dependent messages.
+ * Process device-mapper dependent messages. Messages prefixed with '@'
+ * are processed by the DM core. All others are delivered to the target.
* Returns a number <= 1 if message was processed by device mapper.
* Returns 2 if message should be delivered to the target.
*/
static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
char *result, unsigned maxlen)
{
- return 2;
+ int r;
+
+ if (**argv != '@')
+ return 2; /* no '@' prefix, deliver to target */
+
+ r = dm_stats_message(md, argc, argv, result, maxlen);
+ if (r < 2)
+ return r;
+
+ DMERR("Unsupported message sent to DM core: %s", argv[0]);
+ return -EINVAL;
}
/*
@@ -1542,7 +1548,7 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
if (r == 1) {
param->flags |= DM_DATA_OUT_FLAG;
- if (buffer_test_overflow(result, maxlen))
+ if (dm_message_test_buffer_overflow(result, maxlen))
param->flags |= DM_BUFFER_FULL_FLAG;
else
param->data_size = param->data_start + strlen(result) + 1;
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index d581fe5..3a7cade 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -833,8 +833,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro
goto bad_slab;
INIT_WORK(&kc->kcopyd_work, do_work);
- kc->kcopyd_wq = alloc_workqueue("kcopyd",
- WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+ kc->kcopyd_wq = alloc_workqueue("kcopyd", WQ_MEM_RECLAIM, 0);
if (!kc->kcopyd_wq)
goto bad_workqueue;
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 699b5be..9584443 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1080,8 +1080,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->per_bio_data_size = sizeof(struct dm_raid1_bio_record);
ti->discard_zeroes_data_unsupported = true;
- ms->kmirrord_wq = alloc_workqueue("kmirrord",
- WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+ ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
if (!ms->kmirrord_wq) {
DMERR("couldn't start kmirrord");
r = -ENOMEM;
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
new file mode 100644
index 0000000..8ae31e8
--- /dev/null
+++ b/drivers/md/dm-stats.c
@@ -0,0 +1,969 @@
+#include <linux/errno.h>
+#include <linux/numa.h>
+#include <linux/slab.h>
+#include <linux/rculist.h>
+#include <linux/threads.h>
+#include <linux/preempt.h>
+#include <linux/irqflags.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/device-mapper.h>
+
+#include "dm.h"
+#include "dm-stats.h"
+
+#define DM_MSG_PREFIX "stats"
+
+static int dm_stat_need_rcu_barrier;
+
+/*
+ * Using 64-bit values to avoid overflow (which is a
+ * problem that block/genhd.c's IO accounting has).
+ */
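+/* The [2] arrays below are indexed by data direction: READ or WRITE. */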
+struct dm_stat_percpu {
+ unsigned long long sectors[2];
+ unsigned long long ios[2];
+ unsigned long long merges[2];
+ unsigned long long ticks[2];
+ unsigned long long io_ticks[2];
+ unsigned long long io_ticks_total;
+ unsigned long long time_in_queue;
+};
+
+struct dm_stat_shared {
+ atomic_t in_flight[2];
+ unsigned long stamp;
+ struct dm_stat_percpu tmp;
+};
+
+struct dm_stat {
+ struct list_head list_entry;
+ int id;
+ size_t n_entries;
+ sector_t start;
+ sector_t end;
+ sector_t step;
+ const char *program_id;
+ const char *aux_data;
+ struct rcu_head rcu_head;
+ size_t shared_alloc_size;
+ size_t percpu_alloc_size;
+ struct dm_stat_percpu *stat_percpu[NR_CPUS];
+ struct dm_stat_shared stat_shared[0];
+};
+
+struct dm_stats_last_position {
+ sector_t last_sector;
+ unsigned last_rw;
+};
+
+/*
+ * A typo on the command line could possibly make the kernel run out of memory
+ * and crash. To prevent the crash we account all used memory. We fail if we
+ * exhaust 1/4 of all memory or 1/2 of vmalloc space.
+ */
+#define DM_STATS_MEMORY_FACTOR 4
+#define DM_STATS_VMALLOC_FACTOR 2
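+
+/*
+ * Illustrative: on a machine with 4GiB of RAM this caps the memory
+ * accounted here at about 1GiB; on 32-bit configurations half of the
+ * vmalloc arena is usually the tighter limit.
+ */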
+
+static DEFINE_SPINLOCK(shared_memory_lock);
+
+static unsigned long shared_memory_amount;
+
+static bool __check_shared_memory(size_t alloc_size)
+{
+ size_t a;
+
+ a = shared_memory_amount + alloc_size;
+ if (a < shared_memory_amount)
+ return false;
+ if (a >> PAGE_SHIFT > totalram_pages / DM_STATS_MEMORY_FACTOR)
+ return false;
+#ifdef CONFIG_MMU
+ if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
+ return false;
+#endif
+ return true;
+}
+
+static bool check_shared_memory(size_t alloc_size)
+{
+ bool ret;
+
+ spin_lock_irq(&shared_memory_lock);
+
+ ret = __check_shared_memory(alloc_size);
+
+ spin_unlock_irq(&shared_memory_lock);
+
+ return ret;
+}
+
+static bool claim_shared_memory(size_t alloc_size)
+{
+ spin_lock_irq(&shared_memory_lock);
+
+ if (!__check_shared_memory(alloc_size)) {
+ spin_unlock_irq(&shared_memory_lock);
+ return false;
+ }
+
+ shared_memory_amount += alloc_size;
+
+ spin_unlock_irq(&shared_memory_lock);
+
+ return true;
+}
+
+static void free_shared_memory(size_t alloc_size)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&shared_memory_lock, flags);
+
+ if (WARN_ON_ONCE(shared_memory_amount < alloc_size)) {
+ spin_unlock_irqrestore(&shared_memory_lock, flags);
+ DMCRIT("Memory usage accounting bug.");
+ return;
+ }
+
+ shared_memory_amount -= alloc_size;
+
+ spin_unlock_irqrestore(&shared_memory_lock, flags);
+}
+
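+/*
+ * Allocate zeroed, accounted memory: try kzalloc for sizes the slab
+ * allocator can serve, fall back to vzalloc, and undo the accounting
+ * on failure.
+ */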
+static void *dm_kvzalloc(size_t alloc_size, int node)
+{
+ void *p;
+
+ if (!claim_shared_memory(alloc_size))
+ return NULL;
+
+ if (alloc_size <= KMALLOC_MAX_SIZE) {
+ p = kzalloc_node(alloc_size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN, node);
+ if (p)
+ return p;
+ }
+ p = vzalloc_node(alloc_size, node);
+ if (p)
+ return p;
+
+ free_shared_memory(alloc_size);
+
+ return NULL;
+}
+
+static void dm_kvfree(void *ptr, size_t alloc_size)
+{
+ if (!ptr)
+ return;
+
+ free_shared_memory(alloc_size);
+
+ if (is_vmalloc_addr(ptr))
+ vfree(ptr);
+ else
+ kfree(ptr);
+}
+
+static void dm_stat_free(struct rcu_head *head)
+{
+ int cpu;
+ struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);
+
+ kfree(s->program_id);
+ kfree(s->aux_data);
+ for_each_possible_cpu(cpu)
+ dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
+ dm_kvfree(s, s->shared_alloc_size);
+}
+
+static int dm_stat_in_flight(struct dm_stat_shared *shared)
+{
+ return atomic_read(&shared->in_flight[READ]) +
+ atomic_read(&shared->in_flight[WRITE]);
+}
+
+void dm_stats_init(struct dm_stats *stats)
+{
+ int cpu;
+ struct dm_stats_last_position *last;
+
+ mutex_init(&stats->mutex);
+ INIT_LIST_HEAD(&stats->list);
+ stats->last = alloc_percpu(struct dm_stats_last_position);
+ for_each_possible_cpu(cpu) {
+ last = per_cpu_ptr(stats->last, cpu);
+ last->last_sector = (sector_t)ULLONG_MAX;
+ last->last_rw = UINT_MAX;
+ }
+}
+
+void dm_stats_cleanup(struct dm_stats *stats)
+{
+ size_t ni;
+ struct dm_stat *s;
+ struct dm_stat_shared *shared;
+
+ while (!list_empty(&stats->list)) {
+ s = container_of(stats->list.next, struct dm_stat, list_entry);
+ list_del(&s->list_entry);
+ for (ni = 0; ni < s->n_entries; ni++) {
+ shared = &s->stat_shared[ni];
+ if (WARN_ON(dm_stat_in_flight(shared))) {
+ DMCRIT("leaked in-flight counter at index %lu "
+ "(start %llu, end %llu, step %llu): reads %d, writes %d",
+ (unsigned long)ni,
+ (unsigned long long)s->start,
+ (unsigned long long)s->end,
+ (unsigned long long)s->step,
+ atomic_read(&shared->in_flight[READ]),
+ atomic_read(&shared->in_flight[WRITE]));
+ }
+ }
+ dm_stat_free(&s->rcu_head);
+ }
+ free_percpu(stats->last);
+}
+
+static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
+ sector_t step, const char *program_id, const char *aux_data,
+ void (*suspend_callback)(struct mapped_device *),
+ void (*resume_callback)(struct mapped_device *),
+ struct mapped_device *md)
+{
+ struct list_head *l;
+ struct dm_stat *s, *tmp_s;
+ sector_t n_entries;
+ size_t ni;
+ size_t shared_alloc_size;
+ size_t percpu_alloc_size;
+ struct dm_stat_percpu *p;
+ int cpu;
+ int ret_id;
+ int r;
+
+ if (end < start || !step)
+ return -EINVAL;
+
+ n_entries = end - start;
+ if (dm_sector_div64(n_entries, step))
+ n_entries++;
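+ /* e.g. a 1000-sector range with step 64 rounds up to 16 entries */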
+
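+ /* reject entry counts that don't fit in size_t (or equal SIZE_MAX) */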
+ if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
+ return -EOVERFLOW;
+
+ shared_alloc_size = sizeof(struct dm_stat) + (size_t)n_entries * sizeof(struct dm_stat_shared);
+ if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
+ return -EOVERFLOW;
+
+ percpu_alloc_size = (size_t)n_entries * sizeof(struct dm_stat_percpu);
+ if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries)
+ return -EOVERFLOW;
+
+ if (!check_shared_memory(shared_alloc_size + num_possible_cpus() * percpu_alloc_size))
+ return -ENOMEM;
+
+ s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE);
+ if (!s)
+ return -ENOMEM;
+
+ s->n_entries = n_entries;
+ s->start = start;
+ s->end = end;
+ s->step = step;
+ s->shared_alloc_size = shared_alloc_size;
+ s->percpu_alloc_size = percpu_alloc_size;
+
+ s->program_id = kstrdup(program_id, GFP_KERNEL);
+ if (!s->program_id) {
+ r = -ENOMEM;
+ goto out;
+ }
+ s->aux_data = kstrdup(aux_data, GFP_KERNEL);
+ if (!s->aux_data) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ for (ni = 0; ni < n_entries; ni++) {
+ atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
+ atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
+ }
+
+ for_each_possible_cpu(cpu) {
+ p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
+ if (!p) {
+ r = -ENOMEM;
+ goto out;
+ }
+ s->stat_percpu[cpu] = p;
+ }
+
+ /*
+ * Suspend/resume to make sure there is no i/o in flight,
+ * so that newly created statistics will be exact.
+ *
+ * (note: we couldn't suspend earlier because we must not
+ * allocate memory while suspended)
+ */
+ suspend_callback(md);
+
+ mutex_lock(&stats->mutex);
+ s->id = 0;
+ list_for_each(l, &stats->list) {
+ tmp_s = container_of(l, struct dm_stat, list_entry);
+ if (WARN_ON(tmp_s->id < s->id)) {
+ r = -EINVAL;
+ goto out_unlock_resume;
+ }
+ if (tmp_s->id > s->id)
+ break;
+ if (unlikely(s->id == INT_MAX)) {
+ r = -ENFILE;
+ goto out_unlock_resume;
+ }
+ s->id++;
+ }
+ ret_id = s->id;
+ list_add_tail_rcu(&s->list_entry, l);
+ mutex_unlock(&stats->mutex);
+
+ resume_callback(md);
+
+ return ret_id;
+
+out_unlock_resume:
+ mutex_unlock(&stats->mutex);
+ resume_callback(md);
+out:
+ dm_stat_free(&s->rcu_head);
+ return r;
+}
+
+static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id)
+{
+ struct dm_stat *s;
+
+ list_for_each_entry(s, &stats->list, list_entry) {
+ if (s->id > id)
+ break;
+ if (s->id == id)
+ return s;
+ }
+
+ return NULL;
+}
+
+static int dm_stats_delete(struct dm_stats *stats, int id)
+{
+ struct dm_stat *s;
+ int cpu;
+
+ mutex_lock(&stats->mutex);
+
+ s = __dm_stats_find(stats, id);
+ if (!s) {
+ mutex_unlock(&stats->mutex);
+ return -ENOENT;
+ }
+
+ list_del_rcu(&s->list_entry);
+ mutex_unlock(&stats->mutex);
+
+ /*
+ * vfree can't be called from RCU callback
+ */
+ for_each_possible_cpu(cpu)
+ if (is_vmalloc_addr(s->stat_percpu[cpu]))
+ goto do_sync_free;
+ if (is_vmalloc_addr(s)) {
+do_sync_free:
+ synchronize_rcu_expedited();
+ dm_stat_free(&s->rcu_head);
+ } else {
+ ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
+ call_rcu(&s->rcu_head, dm_stat_free);
+ }
+ return 0;
+}
+
+static int dm_stats_list(struct dm_stats *stats, const char *program,
+ char *result, unsigned maxlen)
+{
+ struct dm_stat *s;
+ sector_t len;
+ unsigned sz = 0;
+
+ /*
+ * Output format:
+ * <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
+ */
+
+ mutex_lock(&stats->mutex);
+ list_for_each_entry(s, &stats->list, list_entry) {
+ if (!program || !strcmp(program, s->program_id)) {
+ len = s->end - s->start;
+ DMEMIT("%d: %llu+%llu %llu %s %s\n", s->id,
+ (unsigned long long)s->start,
+ (unsigned long long)len,
+ (unsigned long long)s->step,
+ s->program_id,
+ s->aux_data);
+ }
+ }
+ mutex_unlock(&stats->mutex);
+
+ return 1;
+}
+
+static void dm_stat_round(struct dm_stat_shared *shared, struct dm_stat_percpu *p)
+{
+ /*
+ * This is racy, but so is part_round_stats_single.
+ */
+ unsigned long now = jiffies;
+ unsigned in_flight_read;
+ unsigned in_flight_write;
+ unsigned long difference = now - shared->stamp;
+
+ if (!difference)
+ return;
+ in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
+ in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
+ if (in_flight_read)
+ p->io_ticks[READ] += difference;
+ if (in_flight_write)
+ p->io_ticks[WRITE] += difference;
+ if (in_flight_read + in_flight_write) {
+ p->io_ticks_total += difference;
+ p->time_in_queue += (in_flight_read + in_flight_write) * difference;
+ }
+ shared->stamp = now;
+}
+
+static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
+ unsigned long bi_rw, sector_t len, bool merged,
+ bool end, unsigned long duration)
+{
+ unsigned long idx = bi_rw & REQ_WRITE;
+ struct dm_stat_shared *shared = &s->stat_shared[entry];
+ struct dm_stat_percpu *p;
+
+ /*
+ * For strict correctness we should use local_irq_disable/enable
+ * instead of preempt_disable/enable.
+ *
+ * This is racy if the driver finishes bios from non-interrupt
+ * context as well as from interrupt context, or from two different
+ * interrupts.
+ *
+ * However, the race only results in not counting some events,
+ * so it is acceptable.
+ *
+ * part_stat_lock()/part_stat_unlock() have this race too.
+ */
+ preempt_disable();
+ p = &s->stat_percpu[smp_processor_id()][entry];
+
+ if (!end) {
+ dm_stat_round(shared, p);
+ atomic_inc(&shared->in_flight[idx]);
+ } else {
+ dm_stat_round(shared, p);
+ atomic_dec(&shared->in_flight[idx]);
+ p->sectors[idx] += len;
+ p->ios[idx] += 1;
+ p->merges[idx] += merged;
+ p->ticks[idx] += duration;
+ }
+
+ preempt_enable();
+}
+
+static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
+ sector_t bi_sector, sector_t end_sector,
+ bool end, unsigned long duration,
+ struct dm_stats_aux *stats_aux)
+{
+ sector_t rel_sector, offset, todo, fragment_len;
+ size_t entry;
+
+ if (end_sector <= s->start || bi_sector >= s->end)
+ return;
+ if (unlikely(bi_sector < s->start)) {
+ rel_sector = 0;
+ todo = end_sector - s->start;
+ } else {
+ rel_sector = bi_sector - s->start;
+ todo = end_sector - bi_sector;
+ }
+ if (unlikely(end_sector > s->end))
+ todo -= (end_sector - s->end);
+
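+ /*
+ * Walk the bio area by area: e.g. with step 8, a bio covering
+ * sectors 6..9 is accounted as two fragments (6..7 and 8..9).
+ */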
+ offset = dm_sector_div64(rel_sector, s->step);
+ entry = rel_sector;
+ do {
+ if (WARN_ON_ONCE(entry >= s->n_entries)) {
+ DMCRIT("Invalid area access in region id %d", s->id);
+ return;
+ }
+ fragment_len = todo;
+ if (fragment_len > s->step - offset)
+ fragment_len = s->step - offset;
+ dm_stat_for_entry(s, entry, bi_rw, fragment_len,
+ stats_aux->merged, end, duration);
+ todo -= fragment_len;
+ entry++;
+ offset = 0;
+ } while (unlikely(todo != 0));
+}
+
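+/*
+ * Entry point from DM core: called once when a bio starts (end=false)
+ * and once when it completes (end=true, with the duration in jiffies);
+ * see start_io_acct()/end_io_acct() in dm.c.
+ */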
+void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
+ sector_t bi_sector, unsigned bi_sectors, bool end,
+ unsigned long duration, struct dm_stats_aux *stats_aux)
+{
+ struct dm_stat *s;
+ sector_t end_sector;
+ struct dm_stats_last_position *last;
+
+ if (unlikely(!bi_sectors))
+ return;
+
+ end_sector = bi_sector + bi_sectors;
+
+ if (!end) {
+ /*
+ * A race condition can at worst result in the merged flag being
+ * misrepresented, so we don't have to disable preemption here.
+ */
+ last = __this_cpu_ptr(stats->last);
+ stats_aux->merged =
+ (bi_sector == ACCESS_ONCE(last->last_sector) &&
+ ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
+ (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD))));
+ ACCESS_ONCE(last->last_sector) = end_sector;
+ ACCESS_ONCE(last->last_rw) = bi_rw;
+ }
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(s, &stats->list, list_entry)
+ __dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration, stats_aux);
+
+ rcu_read_unlock();
+}
+
+static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared,
+ struct dm_stat *s, size_t x)
+{
+ int cpu;
+ struct dm_stat_percpu *p;
+
+ local_irq_disable();
+ p = &s->stat_percpu[smp_processor_id()][x];
+ dm_stat_round(shared, p);
+ local_irq_enable();
+
+ memset(&shared->tmp, 0, sizeof(shared->tmp));
+ for_each_possible_cpu(cpu) {
+ p = &s->stat_percpu[cpu][x];
+ shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]);
+ shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]);
+ shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]);
+ shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]);
+ shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]);
+ shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]);
+ shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]);
+ shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]);
+ shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]);
+ shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]);
+ shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total);
+ shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue);
+ }
+}
+
+static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
+ bool init_tmp_percpu_totals)
+{
+ size_t x;
+ struct dm_stat_shared *shared;
+ struct dm_stat_percpu *p;
+
+ for (x = idx_start; x < idx_end; x++) {
+ shared = &s->stat_shared[x];
+ if (init_tmp_percpu_totals)
+ __dm_stat_init_temporary_percpu_totals(shared, s, x);
+ local_irq_disable();
+ p = &s->stat_percpu[smp_processor_id()][x];
+ p->sectors[READ] -= shared->tmp.sectors[READ];
+ p->sectors[WRITE] -= shared->tmp.sectors[WRITE];
+ p->ios[READ] -= shared->tmp.ios[READ];
+ p->ios[WRITE] -= shared->tmp.ios[WRITE];
+ p->merges[READ] -= shared->tmp.merges[READ];
+ p->merges[WRITE] -= shared->tmp.merges[WRITE];
+ p->ticks[READ] -= shared->tmp.ticks[READ];
+ p->ticks[WRITE] -= shared->tmp.ticks[WRITE];
+ p->io_ticks[READ] -= shared->tmp.io_ticks[READ];
+ p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE];
+ p->io_ticks_total -= shared->tmp.io_ticks_total;
+ p->time_in_queue -= shared->tmp.time_in_queue;
+ local_irq_enable();
+ }
+}
+
+static int dm_stats_clear(struct dm_stats *stats, int id)
+{
+ struct dm_stat *s;
+
+ mutex_lock(&stats->mutex);
+
+ s = __dm_stats_find(stats, id);
+ if (!s) {
+ mutex_unlock(&stats->mutex);
+ return -ENOENT;
+ }
+
+ __dm_stat_clear(s, 0, s->n_entries, true);
+
+ mutex_unlock(&stats->mutex);
+
+ return 1;
+}
+
+/*
+ * This is like jiffies_to_msecs(), but works for 64-bit values: the
+ * value is converted in 22-bit chunks so each chunk fits the 32-bit helper.
+ */
+static unsigned long long dm_jiffies_to_msec64(unsigned long long j)
+{
+ unsigned long long result = 0;
+ unsigned mult;
+
+ if (j)
+ result = jiffies_to_msecs(j & 0x3fffff);
+ if (j >= 1 << 22) {
+ mult = jiffies_to_msecs(1 << 22);
+ result += (unsigned long long)mult * (unsigned long long)jiffies_to_msecs((j >> 22) & 0x3fffff);
+ }
+ if (j >= 1ULL << 44)
+ result += (unsigned long long)mult * (unsigned long long)mult * (unsigned long long)jiffies_to_msecs(j >> 44);
+
+ return result;
+}
+
+static int dm_stats_print(struct dm_stats *stats, int id,
+ size_t idx_start, size_t idx_len,
+ bool clear, char *result, unsigned maxlen)
+{
+ unsigned sz = 0;
+ struct dm_stat *s;
+ size_t x;
+ sector_t start, end, step;
+ size_t idx_end;
+ struct dm_stat_shared *shared;
+
+ /*
+ * Output format:
+ * <start_sector>+<length> counters
+ */
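+ /*
+ * The counters roughly follow /proc/diskstats: reads, read merges,
+ * sectors read, read ticks (ms), writes, write merges, sectors
+ * written, write ticks (ms), in-flight, io ticks (ms), time in
+ * queue (ms), followed by per-direction io ticks (ms).
+ */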
+
+ mutex_lock(&stats->mutex);
+
+ s = __dm_stats_find(stats, id);
+ if (!s) {
+ mutex_unlock(&stats->mutex);
+ return -ENOENT;
+ }
+
+ idx_end = idx_start + idx_len;
+ if (idx_end < idx_start ||
+ idx_end > s->n_entries)
+ idx_end = s->n_entries;
+
+ if (idx_start > idx_end)
+ idx_start = idx_end;
+
+ step = s->step;
+ start = s->start + (step * idx_start);
+
+ for (x = idx_start; x < idx_end; x++, start = end) {
+ shared = &s->stat_shared[x];
+ end = start + step;
+ if (unlikely(end > s->end))
+ end = s->end;
+
+ __dm_stat_init_temporary_percpu_totals(shared, s, x);
+
+ DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu\n",
+ (unsigned long long)start,
+ (unsigned long long)step,
+ shared->tmp.ios[READ],
+ shared->tmp.merges[READ],
+ shared->tmp.sectors[READ],
+ dm_jiffies_to_msec64(shared->tmp.ticks[READ]),
+ shared->tmp.ios[WRITE],
+ shared->tmp.merges[WRITE],
+ shared->tmp.sectors[WRITE],
+ dm_jiffies_to_msec64(shared->tmp.ticks[WRITE]),
+ dm_stat_in_flight(shared),
+ dm_jiffies_to_msec64(shared->tmp.io_ticks_total),
+ dm_jiffies_to_msec64(shared->tmp.time_in_queue),
+ dm_jiffies_to_msec64(shared->tmp.io_ticks[READ]),
+ dm_jiffies_to_msec64(shared->tmp.io_ticks[WRITE]));
+
+ if (unlikely(sz + 1 >= maxlen))
+ goto buffer_overflow;
+ }
+
+ if (clear)
+ __dm_stat_clear(s, idx_start, idx_end, false);
+
+buffer_overflow:
+ mutex_unlock(&stats->mutex);
+
+ return 1;
+}
+
+static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data)
+{
+ struct dm_stat *s;
+ const char *new_aux_data;
+
+ mutex_lock(&stats->mutex);
+
+ s = __dm_stats_find(stats, id);
+ if (!s) {
+ mutex_unlock(&stats->mutex);
+ return -ENOENT;
+ }
+
+ new_aux_data = kstrdup(aux_data, GFP_KERNEL);
+ if (!new_aux_data) {
+ mutex_unlock(&stats->mutex);
+ return -ENOMEM;
+ }
+
+ kfree(s->aux_data);
+ s->aux_data = new_aux_data;
+
+ mutex_unlock(&stats->mutex);
+
+ return 0;
+}
+
+static int message_stats_create(struct mapped_device *md,
+ unsigned argc, char **argv,
+ char *result, unsigned maxlen)
+{
+ int id;
+ char dummy;
+ unsigned long long start, end, len, step;
+ unsigned divisor;
+ const char *program_id, *aux_data;
+
+ /*
+ * Input format:
+ * <range> <step> [<program_id> [<aux_data>]]
+ */
+
+ if (argc < 3 || argc > 5)
+ return -EINVAL;
+
+ if (!strcmp(argv[1], "-")) {
+ start = 0;
+ len = dm_get_size(md);
+ if (!len)
+ len = 1;
+ } else if (sscanf(argv[1], "%llu+%llu%c", &start, &len, &dummy) != 2 ||
+ start != (sector_t)start || len != (sector_t)len)
+ return -EINVAL;
+
+ end = start + len;
+ if (start >= end)
+ return -EINVAL;
+
+ if (sscanf(argv[2], "/%u%c", &divisor, &dummy) == 1) {
+ step = end - start;
+ if (do_div(step, divisor))
+ step++;
+ if (!step)
+ step = 1;
+ } else if (sscanf(argv[2], "%llu%c", &step, &dummy) != 1 ||
+ step != (sector_t)step || !step)
+ return -EINVAL;
+
+ program_id = "-";
+ aux_data = "-";
+
+ if (argc > 3)
+ program_id = argv[3];
+
+ if (argc > 4)
+ aux_data = argv[4];
+
+ /*
+ * If a buffer overflow happens after we created the region,
+ * it's too late (userspace would retry with a larger
+ * buffer, but the region id that caused the overflow is already
+ * leaked). So we must detect buffer overflow in advance.
+ */
+ snprintf(result, maxlen, "%d", INT_MAX);
+ if (dm_message_test_buffer_overflow(result, maxlen))
+ return 1;
+
+ id = dm_stats_create(dm_get_stats(md), start, end, step, program_id, aux_data,
+ dm_internal_suspend, dm_internal_resume, md);
+ if (id < 0)
+ return id;
+
+ snprintf(result, maxlen, "%d", id);
+
+ return 1;
+}
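+
+/*
+ * Illustrative use from userspace (assuming the usual dmsetup message
+ * interface): "dmsetup message <dev> 0 @stats_create - /100" divides
+ * the whole device into 100 areas and prints the new region id.
+ */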
+
+static int message_stats_delete(struct mapped_device *md,
+ unsigned argc, char **argv)
+{
+ int id;
+ char dummy;
+
+ if (argc != 2)
+ return -EINVAL;
+
+ if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
+ return -EINVAL;
+
+ return dm_stats_delete(dm_get_stats(md), id);
+}
+
+static int message_stats_clear(struct mapped_device *md,
+ unsigned argc, char **argv)
+{
+ int id;
+ char dummy;
+
+ if (argc != 2)
+ return -EINVAL;
+
+ if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
+ return -EINVAL;
+
+ return dm_stats_clear(dm_get_stats(md), id);
+}
+
+static int message_stats_list(struct mapped_device *md,
+ unsigned argc, char **argv,
+ char *result, unsigned maxlen)
+{
+ int r;
+ const char *program = NULL;
+
+ if (argc < 1 || argc > 2)
+ return -EINVAL;
+
+ if (argc > 1) {
+ program = kstrdup(argv[1], GFP_KERNEL);
+ if (!program)
+ return -ENOMEM;
+ }
+
+ r = dm_stats_list(dm_get_stats(md), program, result, maxlen);
+
+ kfree(program);
+
+ return r;
+}
+
+static int message_stats_print(struct mapped_device *md,
+ unsigned argc, char **argv, bool clear,
+ char *result, unsigned maxlen)
+{
+ int id;
+ char dummy;
+ unsigned long idx_start = 0, idx_len = ULONG_MAX;
+
+ if (argc != 2 && argc != 4)
+ return -EINVAL;
+
+ if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
+ return -EINVAL;
+
+ if (argc > 3) {
+ if (strcmp(argv[2], "-") &&
+ sscanf(argv[2], "%lu%c", &idx_start, &dummy) != 1)
+ return -EINVAL;
+ if (strcmp(argv[3], "-") &&
+ sscanf(argv[3], "%lu%c", &idx_len, &dummy) != 1)
+ return -EINVAL;
+ }
+
+ return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear,
+ result, maxlen);
+}
+
+static int message_stats_set_aux(struct mapped_device *md,
+ unsigned argc, char **argv)
+{
+ int id;
+ char dummy;
+
+ if (argc != 3)
+ return -EINVAL;
+
+ if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
+ return -EINVAL;
+
+ return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
+}
+
+int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
+ char *result, unsigned maxlen)
+{
+ int r;
+
+ if (dm_request_based(md)) {
+ DMWARN("Statistics are only supported for bio-based devices");
+ return -EOPNOTSUPP;
+ }
+
+ /* All messages here must start with '@' */
+ if (!strcasecmp(argv[0], "@stats_create"))
+ r = message_stats_create(md, argc, argv, result, maxlen);
+ else if (!strcasecmp(argv[0], "@stats_delete"))
+ r = message_stats_delete(md, argc, argv);
+ else if (!strcasecmp(argv[0], "@stats_clear"))
+ r = message_stats_clear(md, argc, argv);
+ else if (!strcasecmp(argv[0], "@stats_list"))
+ r = message_stats_list(md, argc, argv, result, maxlen);
+ else if (!strcasecmp(argv[0], "@stats_print"))
+ r = message_stats_print(md, argc, argv, false, result, maxlen);
+ else if (!strcasecmp(argv[0], "@stats_print_clear"))
+ r = message_stats_print(md, argc, argv, true, result, maxlen);
+ else if (!strcasecmp(argv[0], "@stats_set_aux"))
+ r = message_stats_set_aux(md, argc, argv);
+ else
+ return 2; /* this wasn't a stats message */
+
+ if (r == -EINVAL)
+ DMWARN("Invalid parameters for message %s", argv[0]);
+
+ return r;
+}
+
+int __init dm_statistics_init(void)
+{
+ dm_stat_need_rcu_barrier = 0;
+ return 0;
+}
+
+void dm_statistics_exit(void)
+{
+ if (dm_stat_need_rcu_barrier)
+ rcu_barrier();
+ if (WARN_ON(shared_memory_amount))
+ DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount);
+}
+
+module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, S_IRUGO);
+MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics");
diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h
new file mode 100644
index 0000000..e7c4984
--- /dev/null
+++ b/drivers/md/dm-stats.h
@@ -0,0 +1,40 @@
+#ifndef DM_STATS_H
+#define DM_STATS_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+
+int dm_statistics_init(void);
+void dm_statistics_exit(void);
+
+struct dm_stats {
+ struct mutex mutex;
+ struct list_head list; /* list of struct dm_stat */
+ struct dm_stats_last_position __percpu *last;
+ sector_t last_sector;
+ unsigned last_rw;
+};
+
+struct dm_stats_aux {
+ bool merged;
+};
+
+void dm_stats_init(struct dm_stats *st);
+void dm_stats_cleanup(struct dm_stats *st);
+
+struct mapped_device;
+
+int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
+ char *result, unsigned maxlen);
+
+void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
+ sector_t bi_sector, unsigned bi_sectors, bool end,
+ unsigned long duration, struct dm_stats_aux *aux);
+
+static inline bool dm_stats_used(struct dm_stats *st)
+{
+ return !list_empty(&st->list);
+}
+
+#endif
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index d907ca6..73c1712 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -4,6 +4,7 @@
* This file is released under the GPL.
*/
+#include "dm.h"
#include <linux/device-mapper.h>
#include <linux/module.h>
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index f221812..8f87835 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -860,14 +860,17 @@ EXPORT_SYMBOL(dm_consume_args);
static int dm_table_set_type(struct dm_table *t)
{
unsigned i;
- unsigned bio_based = 0, request_based = 0;
+ unsigned bio_based = 0, request_based = 0, hybrid = 0;
struct dm_target *tgt;
struct dm_dev_internal *dd;
struct list_head *devices;
+ unsigned live_md_type;
for (i = 0; i < t->num_targets; i++) {
tgt = t->targets + i;
- if (dm_target_request_based(tgt))
+ if (dm_target_hybrid(tgt))
+ hybrid = 1;
+ else if (dm_target_request_based(tgt))
request_based = 1;
else
bio_based = 1;
@@ -879,6 +882,19 @@ static int dm_table_set_type(struct dm_table *t)
}
}
+ if (hybrid && !bio_based && !request_based) {
+ /*
+ * The targets can work either way.
+ * Determine the type from the live device.
+ * Default to bio-based if device is new.
+ */
+ live_md_type = dm_get_md_type(t->md);
+ if (live_md_type == DM_TYPE_REQUEST_BASED)
+ request_based = 1;
+ else
+ bio_based = 1;
+ }
+
if (bio_based) {
/* We must use this table as bio-based */
t->type = DM_TYPE_BIO_BASED;
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 37ba5db..242e3ce 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -131,12 +131,19 @@ static int io_err_map(struct dm_target *tt, struct bio *bio)
return -EIO;
}
+static int io_err_map_rq(struct dm_target *ti, struct request *clone,
+ union map_info *map_context)
+{
+ return -EIO;
+}
+
static struct target_type error_target = {
.name = "error",
- .version = {1, 1, 0},
+ .version = {1, 2, 0},
.ctr = io_err_ctr,
.dtr = io_err_dtr,
.map = io_err_map,
+ .map_rq = io_err_map_rq,
};
int __init dm_target_init(void)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 88f2f80..ed06342 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -887,7 +887,8 @@ static int commit(struct pool *pool)
r = dm_pool_commit_metadata(pool->pmd);
if (r)
- DMERR_LIMIT("commit failed: error = %d", r);
+ DMERR_LIMIT("%s: commit failed: error = %d",
+ dm_device_name(pool->pool_md), r);
return r;
}
@@ -917,6 +918,13 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
unsigned long flags;
struct pool *pool = tc->pool;
+ /*
+ * Once no_free_space is set we must not allow allocation to succeed.
+ * Otherwise it is difficult to explain, debug, test and support.
+ */
+ if (pool->no_free_space)
+ return -ENOSPC;
+
r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
if (r)
return r;
@@ -931,31 +939,30 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
}
if (!free_blocks) {
- if (pool->no_free_space)
- return -ENOSPC;
- else {
- /*
- * Try to commit to see if that will free up some
- * more space.
- */
- (void) commit_or_fallback(pool);
+ /*
+ * Try to commit to see if that will free up some
+ * more space.
+ */
+ (void) commit_or_fallback(pool);
- r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
- if (r)
- return r;
+ r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
+ if (r)
+ return r;
- /*
- * If we still have no space we set a flag to avoid
- * doing all this checking and return -ENOSPC.
- */
- if (!free_blocks) {
- DMWARN("%s: no free space available.",
- dm_device_name(pool->pool_md));
- spin_lock_irqsave(&pool->lock, flags);
- pool->no_free_space = 1;
- spin_unlock_irqrestore(&pool->lock, flags);
- return -ENOSPC;
- }
+ /*
+ * If we still have no space we set a flag to avoid
+ * doing all this checking and return -ENOSPC. This
+ * flag serves as a latch that disallows allocations from
+ * this pool until the admin takes action (e.g. resize or
+ * table reload).
+ */
+ if (!free_blocks) {
+ DMWARN("%s: no free space available.",
+ dm_device_name(pool->pool_md));
+ spin_lock_irqsave(&pool->lock, flags);
+ pool->no_free_space = 1;
+ spin_unlock_irqrestore(&pool->lock, flags);
+ return -ENOSPC;
}
}
@@ -1085,6 +1092,7 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
{
int r;
dm_block_t data_block;
+ struct pool *pool = tc->pool;
r = alloc_data_block(tc, &data_block);
switch (r) {
@@ -1094,13 +1102,14 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
break;
case -ENOSPC:
- no_space(tc->pool, cell);
+ no_space(pool, cell);
break;
default:
DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
__func__, r);
- cell_error(tc->pool, cell);
+ set_pool_mode(pool, PM_READ_ONLY);
+ cell_error(pool, cell);
break;
}
}
@@ -1386,7 +1395,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
switch (mode) {
case PM_FAIL:
- DMERR("switching pool to failure mode");
+ DMERR("%s: switching pool to failure mode",
+ dm_device_name(pool->pool_md));
pool->process_bio = process_bio_fail;
pool->process_discard = process_bio_fail;
pool->process_prepared_mapping = process_prepared_mapping_fail;
@@ -1394,10 +1404,12 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
break;
case PM_READ_ONLY:
- DMERR("switching pool to read-only mode");
+ DMERR("%s: switching pool to read-only mode",
+ dm_device_name(pool->pool_md));
r = dm_pool_abort_metadata(pool->pmd);
if (r) {
- DMERR("aborting transaction failed");
+ DMERR("%s: aborting transaction failed",
+ dm_device_name(pool->pool_md));
set_pool_mode(pool, PM_FAIL);
} else {
dm_pool_metadata_read_only(pool->pmd);
@@ -2156,19 +2168,22 @@ static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
if (r) {
- DMERR("failed to retrieve data device size");
+ DMERR("%s: failed to retrieve data device size",
+ dm_device_name(pool->pool_md));
return r;
}
if (data_size < sb_data_size) {
- DMERR("pool target (%llu blocks) too small: expected %llu",
+ DMERR("%s: pool target (%llu blocks) too small: expected %llu",
+ dm_device_name(pool->pool_md),
(unsigned long long)data_size, sb_data_size);
return -EINVAL;
} else if (data_size > sb_data_size) {
r = dm_pool_resize_data_dev(pool->pmd, data_size);
if (r) {
- DMERR("failed to resize data device");
+ DMERR("%s: failed to resize data device",
+ dm_device_name(pool->pool_md));
set_pool_mode(pool, PM_READ_ONLY);
return r;
}
@@ -2192,19 +2207,22 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
if (r) {
- DMERR("failed to retrieve data device size");
+ DMERR("%s: failed to retrieve metadata device size",
+ dm_device_name(pool->pool_md));
return r;
}
if (metadata_dev_size < sb_metadata_dev_size) {
- DMERR("metadata device (%llu blocks) too small: expected %llu",
+ DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
+ dm_device_name(pool->pool_md),
metadata_dev_size, sb_metadata_dev_size);
return -EINVAL;
} else if (metadata_dev_size > sb_metadata_dev_size) {
r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
if (r) {
- DMERR("failed to resize metadata device");
+ DMERR("%s: failed to resize metadata device",
+ dm_device_name(pool->pool_md));
return r;
}
@@ -2530,37 +2548,43 @@ static void pool_status(struct dm_target *ti, status_type_t type,
r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
if (r) {
- DMERR("dm_pool_get_metadata_transaction_id returned %d", r);
+ DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
+ dm_device_name(pool->pool_md), r);
goto err;
}
r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
if (r) {
- DMERR("dm_pool_get_free_metadata_block_count returned %d", r);
+ DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
+ dm_device_name(pool->pool_md), r);
goto err;
}
r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
if (r) {
- DMERR("dm_pool_get_metadata_dev_size returned %d", r);
+ DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
+ dm_device_name(pool->pool_md), r);
goto err;
}
r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
if (r) {
- DMERR("dm_pool_get_free_block_count returned %d", r);
+ DMERR("%s: dm_pool_get_free_block_count returned %d",
+ dm_device_name(pool->pool_md), r);
goto err;
}
r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
if (r) {
- DMERR("dm_pool_get_data_dev_size returned %d", r);
+ DMERR("%s: dm_pool_get_data_dev_size returned %d",
+ dm_device_name(pool->pool_md), r);
goto err;
}
r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
if (r) {
- DMERR("dm_pool_get_metadata_snap returned %d", r);
+ DMERR("%s: dm_pool_get_metadata_snap returned %d",
+ dm_device_name(pool->pool_md), r);
goto err;
}
@@ -2648,9 +2672,17 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
+ uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
- blk_limits_io_min(limits, 0);
- blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
+ /*
+ * If the system-determined stacked limits are compatible with the
+ * pool's blocksize (io_opt is a factor) do not override them.
+ */
+ if (io_opt_sectors < pool->sectors_per_block ||
+ do_div(io_opt_sectors, pool->sectors_per_block)) {
+ blk_limits_io_min(limits, 0);
+ blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
+ }
/*
* pt->adjusted_pf is a staging area for the actual features to use.
@@ -2669,7 +2701,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 8, 0},
+ .version = {1, 9, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -2956,7 +2988,7 @@ static int thin_iterate_devices(struct dm_target *ti,
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 8, 0},
+ .version = {1, 9, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 9e39d2b..6a5e9ed 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -60,6 +60,7 @@ struct dm_io {
struct bio *bio;
unsigned long start_time;
spinlock_t endio_lock;
+ struct dm_stats_aux stats_aux;
};
/*
@@ -198,6 +199,8 @@ struct mapped_device {
/* zero-length flush that will be cloned and submitted to targets */
struct bio flush_bio;
+
+ struct dm_stats stats;
};
/*
@@ -269,6 +272,7 @@ static int (*_inits[])(void) __initdata = {
dm_io_init,
dm_kcopyd_init,
dm_interface_init,
+ dm_statistics_init,
};
static void (*_exits[])(void) = {
@@ -279,6 +283,7 @@ static void (*_exits[])(void) = {
dm_io_exit,
dm_kcopyd_exit,
dm_interface_exit,
+ dm_statistics_exit,
};
static int __init dm_init(void)
@@ -384,6 +389,16 @@ int dm_lock_for_deletion(struct mapped_device *md)
return r;
}
+sector_t dm_get_size(struct mapped_device *md)
+{
+ return get_capacity(md->disk);
+}
+
+struct dm_stats *dm_get_stats(struct mapped_device *md)
+{
+ return &md->stats;
+}
+
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
struct mapped_device *md = bdev->bd_disk->private_data;
@@ -466,8 +481,9 @@ static int md_in_flight(struct mapped_device *md)
static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
+ struct bio *bio = io->bio;
int cpu;
- int rw = bio_data_dir(io->bio);
+ int rw = bio_data_dir(bio);
io->start_time = jiffies;
@@ -476,6 +492,10 @@ static void start_io_acct(struct dm_io *io)
part_stat_unlock();
atomic_set(&dm_disk(md)->part0.in_flight[rw],
atomic_inc_return(&md->pending[rw]));
+
+ if (unlikely(dm_stats_used(&md->stats)))
+ dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+ bio_sectors(bio), false, 0, &io->stats_aux);
}
static void end_io_acct(struct dm_io *io)
@@ -491,6 +511,10 @@ static void end_io_acct(struct dm_io *io)
part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
part_stat_unlock();
+ if (unlikely(dm_stats_used(&md->stats)))
+ dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+ bio_sectors(bio), true, duration, &io->stats_aux);
+
/*
* After this is decremented the bio must not be touched if it is
* a flush.
@@ -1519,7 +1543,7 @@ static void _dm_request(struct request_queue *q, struct bio *bio)
return;
}
-static int dm_request_based(struct mapped_device *md)
+int dm_request_based(struct mapped_device *md)
{
return blk_queue_stackable(md->queue);
}
@@ -1946,8 +1970,7 @@ static struct mapped_device *alloc_dev(int minor)
add_disk(md->disk);
format_dev_t(md->name, MKDEV(_major, minor));
- md->wq = alloc_workqueue("kdmflush",
- WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+ md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
if (!md->wq)
goto bad_thread;
@@ -1959,6 +1982,8 @@ static struct mapped_device *alloc_dev(int minor)
md->flush_bio.bi_bdev = md->bdev;
md->flush_bio.bi_rw = WRITE_FLUSH;
+ dm_stats_init(&md->stats);
+
/* Populate the mapping, nobody knows we exist yet */
spin_lock(&_minor_lock);
old_md = idr_replace(&_minor_idr, md, minor);
@@ -2010,6 +2035,7 @@ static void free_dev(struct mapped_device *md)
put_disk(md->disk);
blk_cleanup_queue(md->queue);
+ dm_stats_cleanup(&md->stats);
module_put(THIS_MODULE);
kfree(md);
}
@@ -2151,7 +2177,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
/*
* Wipe any geometry if the size of the table changed.
*/
- if (size != get_capacity(md->disk))
+ if (size != dm_get_size(md))
memset(&md->geometry, 0, sizeof(md->geometry));
__set_size(md, size);
@@ -2236,11 +2262,13 @@ void dm_unlock_md_type(struct mapped_device *md)
void dm_set_md_type(struct mapped_device *md, unsigned type)
{
+ BUG_ON(!mutex_is_locked(&md->type_lock));
md->type = type;
}
unsigned dm_get_md_type(struct mapped_device *md)
{
+ BUG_ON(!mutex_is_locked(&md->type_lock));
return md->type;
}
@@ -2695,6 +2723,38 @@ out:
return r;
}
+/*
+ * Internal suspend/resume works like userspace-driven suspend. It waits
+ * until all bios finish and prevents issuing new bios to the target drivers.
+ * It may be used only from the kernel.
+ *
+ * Internal suspend holds md->suspend_lock, which prevents interaction with
+ * userspace-driven suspend. The lock is taken in dm_internal_suspend()
+ * and released only by the matching dm_internal_resume(), so the two
+ * calls must always be paired.
+ */
+
+void dm_internal_suspend(struct mapped_device *md)
+{
+ mutex_lock(&md->suspend_lock);
+ if (dm_suspended_md(md))
+ return;
+
+ set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
+ synchronize_srcu(&md->io_barrier);
+ flush_workqueue(md->wq);
+ dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
+}
+
+void dm_internal_resume(struct mapped_device *md)
+{
+ if (dm_suspended_md(md))
+ goto done;
+
+ dm_queue_flush(md);
+
+done:
+ mutex_unlock(&md->suspend_lock);
+}
+
/*-----------------------------------------------------------------
* Event notification.
*---------------------------------------------------------------*/
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 45b97da..5e604cc 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -16,6 +16,8 @@
#include <linux/blkdev.h>
#include <linux/hdreg.h>
+#include "dm-stats.h"
+
/*
* Suspend feature flags
*/
@@ -89,10 +91,21 @@ int dm_setup_md_queue(struct mapped_device *md);
#define dm_target_is_valid(t) ((t)->table)
/*
+ * To check whether the target type is bio-based or not (request-based).
+ */
+#define dm_target_bio_based(t) ((t)->type->map != NULL)
+
+/*
* To check whether the target type is request-based or not (bio-based).
*/
#define dm_target_request_based(t) ((t)->type->map_rq != NULL)
+/*
+ * To check whether the target type is a hybrid (capable of being
+ * either request-based or bio-based).
+ */
+#define dm_target_hybrid(t) (dm_target_bio_based(t) && dm_target_request_based(t))
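+/* With this change only the "error" target provides both .map and .map_rq. */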
+
/*-----------------------------------------------------------------
* A registry of target types.
*---------------------------------------------------------------*/
@@ -146,10 +159,16 @@ void dm_destroy(struct mapped_device *md);
void dm_destroy_immediate(struct mapped_device *md);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md);
+int dm_request_based(struct mapped_device *md);
+sector_t dm_get_size(struct mapped_device *md);
+struct dm_stats *dm_get_stats(struct mapped_device *md);
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
unsigned cookie);
+void dm_internal_suspend(struct mapped_device *md);
+void dm_internal_resume(struct mapped_device *md);
+
int dm_io_init(void);
void dm_io_exit(void);
@@ -162,4 +181,12 @@ void dm_kcopyd_exit(void);
struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size);
void dm_free_md_mempools(struct dm_md_mempools *pools);
+/*
+ * Helpers that are used by DM core
+ */
+static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
+{
+ return !maxlen || strlen(result) + 1 >= maxlen;
+}
+
#endif
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9f13e13..adf4d7e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1180,7 +1180,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
mddev->bitmap_info.offset =
mddev->bitmap_info.default_offset;
mddev->bitmap_info.space =
- mddev->bitmap_info.space;
+ mddev->bitmap_info.default_space;
}
} else if (mddev->pers == NULL) {
@@ -3429,7 +3429,7 @@ safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
mddev->safemode_delay = (msec*HZ)/1000;
if (mddev->safemode_delay == 0)
mddev->safemode_delay = 1;
- if (mddev->safemode_delay < old_delay)
+ if (mddev->safemode_delay < old_delay || old_delay == 0)
md_safemode_timeout((unsigned long)mddev);
}
return len;
@@ -5144,7 +5144,7 @@ int md_run(struct mddev *mddev)
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- if (mddev->flags)
+ if (mddev->flags & MD_UPDATE_SB_FLAGS)
md_update_sb(mddev, 0);
md_new_event(mddev);
@@ -5289,7 +5289,7 @@ static void __md_stop_writes(struct mddev *mddev)
md_super_wait(mddev);
if (mddev->ro == 0 &&
- (!mddev->in_sync || mddev->flags)) {
+ (!mddev->in_sync || (mddev->flags & MD_UPDATE_SB_FLAGS))) {
/* mark array as shutdown cleanly */
mddev->in_sync = 1;
md_update_sb(mddev, 1);
@@ -5337,8 +5337,14 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
err = -EBUSY;
goto out;
}
- if (bdev)
- sync_blockdev(bdev);
+ if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) {
+ /* Someone opened the device since we flushed it,
+ * so the page cache could be dirty and it is too
+ * late to flush. So abort.
+ */
+ mutex_unlock(&mddev->open_mutex);
+ return -EBUSY;
+ }
if (mddev->pers) {
__md_stop_writes(mddev);
@@ -5373,14 +5379,14 @@ static int do_md_stop(struct mddev * mddev, int mode,
mutex_unlock(&mddev->open_mutex);
return -EBUSY;
}
- if (bdev)
- /* It is possible IO was issued on some other
- * open file which was closed before we took ->open_mutex.
- * As that was not the last close __blkdev_put will not
- * have called sync_blockdev, so we must.
+ if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) {
+ /* Someone opened the device since we flushed it
+ * so page cache could be dirty and it is too late
+ * to flush. So abort
*/
- sync_blockdev(bdev);
-
+ mutex_unlock(&mddev->open_mutex);
+ return -EBUSY;
+ }
if (mddev->pers) {
if (mddev->ro)
set_disk_ro(disk, 0);
@@ -5628,10 +5634,7 @@ static int get_bitmap_file(struct mddev * mddev, void __user * arg)
char *ptr, *buf = NULL;
int err = -ENOMEM;
- if (md_allow_write(mddev))
- file = kmalloc(sizeof(*file), GFP_NOIO);
- else
- file = kmalloc(sizeof(*file), GFP_KERNEL);
+ file = kmalloc(sizeof(*file), GFP_NOIO);
if (!file)
goto out;
@@ -6420,6 +6423,20 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
!test_bit(MD_RECOVERY_NEEDED,
&mddev->flags),
msecs_to_jiffies(5000));
+ if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
+ /* Need to flush the page cache, and ensure no-one else opens
+ * and writes.
+ */
+ mutex_lock(&mddev->open_mutex);
+ if (atomic_read(&mddev->openers) > 1) {
+ mutex_unlock(&mddev->open_mutex);
+ err = -EBUSY;
+ goto abort;
+ }
+ set_bit(MD_STILL_CLOSED, &mddev->flags);
+ mutex_unlock(&mddev->open_mutex);
+ sync_blockdev(bdev);
+ }
err = mddev_lock(mddev);
if (err) {
printk(KERN_INFO
@@ -6673,6 +6690,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
err = 0;
atomic_inc(&mddev->openers);
+ clear_bit(MD_STILL_CLOSED, &mddev->flags);
mutex_unlock(&mddev->open_mutex);
check_disk_change(bdev);
@@ -7817,7 +7835,7 @@ void md_check_recovery(struct mddev *mddev)
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
- if (mddev->flags)
+ if (mddev->flags & MD_UPDATE_SB_FLAGS)
md_update_sb(mddev, 0);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
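Taken together, the MD_STILL_CLOSED changes above form a small handshake: md_ioctl() marks the array and flushes before taking the lock, md_open() clears the mark, and the stop paths abort if anyone re-opened in between. Condensed to its essentials (a summary sketch of the hunks above, not new code):

	/* md_ioctl(STOP_ARRAY / STOP_ARRAY_RO): */
	set_bit(MD_STILL_CLOSED, &mddev->flags);
	sync_blockdev(bdev);

	/* md_open(): */
	clear_bit(MD_STILL_CLOSED, &mddev->flags);

	/* md_set_readonly() / do_md_stop(): */
	if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))
		return -EBUSY;	/* re-opened; page cache may be dirty */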
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 20f02c0..608050c 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -204,12 +204,16 @@ struct mddev {
struct md_personality *pers;
dev_t unit;
int md_minor;
- struct list_head disks;
+ struct list_head disks;
unsigned long flags;
#define MD_CHANGE_DEVS 0 /* Some device status has changed */
#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */
+#define MD_UPDATE_SB_FLAGS (1 | 2 | 4) /* If these are set, md_update_sb needed */
#define MD_ARRAY_FIRST_USE 3 /* First use of array, needs initialization */
+#define MD_STILL_CLOSED 4 /* If set, then array has not been opened since
+ * md_ioctl checked on it.
+ */
int suspended;
atomic_t active_io;
@@ -218,7 +222,7 @@ struct mddev {
* are happening, so run/
* takeover/stop are not safe
*/
- int ready; /* See when safe to pass
+ int ready; /* See when safe to pass
* IO requests down */
struct gendisk *gendisk;
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 81b5138..a7e8bf2 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -615,6 +615,11 @@ int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
}
EXPORT_SYMBOL_GPL(dm_bm_flush_and_unlock);
+void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
+{
+ dm_bufio_prefetch(bm->bufio, b, 1);
+}
+
void dm_bm_set_read_only(struct dm_block_manager *bm)
{
bm->read_only = true;
diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
index be5bff6..9a82083 100644
--- a/drivers/md/persistent-data/dm-block-manager.h
+++ b/drivers/md/persistent-data/dm-block-manager.h
@@ -108,6 +108,11 @@ int dm_bm_unlock(struct dm_block *b);
int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
struct dm_block *superblock);
+ /*
+ * Request data be prefetched into the cache.
+ */
+void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);
+
/*
* Switches the bm to a read only mode. Once read-only mode
* has been entered the following functions will return -EPERM.
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 3586542..468e371 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -161,6 +161,7 @@ struct frame {
};
struct del_stack {
+ struct dm_btree_info *info;
struct dm_transaction_manager *tm;
int top;
struct frame spine[MAX_SPINE_DEPTH];
@@ -183,6 +184,20 @@ static int unprocessed_frames(struct del_stack *s)
return s->top >= 0;
}
+static void prefetch_children(struct del_stack *s, struct frame *f)
+{
+ unsigned i;
+ struct dm_block_manager *bm = dm_tm_get_bm(s->tm);
+
+ for (i = 0; i < f->nr_children; i++)
+ dm_bm_prefetch(bm, value64(f->n, i));
+}
+
+static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
+{
+ return f->level < (info->levels - 1);
+}
+
static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
{
int r;
@@ -205,6 +220,7 @@ static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
dm_tm_dec(s->tm, b);
else {
+ uint32_t flags;
struct frame *f = s->spine + ++s->top;
r = dm_tm_read_lock(s->tm, b, &btree_node_validator, &f->b);
@@ -217,6 +233,10 @@ static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
f->level = level;
f->nr_children = le32_to_cpu(f->n->header.nr_entries);
f->current_child = 0;
+
+ flags = le32_to_cpu(f->n->header.flags);
+ if (flags & INTERNAL_NODE || is_internal_level(s->info, f))
+ prefetch_children(s, f);
}
return 0;
@@ -230,11 +250,6 @@ static void pop_frame(struct del_stack *s)
dm_tm_unlock(s->tm, f->b);
}
-static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
-{
- return f->level < (info->levels - 1);
-}
-
int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
{
int r;
@@ -243,6 +258,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
s = kmalloc(sizeof(*s), GFP_KERNEL);
if (!s)
return -ENOMEM;
+ s->info = info;
s->tm = info->tm;
s->top = -1;
@@ -287,7 +303,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
info->value_type.dec(info->value_type.context,
value_ptr(f->n, i));
}
- f->current_child = f->nr_children;
+ pop_frame(s);
}
}
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 3e7a88d..6058569 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -292,16 +292,11 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result)
return dm_tm_unlock(ll->tm, blk);
}
-int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
+static int sm_ll_lookup_big_ref_count(struct ll_disk *ll, dm_block_t b,
+ uint32_t *result)
{
__le32 le_rc;
- int r = sm_ll_lookup_bitmap(ll, b, result);
-
- if (r)
- return r;
-
- if (*result != 3)
- return r;
+ int r;
r = dm_btree_lookup(&ll->ref_count_info, ll->ref_count_root, &b, &le_rc);
if (r < 0)
@@ -312,6 +307,19 @@ int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
return r;
}
+int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
+{
+ int r = sm_ll_lookup_bitmap(ll, b, result);
+
+ if (r)
+ return r;
+
+ if (*result != 3)
+ return r;
+
+ return sm_ll_lookup_big_ref_count(ll, b, result);
+}
+
int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
dm_block_t end, dm_block_t *result)
{
@@ -372,11 +380,12 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
return -ENOSPC;
}
-int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
- uint32_t ref_count, enum allocation_event *ev)
+static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
+ uint32_t (*mutator)(void *context, uint32_t old),
+ void *context, enum allocation_event *ev)
{
int r;
- uint32_t bit, old;
+ uint32_t bit, old, ref_count;
struct dm_block *nb;
dm_block_t index = b;
struct disk_index_entry ie_disk;
@@ -399,6 +408,14 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
bm_le = dm_bitmap_data(nb);
old = sm_lookup_bitmap(bm_le, bit);
+ if (old > 2) {
+ r = sm_ll_lookup_big_ref_count(ll, b, &old);
+ if (r < 0)
+ return r;
+ }
+
+ ref_count = mutator(context, old);
+
if (ref_count <= 2) {
sm_set_bitmap(bm_le, bit, ref_count);
@@ -448,31 +465,35 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
return ll->save_ie(ll, index, &ie_disk);
}
-int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
+static uint32_t set_ref_count(void *context, uint32_t old)
{
- int r;
- uint32_t rc;
-
- r = sm_ll_lookup(ll, b, &rc);
- if (r)
- return r;
+ return *((uint32_t *) context);
+}
- return sm_ll_insert(ll, b, rc + 1, ev);
+int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
+ uint32_t ref_count, enum allocation_event *ev)
+{
+ return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev);
}
-int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
+static uint32_t inc_ref_count(void *context, uint32_t old)
{
- int r;
- uint32_t rc;
+ return old + 1;
+}
- r = sm_ll_lookup(ll, b, &rc);
- if (r)
- return r;
+int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
+{
+ return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev);
+}
- if (!rc)
- return -EINVAL;
+static uint32_t dec_ref_count(void *context, uint32_t old)
+{
+ return old - 1;
+}
- return sm_ll_insert(ll, b, rc - 1, ev);
+int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
+{
+ return sm_ll_mutate(ll, b, dec_ref_count, NULL, ev);
}
int sm_ll_commit(struct ll_disk *ll)
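With sm_ll_mutate() factored out, all three reference-count operations share the bitmap and overflow-btree bookkeeping and differ only in their mutator callback. A hypothetical fourth operation would slot in the same way (sketch; add_ref_count and sm_ll_add are illustrative names, not part of this patch):

static uint32_t add_ref_count(void *context, uint32_t old)
{
	return old + *((uint32_t *) context);	/* caller-supplied delta */
}

static int sm_ll_add(struct ll_disk *ll, dm_block_t b,
		     uint32_t delta, enum allocation_event *ev)
{
	return sm_ll_mutate(ll, b, add_ref_count, &delta, ev);
}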
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 78ea443..7ff4f25 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -53,6 +53,7 @@
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
+#include <linux/nodemask.h>
#include <trace/events/block.h>
#include "md.h"
@@ -60,6 +61,10 @@
#include "raid0.h"
#include "bitmap.h"
+#define cpu_to_group(cpu) cpu_to_node(cpu)
+#define ANY_GROUP NUMA_NO_NODE
+
+static struct workqueue_struct *raid5_wq;
/*
* Stripe cache
*/
@@ -72,6 +77,7 @@
#define BYPASS_THRESHOLD 1
#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK (NR_HASH - 1)
+#define MAX_STRIPE_BATCH 8
static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
@@ -200,6 +206,49 @@ static int stripe_operations_active(struct stripe_head *sh)
test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}
+static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
+{
+ struct r5conf *conf = sh->raid_conf;
+ struct r5worker_group *group;
+ int thread_cnt;
+ int i, cpu = sh->cpu;
+
+ if (!cpu_online(cpu)) {
+ cpu = cpumask_any(cpu_online_mask);
+ sh->cpu = cpu;
+ }
+
+ if (list_empty(&sh->lru)) {
+ struct r5worker_group *group;
+ group = conf->worker_groups + cpu_to_group(cpu);
+ list_add_tail(&sh->lru, &group->handle_list);
+ group->stripes_cnt++;
+ sh->group = group;
+ }
+
+ if (conf->worker_cnt_per_group == 0) {
+ md_wakeup_thread(conf->mddev->thread);
+ return;
+ }
+
+ group = conf->worker_groups + cpu_to_group(sh->cpu);
+
+ group->workers[0].working = true;
+ /* at least one worker should run to avoid race */
+ queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);
+
+ thread_cnt = group->stripes_cnt / MAX_STRIPE_BATCH - 1;
+ /* wakeup more workers */
+ for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) {
+ if (group->workers[i].working == false) {
+ group->workers[i].working = true;
+ queue_work_on(sh->cpu, raid5_wq,
+ &group->workers[i].work);
+ thread_cnt--;
+ }
+ }
+}
+
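The wakeup heuristic scales the number of active workers with the backlog: worker 0 is always kicked, plus roughly one extra worker per MAX_STRIPE_BATCH queued stripes. With illustrative numbers, stripes_cnt = 40 and MAX_STRIPE_BATCH = 8 give:

	thread_cnt = 40 / 8 - 1;	/* == 4: wake workers 1..4 as well */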
static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
{
BUG_ON(!list_empty(&sh->lru));
@@ -214,7 +263,12 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
else {
clear_bit(STRIPE_DELAYED, &sh->state);
clear_bit(STRIPE_BIT_DELAY, &sh->state);
- list_add_tail(&sh->lru, &conf->handle_list);
+ if (conf->worker_cnt_per_group == 0) {
+ list_add_tail(&sh->lru, &conf->handle_list);
+ } else {
+ raid5_wakeup_stripe_thread(sh);
+ return;
+ }
}
md_wakeup_thread(conf->mddev->thread);
} else {
@@ -239,12 +293,62 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
do_release_stripe(conf, sh);
}
+static struct llist_node *llist_reverse_order(struct llist_node *head)
+{
+ struct llist_node *new_head = NULL;
+
+ while (head) {
+ struct llist_node *tmp = head;
+ head = head->next;
+ tmp->next = new_head;
+ new_head = tmp;
+ }
+
+ return new_head;
+}
+
+/* should hold conf->device_lock already */
+static int release_stripe_list(struct r5conf *conf)
+{
+ struct stripe_head *sh;
+ int count = 0;
+ struct llist_node *head;
+
+ head = llist_del_all(&conf->released_stripes);
+ head = llist_reverse_order(head);
+ while (head) {
+ sh = llist_entry(head, struct stripe_head, release_list);
+ head = llist_next(head);
+ /* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */
+ smp_mb();
+ clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
+ /*
+ * Don't worry if the bit is set again here, because in
+ * that case the count is always > 1. The same holds for
+ * the STRIPE_ON_UNPLUG_LIST bit.
+ */
+ __release_stripe(conf, sh);
+ count++;
+ }
+
+ return count;
+}
+
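llist_add() pushes new entries at the head, so llist_del_all() hands the chain back newest-first; the reversal restores the order in which stripes were released. The drain pattern in isolation (process_stripe() is a hypothetical placeholder):

	head = llist_del_all(&conf->released_stripes);
	head = llist_reverse_order(head);	/* LIFO -> FIFO */
	while (head) {
		sh = llist_entry(head, struct stripe_head, release_list);
		head = llist_next(head);
		process_stripe(sh);
	}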
static void release_stripe(struct stripe_head *sh)
{
struct r5conf *conf = sh->raid_conf;
unsigned long flags;
+ bool wakeup;
+ if (test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
+ goto slow_path;
+ wakeup = llist_add(&sh->release_list, &conf->released_stripes);
+ if (wakeup)
+ md_wakeup_thread(conf->mddev->thread);
+ return;
+slow_path:
local_irq_save(flags);
+ /* we are ok here whether STRIPE_ON_RELEASE_LIST is set or not */
if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
do_release_stripe(conf, sh);
spin_unlock(&conf->device_lock);
@@ -359,6 +463,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
raid5_build_block(sh, i, previous);
}
insert_hash(conf, sh);
+ sh->cpu = smp_processor_id();
}
static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
@@ -491,7 +596,8 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
if (atomic_read(&sh->count)) {
BUG_ON(!list_empty(&sh->lru)
&& !test_bit(STRIPE_EXPANDING, &sh->state)
- && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state));
+ && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)
+ && !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
} else {
if (!test_bit(STRIPE_HANDLE, &sh->state))
atomic_inc(&conf->active_stripes);
@@ -499,6 +605,10 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
!test_bit(STRIPE_EXPANDING, &sh->state))
BUG();
list_del_init(&sh->lru);
+ if (sh->group) {
+ sh->group->stripes_cnt--;
+ sh->group = NULL;
+ }
}
}
} while (sh == NULL);
@@ -3779,6 +3889,7 @@ static void raid5_activate_delayed(struct r5conf *conf)
if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
atomic_inc(&conf->preread_active_stripes);
list_add_tail(&sh->lru, &conf->hold_list);
+ raid5_wakeup_stripe_thread(sh);
}
}
}
@@ -4058,18 +4169,35 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
* head of the hold_list has changed, i.e. the head was promoted to the
* handle_list.
*/
-static struct stripe_head *__get_priority_stripe(struct r5conf *conf)
+static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
{
- struct stripe_head *sh;
+ struct stripe_head *sh = NULL, *tmp;
+ struct list_head *handle_list = NULL;
+ struct r5worker_group *wg = NULL;
+
+ if (conf->worker_cnt_per_group == 0) {
+ handle_list = &conf->handle_list;
+ } else if (group != ANY_GROUP) {
+ handle_list = &conf->worker_groups[group].handle_list;
+ wg = &conf->worker_groups[group];
+ } else {
+ int i;
+ for (i = 0; i < conf->group_cnt; i++) {
+ handle_list = &conf->worker_groups[i].handle_list;
+ wg = &conf->worker_groups[i];
+ if (!list_empty(handle_list))
+ break;
+ }
+ }
pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
__func__,
- list_empty(&conf->handle_list) ? "empty" : "busy",
+ list_empty(handle_list) ? "empty" : "busy",
list_empty(&conf->hold_list) ? "empty" : "busy",
atomic_read(&conf->pending_full_writes), conf->bypass_count);
- if (!list_empty(&conf->handle_list)) {
- sh = list_entry(conf->handle_list.next, typeof(*sh), lru);
+ if (!list_empty(handle_list)) {
+ sh = list_entry(handle_list->next, typeof(*sh), lru);
if (list_empty(&conf->hold_list))
conf->bypass_count = 0;
@@ -4087,14 +4215,32 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf)
((conf->bypass_threshold &&
conf->bypass_count > conf->bypass_threshold) ||
atomic_read(&conf->pending_full_writes) == 0)) {
- sh = list_entry(conf->hold_list.next,
- typeof(*sh), lru);
- conf->bypass_count -= conf->bypass_threshold;
- if (conf->bypass_count < 0)
- conf->bypass_count = 0;
- } else
+
+ list_for_each_entry(tmp, &conf->hold_list, lru) {
+ if (conf->worker_cnt_per_group == 0 ||
+ group == ANY_GROUP ||
+ !cpu_online(tmp->cpu) ||
+ cpu_to_group(tmp->cpu) == group) {
+ sh = tmp;
+ break;
+ }
+ }
+
+ if (sh) {
+ conf->bypass_count -= conf->bypass_threshold;
+ if (conf->bypass_count < 0)
+ conf->bypass_count = 0;
+ }
+ wg = NULL;
+ }
+
+ if (!sh)
return NULL;
+ if (wg) {
+ wg->stripes_cnt--;
+ sh->group = NULL;
+ }
list_del_init(&sh->lru);
atomic_inc(&sh->count);
BUG_ON(atomic_read(&sh->count) != 1);
@@ -4127,6 +4273,10 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
*/
smp_mb__before_clear_bit();
clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
+ /*
+ * STRIPE_ON_RELEASE_LIST could be set here. In that
+ * case, the count is always > 1.
+ */
__release_stripe(conf, sh);
cnt++;
}
@@ -4286,8 +4436,10 @@ static void make_request(struct mddev *mddev, struct bio * bi)
for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
DEFINE_WAIT(w);
int previous;
+ int seq;
retry:
+ seq = read_seqcount_begin(&conf->gen_lock);
previous = 0;
prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
if (unlikely(conf->reshape_progress != MaxSector)) {
@@ -4320,7 +4472,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
previous,
&dd_idx, NULL);
pr_debug("raid456: make_request, sector %llu logical %llu\n",
- (unsigned long long)new_sector,
+ (unsigned long long)new_sector,
(unsigned long long)logical_sector);
sh = get_active_stripe(conf, new_sector, previous,
@@ -4349,6 +4501,13 @@ static void make_request(struct mddev *mddev, struct bio * bi)
goto retry;
}
}
+ if (read_seqcount_retry(&conf->gen_lock, seq)) {
+ /* Might have got the wrong stripe_head
+ * by accident
+ */
+ release_stripe(sh);
+ goto retry;
+ }
if (rw == WRITE &&
logical_sector >= mddev->suspend_lo &&
@@ -4788,14 +4947,14 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
return handled;
}
-#define MAX_STRIPE_BATCH 8
-static int handle_active_stripes(struct r5conf *conf)
+static int handle_active_stripes(struct r5conf *conf, int group,
+ struct r5worker *worker)
{
struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
int i, batch_size = 0;
while (batch_size < MAX_STRIPE_BATCH &&
- (sh = __get_priority_stripe(conf)) != NULL)
+ (sh = __get_priority_stripe(conf, group)) != NULL)
batch[batch_size++] = sh;
if (batch_size == 0)
@@ -4813,6 +4972,39 @@ static int handle_active_stripes(struct r5conf *conf)
return batch_size;
}
+static void raid5_do_work(struct work_struct *work)
+{
+ struct r5worker *worker = container_of(work, struct r5worker, work);
+ struct r5worker_group *group = worker->group;
+ struct r5conf *conf = group->conf;
+ int group_id = group - conf->worker_groups;
+ int handled;
+ struct blk_plug plug;
+
+ pr_debug("+++ raid5worker active\n");
+
+ blk_start_plug(&plug);
+ handled = 0;
+ spin_lock_irq(&conf->device_lock);
+ while (1) {
+ int batch_size, released;
+
+ released = release_stripe_list(conf);
+
+ batch_size = handle_active_stripes(conf, group_id, worker);
+ worker->working = false;
+ if (!batch_size && !released)
+ break;
+ handled += batch_size;
+ }
+ pr_debug("%d stripes handled\n", handled);
+
+ spin_unlock_irq(&conf->device_lock);
+ blk_finish_plug(&plug);
+
+ pr_debug("--- raid5worker inactive\n");
+}
+
/*
* This is our raid5 kernel thread.
*
@@ -4836,7 +5028,9 @@ static void raid5d(struct md_thread *thread)
spin_lock_irq(&conf->device_lock);
while (1) {
struct bio *bio;
- int batch_size;
+ int batch_size, released;
+
+ released = release_stripe_list(conf);
if (
!list_empty(&conf->bitmap_list)) {
@@ -4860,8 +5054,8 @@ static void raid5d(struct md_thread *thread)
handled++;
}
- batch_size = handle_active_stripes(conf);
- if (!batch_size)
+ batch_size = handle_active_stripes(conf, ANY_GROUP, NULL);
+ if (!batch_size && !released)
break;
handled += batch_size;
@@ -4989,10 +5183,70 @@ stripe_cache_active_show(struct mddev *mddev, char *page)
static struct md_sysfs_entry
raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
+static ssize_t
+raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
+{
+ struct r5conf *conf = mddev->private;
+ if (conf)
+ return sprintf(page, "%d\n", conf->worker_cnt_per_group);
+ else
+ return 0;
+}
+
+static int alloc_thread_groups(struct r5conf *conf, int cnt);
+static ssize_t
+raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
+{
+ struct r5conf *conf = mddev->private;
+ unsigned long new;
+ int err;
+ struct r5worker_group *old_groups;
+ int old_group_cnt;
+
+ if (len >= PAGE_SIZE)
+ return -EINVAL;
+ if (!conf)
+ return -ENODEV;
+
+ if (kstrtoul(page, 10, &new))
+ return -EINVAL;
+
+ if (new == conf->worker_cnt_per_group)
+ return len;
+
+ mddev_suspend(mddev);
+
+ old_groups = conf->worker_groups;
+ old_group_cnt = conf->worker_cnt_per_group;
+
+ conf->worker_groups = NULL;
+ err = alloc_thread_groups(conf, new);
+ if (err) {
+ conf->worker_groups = old_groups;
+ conf->worker_cnt_per_group = old_group_cnt;
+ } else {
+ if (old_groups)
+ kfree(old_groups[0].workers);
+ kfree(old_groups);
+ }
+
+ mddev_resume(mddev);
+
+ if (err)
+ return err;
+ return len;
+}
+
+static struct md_sysfs_entry
+raid5_group_thread_cnt = __ATTR(group_thread_cnt, S_IRUGO | S_IWUSR,
+ raid5_show_group_thread_cnt,
+ raid5_store_group_thread_cnt);
+
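The thread count is runtime-tunable through sysfs; the store handler above suspends the array around the swap so no stripes are in flight while the handle lists move between groups. For example (illustrative path), writing "4" to /sys/block/md0/md/group_thread_cnt allocates four workers per NUMA node, and writing "0" reverts to the single raid5d thread.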
static struct attribute *raid5_attrs[] = {
&raid5_stripecache_size.attr,
&raid5_stripecache_active.attr,
&raid5_preread_bypass_threshold.attr,
+ &raid5_group_thread_cnt.attr,
NULL,
};
static struct attribute_group raid5_attrs_group = {
@@ -5000,6 +5254,54 @@ static struct attribute_group raid5_attrs_group = {
.attrs = raid5_attrs,
};
+static int alloc_thread_groups(struct r5conf *conf, int cnt)
+{
+ int i, j;
+ ssize_t size;
+ struct r5worker *workers;
+
+ conf->worker_cnt_per_group = cnt;
+ if (cnt == 0) {
+ conf->worker_groups = NULL;
+ return 0;
+ }
+ conf->group_cnt = num_possible_nodes();
+ size = sizeof(struct r5worker) * cnt;
+ workers = kzalloc(size * conf->group_cnt, GFP_NOIO);
+ conf->worker_groups = kzalloc(sizeof(struct r5worker_group) *
+ conf->group_cnt, GFP_NOIO);
+ if (!conf->worker_groups || !workers) {
+ kfree(workers);
+ kfree(conf->worker_groups);
+ conf->worker_groups = NULL;
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < conf->group_cnt; i++) {
+ struct r5worker_group *group;
+
+ group = &conf->worker_groups[i];
+ INIT_LIST_HEAD(&group->handle_list);
+ group->conf = conf;
+ group->workers = workers + i * cnt;
+
+ for (j = 0; j < cnt; j++) {
+ group->workers[j].group = group;
+ INIT_WORK(&group->workers[j].work, raid5_do_work);
+ }
+ }
+
+ return 0;
+}
+
+static void free_thread_groups(struct r5conf *conf)
+{
+ if (conf->worker_groups)
+ kfree(conf->worker_groups[0].workers);
+ kfree(conf->worker_groups);
+ conf->worker_groups = NULL;
+}
+
static sector_t
raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
@@ -5040,6 +5342,7 @@ static void raid5_free_percpu(struct r5conf *conf)
static void free_conf(struct r5conf *conf)
{
+ free_thread_groups(conf);
shrink_stripes(conf);
raid5_free_percpu(conf);
kfree(conf->disks);
@@ -5168,7 +5471,11 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL);
if (conf == NULL)
goto abort;
+ /* Don't enable multi-threading by default */
+ if (alloc_thread_groups(conf, 0))
+ goto abort;
spin_lock_init(&conf->device_lock);
+ seqcount_init(&conf->gen_lock);
init_waitqueue_head(&conf->wait_for_stripe);
init_waitqueue_head(&conf->wait_for_overlap);
INIT_LIST_HEAD(&conf->handle_list);
@@ -5176,6 +5483,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
INIT_LIST_HEAD(&conf->delayed_list);
INIT_LIST_HEAD(&conf->bitmap_list);
INIT_LIST_HEAD(&conf->inactive_list);
+ init_llist_head(&conf->released_stripes);
atomic_set(&conf->active_stripes, 0);
atomic_set(&conf->preread_active_stripes, 0);
atomic_set(&conf->active_aligned_reads, 0);
@@ -5980,6 +6288,7 @@ static int raid5_start_reshape(struct mddev *mddev)
atomic_set(&conf->reshape_stripes, 0);
spin_lock_irq(&conf->device_lock);
+ write_seqcount_begin(&conf->gen_lock);
conf->previous_raid_disks = conf->raid_disks;
conf->raid_disks += mddev->delta_disks;
conf->prev_chunk_sectors = conf->chunk_sectors;
@@ -5996,8 +6305,16 @@ static int raid5_start_reshape(struct mddev *mddev)
else
conf->reshape_progress = 0;
conf->reshape_safe = conf->reshape_progress;
+ write_seqcount_end(&conf->gen_lock);
spin_unlock_irq(&conf->device_lock);
+ /* Now make sure any requests that proceeded on the assumption
+ * the reshape wasn't running - like Discard or Read - have
+ * completed.
+ */
+ mddev_suspend(mddev);
+ mddev_resume(mddev);
+
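The gen_lock seqcount pairs the reshape-side writer here with the reader added to make_request() earlier in this patch; reduced to its skeleton (a sketch, with the stripe release on retry omitted):

	/* writer (raid5_start_reshape), under conf->device_lock: */
	write_seqcount_begin(&conf->gen_lock);
	/* ... update raid_disks / chunk_sectors / reshape_progress ... */
	write_seqcount_end(&conf->gen_lock);

	/* reader (make_request): */
	do {
		seq = read_seqcount_begin(&conf->gen_lock);
		/* ... map the bio and look up the stripe ... */
	} while (read_seqcount_retry(&conf->gen_lock, seq));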
/* Add some new drives, as many as will fit.
* We know there are enough to make the newly sized array work.
* Don't add devices if we are reducing the number of
@@ -6472,6 +6789,10 @@ static struct md_personality raid4_personality =
static int __init raid5_init(void)
{
+ raid5_wq = alloc_workqueue("raid5wq",
+ WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0);
+ if (!raid5_wq)
+ return -ENOMEM;
register_md_personality(&raid6_personality);
register_md_personality(&raid5_personality);
register_md_personality(&raid4_personality);
@@ -6483,6 +6804,7 @@ static void raid5_exit(void)
unregister_md_personality(&raid6_personality);
unregister_md_personality(&raid5_personality);
unregister_md_personality(&raid4_personality);
+ destroy_workqueue(raid5_wq);
}
module_init(raid5_init);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 70c4932..2113ffa 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -197,6 +197,7 @@ enum reconstruct_states {
struct stripe_head {
struct hlist_node hash;
struct list_head lru; /* inactive_list or handle_list */
+ struct llist_node release_list;
struct r5conf *raid_conf;
short generation; /* increments with every
* reshape */
@@ -211,6 +212,8 @@ struct stripe_head {
enum check_states check_state;
enum reconstruct_states reconstruct_state;
spinlock_t stripe_lock;
+ int cpu;
+ struct r5worker_group *group;
/**
* struct stripe_operations
* @target - STRIPE_OP_COMPUTE_BLK target
@@ -321,6 +324,7 @@ enum {
STRIPE_OPS_REQ_PENDING,
STRIPE_ON_UNPLUG_LIST,
STRIPE_DISCARD,
+ STRIPE_ON_RELEASE_LIST,
};
/*
@@ -363,6 +367,19 @@ struct disk_info {
struct md_rdev *rdev, *replacement;
};
+struct r5worker {
+ struct work_struct work;
+ struct r5worker_group *group;
+ bool working;
+};
+
+struct r5worker_group {
+ struct list_head handle_list;
+ struct r5conf *conf;
+ struct r5worker *workers;
+ int stripes_cnt;
+};
+
struct r5conf {
struct hlist_head *stripe_hashtbl;
struct mddev *mddev;
@@ -386,6 +403,7 @@ struct r5conf {
int prev_chunk_sectors;
int prev_algo;
short generation; /* increments with every reshape */
+ seqcount_t gen_lock; /* lock against generation changes */
unsigned long reshape_checkpoint; /* Time we last updated
* metadata */
long long min_offset_diff; /* minimum difference between
@@ -445,6 +463,7 @@ struct r5conf {
*/
atomic_t active_stripes;
struct list_head inactive_list;
+ struct llist_head released_stripes;
wait_queue_head_t wait_for_stripe;
wait_queue_head_t wait_for_overlap;
int inactive_blocked; /* release of inactive stripes blocked,
@@ -458,6 +477,9 @@ struct r5conf {
* the new thread here until we fully activate the array.
*/
struct md_thread *thread;
+ struct r5worker_group *worker_groups;
+ int group_cnt;
+ int worker_cnt_per_group;
};
/*
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index cd0b7f4..1a3163f 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -812,7 +812,7 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
* Otherwise we don't understand what happened, so abort.
*/
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
- struct mmc_blk_request *brq, int *ecc_err)
+ struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
{
bool prev_cmd_status_valid = true;
u32 status, stop_status = 0;
@@ -850,6 +850,16 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
(brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
*ecc_err = 1;
+ /* Flag General errors */
+ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
+ if ((status & R1_ERROR) ||
+ (brq->stop.resp[0] & R1_ERROR)) {
+ pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
+ req->rq_disk->disk_name, __func__,
+ brq->stop.resp[0], status);
+ *gen_err = 1;
+ }
+
/*
* Check the current card state. If it is in some data transfer
* mode, tell it to stop (and hopefully transition back to TRAN.)
@@ -869,6 +879,13 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
return ERR_ABORT;
if (stop_status & R1_CARD_ECC_FAILED)
*ecc_err = 1;
+ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
+ if (stop_status & R1_ERROR) {
+ pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
+ req->rq_disk->disk_name, __func__,
+ stop_status);
+ *gen_err = 1;
+ }
}
/* Check for set block count errors */
@@ -1097,7 +1114,7 @@ static int mmc_blk_err_check(struct mmc_card *card,
mmc_active);
struct mmc_blk_request *brq = &mq_mrq->brq;
struct request *req = mq_mrq->req;
- int ecc_err = 0;
+ int ecc_err = 0, gen_err = 0;
/*
* sbc.error indicates a problem with the set block count
@@ -1111,7 +1128,7 @@ static int mmc_blk_err_check(struct mmc_card *card,
*/
if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
brq->data.error) {
- switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
+ switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
case ERR_RETRY:
return MMC_BLK_RETRY;
case ERR_ABORT:
@@ -1143,6 +1160,14 @@ static int mmc_blk_err_check(struct mmc_card *card,
u32 status;
unsigned long timeout;
+ /* Check stop command response */
+ if (brq->stop.resp[0] & R1_ERROR) {
+ pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
+ req->rq_disk->disk_name, __func__,
+ brq->stop.resp[0]);
+ gen_err = 1;
+ }
+
timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS);
do {
int err = get_card_status(card, &status, 5);
@@ -1152,6 +1177,13 @@ static int mmc_blk_err_check(struct mmc_card *card,
return MMC_BLK_CMD_ERR;
}
+ if (status & R1_ERROR) {
+ pr_err("%s: %s: general error sending status command, card status %#x\n",
+ req->rq_disk->disk_name, __func__,
+ status);
+ gen_err = 1;
+ }
+
/* Timeout if the device never becomes ready for data
* and never leaves the program state.
*/
@@ -1171,6 +1203,13 @@ static int mmc_blk_err_check(struct mmc_card *card,
(R1_CURRENT_STATE(status) == R1_STATE_PRG));
}
+ /* If a general error occurs, retry the write operation. */
+ if (gen_err) {
+ pr_warn("%s: retrying write for general error\n",
+ req->rq_disk->disk_name);
+ return MMC_BLK_RETRY;
+ }
+
if (brq->data.error) {
pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
req->rq_disk->disk_name, brq->data.error,
@@ -2191,10 +2230,10 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
* is freeing the queue that stops new requests
* from being accepted.
*/
+ card = md->queue.card;
mmc_cleanup_queue(&md->queue);
if (md->flags & MMC_BLK_PACKED_CMD)
mmc_packed_clean(&md->queue);
- card = md->queue.card;
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index a69df52..0c0fc52 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -2849,18 +2849,12 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf,
struct seq_file *sf = (struct seq_file *)file->private_data;
struct mmc_card *card = (struct mmc_card *)sf->private;
struct mmc_test_card *test;
- char lbuf[12];
long testcase;
+ int ret;
- if (count >= sizeof(lbuf))
- return -EINVAL;
-
- if (copy_from_user(lbuf, buf, count))
- return -EFAULT;
- lbuf[count] = '\0';
-
- if (strict_strtol(lbuf, 10, &testcase))
- return -EINVAL;
+ ret = kstrtol_from_user(buf, count, 10, &testcase);
+ if (ret)
+ return ret;
test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
if (!test)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 5d08855..bf18b6b 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -27,6 +27,7 @@
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -1196,6 +1197,49 @@ u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
+#ifdef CONFIG_OF
+
+/**
+ * mmc_of_parse_voltage - return mask of supported voltages
+ * @np: The device node to be parsed.
+ * @mask: mask of voltages available for MMC/SD/SDIO
+ *
+ * Returns zero on success, or a negative errno if the
+ * voltage-ranges property is absent or a range is invalid.
+ */
+int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
+{
+ const u32 *voltage_ranges;
+ int num_ranges, i;
+
+ voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
+ num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
+ if (!voltage_ranges || !num_ranges) {
+ pr_info("%s: voltage-ranges unspecified\n", np->full_name);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_ranges; i++) {
+ const int j = i * 2;
+ u32 ocr_mask;
+
+ ocr_mask = mmc_vddrange_to_ocrmask(
+ be32_to_cpu(voltage_ranges[j]),
+ be32_to_cpu(voltage_ranges[j + 1]));
+ if (!ocr_mask) {
+ pr_err("%s: voltage-range #%d is invalid\n",
+ np->full_name, i);
+ return -EINVAL;
+ }
+ *mask |= ocr_mask;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mmc_of_parse_voltage);
+
+#endif /* CONFIG_OF */
+
#ifdef CONFIG_REGULATOR
/**
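On the consumer side, a host driver parses the node once at probe time and only overrides the controller-reported OCR mask when the property is present and valid; a defensive variant of the sdhci-of-esdhc hook-up later in this series (sketch):

	u32 ocr_mask = 0;

	if (!mmc_of_parse_voltage(np, &ocr_mask))
		host->ocr_mask = ocr_mask;	/* DT-supplied voltage ranges */
	/* otherwise keep the mask derived from the controller caps */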
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 6fb6f77..49bc403 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -374,7 +374,7 @@ int mmc_of_parse(struct mmc_host *host)
if (!(flags & OF_GPIO_ACTIVE_LOW))
gpio_inv_cd = true;
- ret = mmc_gpio_request_cd(host, gpio);
+ ret = mmc_gpio_request_cd(host, gpio, 0);
if (ret < 0) {
dev_err(host->parent,
"Failed to request CD GPIO #%d: %d!\n",
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 837fc73..ef18348 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -531,6 +531,7 @@ mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
data.sg = &sg;
data.sg_len = 1;
+ mmc_set_data_timeout(&data, card);
sg_init_one(&sg, data_buf, len);
mmc_wait_for_req(host, &mrq);
err = 0;
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 176d125..5e8823d 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -215,7 +215,7 @@ static int mmc_decode_scr(struct mmc_card *card)
static int mmc_read_ssr(struct mmc_card *card)
{
unsigned int au, es, et, eo;
- int err, i;
+ int err, i, max_au;
u32 *ssr;
if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
@@ -239,12 +239,15 @@ static int mmc_read_ssr(struct mmc_card *card)
for (i = 0; i < 16; i++)
ssr[i] = be32_to_cpu(ssr[i]);
+ /* SD3.0 increases max AU size to 64MB (0xF) from 4MB (0x9) */
+ max_au = card->scr.sda_spec3 ? 0xF : 0x9;
+
/*
* UNSTUFF_BITS only works with four u32s so we have to offset the
* bitfield positions accordingly.
*/
au = UNSTUFF_BITS(ssr, 428 - 384, 4);
- if (au > 0 && au <= 9) {
+ if (au > 0 && au <= max_au) {
card->ssr.au = 1 << (au + 4);
es = UNSTUFF_BITS(ssr, 408 - 384, 16);
et = UNSTUFF_BITS(ssr, 402 - 384, 6);
@@ -942,13 +945,13 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
if (!mmc_host_is_spi(host)) {
err = mmc_send_relative_addr(host, &card->rca);
if (err)
- return err;
+ goto free_card;
}
if (!oldcard) {
err = mmc_sd_get_csd(host, card);
if (err)
- return err;
+ goto free_card;
mmc_decode_cid(card);
}
@@ -959,7 +962,7 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
if (!mmc_host_is_spi(host)) {
err = mmc_select_card(card);
if (err)
- return err;
+ goto free_card;
}
err = mmc_sd_setup_card(host, card, oldcard != NULL);
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 3242351..46596b71 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -135,6 +135,7 @@ EXPORT_SYMBOL(mmc_gpio_request_ro);
* mmc_gpio_request_cd - request a gpio for card-detection
* @host: mmc host
* @gpio: gpio number requested
+ * @debounce: debounce time in microseconds
*
* As devm_* managed functions are used in mmc_gpio_request_cd(), client
* drivers do not need to explicitly call mmc_gpio_free_cd() for freeing up,
@@ -143,9 +144,14 @@ EXPORT_SYMBOL(mmc_gpio_request_ro);
* switching for card-detection, they are responsible for calling
* mmc_gpio_request_cd() and mmc_gpio_free_cd() as a pair on their own.
*
+ * If GPIO debouncing is desired, set the debounce parameter to a non-zero
+ * value. The caller is responsible for ensuring that the GPIO driver associated
+ * with the GPIO supports debouncing, otherwise an error will be returned.
+ *
* Returns zero on success, else an error.
*/
-int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
+int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio,
+ unsigned int debounce)
{
struct mmc_gpio *ctx;
int irq = gpio_to_irq(gpio);
@@ -167,6 +173,12 @@ int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
*/
return ret;
+ if (debounce) {
+ ret = gpio_set_debounce(gpio, debounce);
+ if (ret < 0)
+ return ret;
+ }
+
/*
* Even if gpio_to_irq() returns a valid IRQ number, the platform might
* still prefer to poll, e.g., because that IRQ number is already used
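Callers converted in this series pass 0 to keep behaviour unchanged; a board driver that wants hardware debouncing would pass a time in microseconds instead (sketch, assuming a 200 ms bounce window and a GPIO controller that supports debouncing):

	ret = mmc_gpio_request_cd(mmc, pdata->gpio_card_detect,
				  200 * 1000);	/* 200 ms, in microseconds */
	if (ret)
		return ret;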
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 8a4c066..b7fd5ab 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -284,11 +284,11 @@ config MMC_OMAP
config MMC_OMAP_HS
tristate "TI OMAP High Speed Multimedia Card Interface support"
- depends on SOC_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
help
This selects the TI OMAP High Speed Multimedia card Interface.
- If you have an OMAP2430 or OMAP3 board or OMAP4 board with a
- Multimedia Card slot, say Y or M here.
+ If you have an omap2plus board with a Multimedia Card slot,
+ say Y or M here.
If unsure, say N.
@@ -530,7 +530,7 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
config MMC_DW
tristate "Synopsys DesignWare Memory Card Interface"
- depends on ARM
+ depends on ARC || ARM
help
This selects support for the Synopsys DesignWare Mobile Storage IP
block, this provides host support for SD and MMC interfaces, in both
@@ -569,7 +569,7 @@ config MMC_DW_EXYNOS
config MMC_DW_SOCFPGA
tristate "SOCFPGA specific extensions for Synopsys DW Memory Card Interface"
- depends on MMC_DW
+ depends on MMC_DW && MFD_SYSCON
select MMC_DW_PLTFM
help
This selects support for Altera SoCFPGA specific extensions to the
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index d422e21..c41d0c3 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -52,8 +52,6 @@ obj-$(CONFIG_MMC_WMT) += wmt-sdmmc.o
obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o
-obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o
-
obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
obj-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index bdb84da..69e438e 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -378,6 +378,8 @@ static int atmci_regs_show(struct seq_file *s, void *v)
{
struct atmel_mci *host = s->private;
u32 *buf;
+ int ret = 0;
+
buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL);
if (!buf)
@@ -388,12 +390,16 @@ static int atmci_regs_show(struct seq_file *s, void *v)
* not disabling interrupts, so IMR and SR may not be
* consistent.
*/
+ ret = clk_prepare_enable(host->mck);
+ if (ret)
+ goto out;
+
spin_lock_bh(&host->lock);
- clk_enable(host->mck);
memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
- clk_disable(host->mck);
spin_unlock_bh(&host->lock);
+ clk_disable_unprepare(host->mck);
+
seq_printf(s, "MR:\t0x%08x%s%s ",
buf[ATMCI_MR / 4],
buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "",
@@ -442,9 +448,10 @@ static int atmci_regs_show(struct seq_file *s, void *v)
val & ATMCI_CFG_LSYNC ? " LSYNC" : "");
}
+out:
kfree(buf);
- return 0;
+ return ret;
}
static int atmci_regs_open(struct inode *inode, struct file *file)
@@ -1262,6 +1269,7 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct atmel_mci_slot *slot = mmc_priv(mmc);
struct atmel_mci *host = slot->host;
unsigned int i;
+ bool unprepare_clk;
slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
switch (ios->bus_width) {
@@ -1277,9 +1285,13 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
unsigned int clock_min = ~0U;
u32 clkdiv;
+ clk_prepare(host->mck);
+ unprepare_clk = true;
+
spin_lock_bh(&host->lock);
if (!host->mode_reg) {
clk_enable(host->mck);
+ unprepare_clk = false;
atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
if (host->caps.has_cfg_reg)
@@ -1347,6 +1359,8 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
} else {
bool any_slot_active = false;
+ unprepare_clk = false;
+
spin_lock_bh(&host->lock);
slot->clock = 0;
for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
@@ -1360,12 +1374,16 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (host->mode_reg) {
atmci_readl(host, ATMCI_MR);
clk_disable(host->mck);
+ unprepare_clk = true;
}
host->mode_reg = 0;
}
spin_unlock_bh(&host->lock);
}
+ if (unprepare_clk)
+ clk_unprepare(host->mck);
+
switch (ios->power_mode) {
case MMC_POWER_UP:
set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
@@ -2376,10 +2394,12 @@ static int __init atmci_probe(struct platform_device *pdev)
if (!host->regs)
goto err_ioremap;
- clk_enable(host->mck);
+ ret = clk_prepare_enable(host->mck);
+ if (ret)
+ goto err_request_irq;
atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
host->bus_hz = clk_get_rate(host->mck);
- clk_disable(host->mck);
+ clk_disable_unprepare(host->mck);
host->mapbase = regs->start;
@@ -2482,11 +2502,11 @@ static int __exit atmci_remove(struct platform_device *pdev)
atmci_cleanup_slot(host->slot[i], i);
}
- clk_enable(host->mck);
+ clk_prepare_enable(host->mck);
atmci_writel(host, ATMCI_IDR, ~0UL);
atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
atmci_readl(host, ATMCI_SR);
- clk_disable(host->mck);
+ clk_disable_unprepare(host->mck);
if (host->dma.chan)
dma_release_channel(host->dma.chan);
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 866edef..6a1fa21 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -39,6 +39,7 @@ enum dw_mci_exynos_type {
DW_MCI_TYPE_EXYNOS4210,
DW_MCI_TYPE_EXYNOS4412,
DW_MCI_TYPE_EXYNOS5250,
+ DW_MCI_TYPE_EXYNOS5420,
};
/* Exynos implementation specific driver private data */
@@ -62,6 +63,9 @@ static struct dw_mci_exynos_compatible {
}, {
.compatible = "samsung,exynos5250-dw-mshc",
.ctrl_type = DW_MCI_TYPE_EXYNOS5250,
+ }, {
+ .compatible = "samsung,exynos5420-dw-mshc",
+ .ctrl_type = DW_MCI_TYPE_EXYNOS5420,
},
};
@@ -90,7 +94,8 @@ static int dw_mci_exynos_setup_clock(struct dw_mci *host)
{
struct dw_mci_exynos_priv_data *priv = host->priv;
- if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5250)
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5250 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420)
host->bus_hz /= (priv->ciu_div + 1);
else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412)
host->bus_hz /= EXYNOS4412_FIXED_CIU_CLK_DIV;
@@ -173,6 +178,8 @@ static const struct of_device_id dw_mci_exynos_match[] = {
.data = &exynos_drv_data, },
{ .compatible = "samsung,exynos5250-dw-mshc",
.data = &exynos_drv_data, },
+ { .compatible = "samsung,exynos5420-dw-mshc",
+ .data = &exynos_drv_data, },
{},
};
MODULE_DEVICE_TABLE(of, dw_mci_exynos_match);
diff --git a/drivers/mmc/host/dw_mmc-pci.c b/drivers/mmc/host/dw_mmc-pci.c
index b456b0c..f70546a 100644
--- a/drivers/mmc/host/dw_mmc-pci.c
+++ b/drivers/mmc/host/dw_mmc-pci.c
@@ -59,7 +59,9 @@ static int dw_mci_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- host->regs = pcim_iomap_table(pdev)[0];
+ host->regs = pcim_iomap_table(pdev)[PCI_BAR_NO];
+
+ pci_set_master(pdev);
ret = dw_mci_probe(host);
if (ret)
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index ee52556..2089752 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -23,6 +23,7 @@
#include <linux/of.h>
#include "dw_mmc.h"
+#include "dw_mmc-pltfm.h"
static void dw_mci_rockchip_prepare_command(struct dw_mci *host, u32 *cmdr)
{
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 5424073..018f365 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1601,18 +1601,17 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
pending = mci_readl(host, MINTSTS); /* read-only mask reg */
- if (pending) {
-
- /*
- * DTO fix - version 2.10a and below, and only if internal DMA
- * is configured.
- */
- if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
- if (!pending &&
- ((mci_readl(host, STATUS) >> 17) & 0x1fff))
- pending |= SDMMC_INT_DATA_OVER;
- }
+ /*
+ * DTO fix - version 2.10a and below, and only if internal DMA
+ * is configured.
+ */
+ if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
+ if (!pending &&
+ ((mci_readl(host, STATUS) >> 17) & 0x1fff))
+ pending |= SDMMC_INT_DATA_OVER;
+ }
+ if (pending) {
if (pending & DW_MCI_CMD_ERROR_FLAGS) {
mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
host->cmd_status = pending;
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 0308c9f..6651633 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -713,7 +713,7 @@ static int jz4740_mmc_request_gpios(struct mmc_host *mmc,
mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
if (gpio_is_valid(pdata->gpio_card_detect)) {
- ret = mmc_gpio_request_cd(mmc, pdata->gpio_card_detect);
+ ret = mmc_gpio_request_cd(mmc, pdata->gpio_card_detect, 0);
if (ret)
return ret;
}
@@ -783,9 +783,8 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->base = devm_ioremap_resource(&pdev->dev, res);
- if (!host->base) {
- ret = -EBUSY;
- dev_err(&pdev->dev, "Failed to ioremap base memory\n");
+ if (IS_ERR(host->base)) {
+ ret = PTR_ERR(host->base);
goto err_free_host;
}
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 74145d1..0a87e56 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -36,6 +36,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h> /* for R1_SPI_* bit values */
+#include <linux/mmc/slot-gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/mmc_spi.h>
@@ -1272,33 +1273,11 @@ static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
}
-static int mmc_spi_get_ro(struct mmc_host *mmc)
-{
- struct mmc_spi_host *host = mmc_priv(mmc);
-
- if (host->pdata && host->pdata->get_ro)
- return !!host->pdata->get_ro(mmc->parent);
- /*
- * Board doesn't support read only detection; let the mmc core
- * decide what to do.
- */
- return -ENOSYS;
-}
-
-static int mmc_spi_get_cd(struct mmc_host *mmc)
-{
- struct mmc_spi_host *host = mmc_priv(mmc);
-
- if (host->pdata && host->pdata->get_cd)
- return !!host->pdata->get_cd(mmc->parent);
- return -ENOSYS;
-}
-
static const struct mmc_host_ops mmc_spi_ops = {
.request = mmc_spi_request,
.set_ios = mmc_spi_set_ios,
- .get_ro = mmc_spi_get_ro,
- .get_cd = mmc_spi_get_cd,
+ .get_ro = mmc_gpio_get_ro,
+ .get_cd = mmc_gpio_get_cd,
};
@@ -1324,6 +1303,7 @@ static int mmc_spi_probe(struct spi_device *spi)
struct mmc_host *mmc;
struct mmc_spi_host *host;
int status;
+ bool has_ro = false;
/* We rely on full duplex transfers, mostly to reduce
* per-transfer overheads (by making fewer transfers).
@@ -1448,18 +1428,33 @@ static int mmc_spi_probe(struct spi_device *spi)
}
/* pass platform capabilities, if any */
- if (host->pdata)
+ if (host->pdata) {
mmc->caps |= host->pdata->caps;
+ mmc->caps2 |= host->pdata->caps2;
+ }
status = mmc_add_host(mmc);
if (status != 0)
goto fail_add_host;
+ if (host->pdata && host->pdata->flags & MMC_SPI_USE_CD_GPIO) {
+ status = mmc_gpio_request_cd(mmc, host->pdata->cd_gpio,
+ host->pdata->cd_debounce);
+ if (status != 0)
+ goto fail_add_host;
+ }
+
+ if (host->pdata && host->pdata->flags & MMC_SPI_USE_RO_GPIO) {
+ has_ro = true;
+ status = mmc_gpio_request_ro(mmc, host->pdata->ro_gpio);
+ if (status != 0)
+ goto fail_add_host;
+ }
+
dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
dev_name(&mmc->class_dev),
host->dma_dev ? "" : ", no DMA",
- (host->pdata && host->pdata->get_ro)
- ? "" : ", no WP",
+ has_ro ? "" : ", no WP",
(host->pdata && host->pdata->setpower)
? "" : ", no poweroff",
(mmc->caps & MMC_CAP_NEEDS_POLL)
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 4ddd83f..06c5b0b 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -757,7 +757,8 @@ static int __init mvsd_probe(struct platform_device *pdev)
if (mvsd_data->gpio_card_detect &&
gpio_is_valid(mvsd_data->gpio_card_detect)) {
ret = mmc_gpio_request_cd(mmc,
- mvsd_data->gpio_card_detect);
+ mvsd_data->gpio_card_detect,
+ 0);
if (ret)
goto out;
} else {
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index f38d75f..e1fa3ef 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -102,12 +102,15 @@ static int mxs_mmc_get_cd(struct mmc_host *mmc)
BM_SSP_STATUS_CARD_DETECT) ^ host->cd_inverted;
}
-static void mxs_mmc_reset(struct mxs_mmc_host *host)
+static int mxs_mmc_reset(struct mxs_mmc_host *host)
{
struct mxs_ssp *ssp = &host->ssp;
u32 ctrl0, ctrl1;
+ int ret;
- stmp_reset_block(ssp->base);
+ ret = stmp_reset_block(ssp->base);
+ if (ret)
+ return ret;
ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
@@ -132,6 +135,7 @@ static void mxs_mmc_reset(struct mxs_mmc_host *host)
writel(ctrl0, ssp->base + HW_SSP_CTRL0);
writel(ctrl1, ssp->base + HW_SSP_CTRL1(ssp));
+ return 0;
}
static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
@@ -618,21 +622,25 @@ static int mxs_mmc_probe(struct platform_device *pdev)
}
}
- ssp->clk = clk_get(&pdev->dev, NULL);
+ ssp->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(ssp->clk)) {
ret = PTR_ERR(ssp->clk);
goto out_mmc_free;
}
clk_prepare_enable(ssp->clk);
- mxs_mmc_reset(host);
+ ret = mxs_mmc_reset(host);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to reset mmc: %d\n", ret);
+ goto out_clk_disable;
+ }
ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
if (!ssp->dmach) {
dev_err(mmc_dev(host->mmc),
"%s: failed to request dma\n", __func__);
ret = -ENODEV;
- goto out_clk_put;
+ goto out_clk_disable;
}
/* set mmc core parameters */
@@ -685,9 +693,8 @@ static int mxs_mmc_probe(struct platform_device *pdev)
out_free_dma:
if (ssp->dmach)
dma_release_channel(ssp->dmach);
-out_clk_put:
+out_clk_disable:
clk_disable_unprepare(ssp->clk);
- clk_put(ssp->clk);
out_mmc_free:
mmc_free_host(mmc);
return ret;
@@ -705,7 +712,6 @@ static int mxs_mmc_remove(struct platform_device *pdev)
dma_release_channel(ssp->dmach);
clk_disable_unprepare(ssp->clk);
- clk_put(ssp->clk);
mmc_free_host(mmc);
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index d720b5e..6e218fb 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -50,25 +50,6 @@ static struct of_mmc_spi *to_of_mmc_spi(struct device *dev)
return container_of(dev->platform_data, struct of_mmc_spi, pdata);
}
-static int of_mmc_spi_read_gpio(struct device *dev, int gpio_num)
-{
- struct of_mmc_spi *oms = to_of_mmc_spi(dev);
- bool active_low = oms->alow_gpios[gpio_num];
- bool value = gpio_get_value(oms->gpios[gpio_num]);
-
- return active_low ^ value;
-}
-
-static int of_mmc_spi_get_cd(struct device *dev)
-{
- return of_mmc_spi_read_gpio(dev, CD_GPIO);
-}
-
-static int of_mmc_spi_get_ro(struct device *dev)
-{
- return of_mmc_spi_read_gpio(dev, WP_GPIO);
-}
-
static int of_mmc_spi_init(struct device *dev,
irqreturn_t (*irqhandler)(int, void *), void *mmc)
{
@@ -130,20 +111,22 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
if (!gpio_is_valid(oms->gpios[i]))
continue;
- ret = gpio_request(oms->gpios[i], dev_name(dev));
- if (ret < 0) {
- oms->gpios[i] = -EINVAL;
- continue;
- }
-
if (gpio_flags & OF_GPIO_ACTIVE_LOW)
oms->alow_gpios[i] = true;
}
- if (gpio_is_valid(oms->gpios[CD_GPIO]))
- oms->pdata.get_cd = of_mmc_spi_get_cd;
- if (gpio_is_valid(oms->gpios[WP_GPIO]))
- oms->pdata.get_ro = of_mmc_spi_get_ro;
+ if (gpio_is_valid(oms->gpios[CD_GPIO])) {
+ oms->pdata.cd_gpio = oms->gpios[CD_GPIO];
+ oms->pdata.flags |= MMC_SPI_USE_CD_GPIO;
+ if (!oms->alow_gpios[CD_GPIO])
+ oms->pdata.caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+ }
+ if (gpio_is_valid(oms->gpios[WP_GPIO])) {
+ oms->pdata.ro_gpio = oms->gpios[WP_GPIO];
+ oms->pdata.flags |= MMC_SPI_USE_RO_GPIO;
+ if (!oms->alow_gpios[WP_GPIO])
+ oms->pdata.caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+ }
oms->detect_irq = irq_of_parse_and_map(np, 0);
if (oms->detect_irq != 0) {
@@ -166,15 +149,10 @@ void mmc_spi_put_pdata(struct spi_device *spi)
struct device *dev = &spi->dev;
struct device_node *np = dev->of_node;
struct of_mmc_spi *oms = to_of_mmc_spi(dev);
- int i;
if (!dev->platform_data || !np)
return;
- for (i = 0; i < ARRAY_SIZE(oms->gpios); i++) {
- if (gpio_is_valid(oms->gpios[i]))
- gpio_free(oms->gpios[i]);
- }
kfree(oms);
dev->platform_data = NULL;
}
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 1865321..6ac63df 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -21,6 +21,7 @@
#include <linux/debugfs.h>
#include <linux/dmaengine.h>
#include <linux/seq_file.h>
+#include <linux/sizes.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
@@ -1041,6 +1042,7 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
}
}
+ OMAP_HSMMC_WRITE(host->base, STAT, status);
if (end_cmd || ((status & CC_EN) && host->cmd))
omap_hsmmc_cmd_done(host, host->cmd);
if ((end_trans || (status & TC_EN)) && host->mrq)
@@ -1060,7 +1062,6 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
omap_hsmmc_do_irq(host, status);
/* Flush posted write */
- OMAP_HSMMC_WRITE(host->base, STAT, status);
status = OMAP_HSMMC_READ(host->base, STAT);
}
diff --git a/drivers/mmc/host/sdhci-bcm2835.c b/drivers/mmc/host/sdhci-bcm2835.c
index 0584a1c..36fa2df 100644
--- a/drivers/mmc/host/sdhci-bcm2835.c
+++ b/drivers/mmc/host/sdhci-bcm2835.c
@@ -119,7 +119,7 @@ static u8 bcm2835_sdhci_readb(struct sdhci_host *host, int reg)
return byte;
}
-unsigned int bcm2835_sdhci_get_min_clock(struct sdhci_host *host)
+static unsigned int bcm2835_sdhci_get_min_clock(struct sdhci_host *host)
{
return MIN_FREQ;
}
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 1dd5ba8..abc8cf0 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -616,7 +616,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
/* card_detect */
switch (boarddata->cd_type) {
case ESDHC_CD_GPIO:
- err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio);
+ err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
if (err) {
dev_err(mmc_dev(host->mmc),
"failed to request card-detect gpio!\n");
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 15039e2..e328252 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -316,6 +316,7 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
/* call to generic mmc_of_parse to support additional capabilities */
mmc_of_parse(host->mmc);
+ mmc_of_parse_voltage(np, &host->ocr_mask);
ret = sdhci_add_host(host);
if (ret)
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index bf99359..793dacd 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -278,7 +278,8 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
host->mmc->pm_caps |= pdata->pm_caps;
if (gpio_is_valid(pdata->ext_cd_gpio)) {
- ret = mmc_gpio_request_cd(host->mmc, pdata->ext_cd_gpio);
+ ret = mmc_gpio_request_cd(host->mmc, pdata->ext_cd_gpio,
+ 0);
if (ret) {
dev_err(mmc_dev(host->mmc),
"failed to allocate card detect gpio\n");
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 926aaf6..6debda9 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -296,9 +296,12 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
unsigned long timeout;
u16 clk = 0;
- /* don't bother if the clock is going off */
- if (clock == 0)
+	/* If the clock is going off, write 0 to the clock control register */
+ if (clock == 0) {
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+ host->clock = clock;
return;
+ }
sdhci_s3c_set_clock(host, clock);
@@ -608,6 +611,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
host->hw_name = "samsung-hsmmc";
host->ops = &sdhci_s3c_ops;
host->quirks = 0;
+ host->quirks2 = 0;
host->irq = irq;
/* Setup quirks for the controller */
diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c
index 62a4a83..696122c 100644
--- a/drivers/mmc/host/sdhci-sirf.c
+++ b/drivers/mmc/host/sdhci-sirf.c
@@ -84,7 +84,7 @@ static int sdhci_sirf_probe(struct platform_device *pdev)
* gets setup in sdhci_add_host() and we oops.
*/
if (gpio_is_valid(priv->gpio_cd)) {
- ret = mmc_gpio_request_cd(host->mmc, priv->gpio_cd);
+ ret = mmc_gpio_request_cd(host->mmc, priv->gpio_cd, 0);
if (ret) {
dev_err(&pdev->dev, "card detect irq request failed: %d\n",
ret);
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index dd2c083..7a7fb4f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3119,6 +3119,9 @@ int sdhci_add_host(struct sdhci_host *host)
SDHCI_MAX_CURRENT_MULTIPLIER;
}
+ if (host->ocr_mask)
+ ocr_avail = host->ocr_mask;
+
mmc->ocr_avail = ocr_avail;
mmc->ocr_avail_sdio = ocr_avail;
if (host->ocr_avail_sdio)
@@ -3213,6 +3216,8 @@ int sdhci_add_host(struct sdhci_host *host)
host->tuning_timer.function = sdhci_tuning_timer;
}
+ sdhci_init(host, 0);
+
ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
mmc_hostname(mmc), host);
if (ret) {
@@ -3221,8 +3226,6 @@ int sdhci_add_host(struct sdhci_host *host)
goto untasklet;
}
- sdhci_init(host, 0);
-
#ifdef CONFIG_MMC_DEBUG
sdhci_dumpregs(host);
#endif
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 6706b5e..36629a0 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -61,6 +61,7 @@
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
+#include <linux/sh_dma.h>
#include <linux/spinlock.h>
#include <linux/module.h>
@@ -133,6 +134,8 @@
INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)
+#define INT_CCS (INT_CCSTO | INT_CCSRCV | INT_CCSDE)
+
/* CE_INT_MASK */
#define MASK_ALL 0x00000000
#define MASK_MCCSDE (1 << 29)
@@ -161,7 +164,7 @@
#define MASK_START_CMD (MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
- MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \
+ MASK_MCRCSTO | MASK_MWDATTO | \
MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)
#define MASK_CLEAN (INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE | \
@@ -243,6 +246,8 @@ struct sh_mmcif_host {
int sg_blkidx;
bool power;
bool card_present;
+ bool ccs_enable; /* Command Completion Signal support */
+ bool clk_ctrl2_enable;
struct mutex thread_lock;
/* DMA support */
@@ -386,25 +391,29 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
host->dma_active = false;
- if (!pdata)
- return;
-
- if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
+ if (pdata) {
+ if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
+ return;
+ } else if (!host->pd->dev.of_node) {
return;
+ }
/* We can only either use DMA for both Tx and Rx or not use it at all */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- host->chan_tx = dma_request_channel(mask, shdma_chan_filter,
- (void *)pdata->slave_id_tx);
+ host->chan_tx = dma_request_slave_channel_compat(mask, shdma_chan_filter,
+ pdata ? (void *)pdata->slave_id_tx : NULL,
+ &host->pd->dev, "tx");
dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
host->chan_tx);
if (!host->chan_tx)
return;
- cfg.slave_id = pdata->slave_id_tx;
+ /* In the OF case the driver will get the slave ID from the DT */
+ if (pdata)
+ cfg.slave_id = pdata->slave_id_tx;
cfg.direction = DMA_MEM_TO_DEV;
cfg.dst_addr = res->start + MMCIF_CE_DATA;
cfg.src_addr = 0;
@@ -412,15 +421,17 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
if (ret < 0)
goto ecfgtx;
- host->chan_rx = dma_request_channel(mask, shdma_chan_filter,
- (void *)pdata->slave_id_rx);
+ host->chan_rx = dma_request_slave_channel_compat(mask, shdma_chan_filter,
+ pdata ? (void *)pdata->slave_id_rx : NULL,
+ &host->pd->dev, "rx");
dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
host->chan_rx);
if (!host->chan_rx)
goto erqrx;
- cfg.slave_id = pdata->slave_id_rx;
+ if (pdata)
+ cfg.slave_id = pdata->slave_id_rx;
cfg.direction = DMA_DEV_TO_MEM;
cfg.dst_addr = 0;
cfg.src_addr = res->start + MMCIF_CE_DATA;
@@ -485,8 +496,12 @@ static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
+ if (host->ccs_enable)
+ tmp |= SCCSTO_29;
+ if (host->clk_ctrl2_enable)
+ sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000);
sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
- SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
+ SRSPTO_256 | SRBSYTO_29 | SRWDTO_29);
/* byte swap on */
sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}
@@ -866,6 +881,9 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
break;
}
+ if (host->ccs_enable)
+ mask |= MASK_MCCSTO;
+
if (mrq->data) {
sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
@@ -873,7 +891,10 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
}
opc = sh_mmcif_set_cmd(host, mrq);
- sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
+ if (host->ccs_enable)
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
+ else
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS);
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
/* set arg */
sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
@@ -956,11 +977,8 @@ static int sh_mmcif_clk_update(struct sh_mmcif_host *host)
static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios)
{
- struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data;
struct mmc_host *mmc = host->mmc;
- if (pd && pd->set_pwr)
- pd->set_pwr(host->pd, ios->power_mode != MMC_POWER_OFF);
if (!IS_ERR(mmc->supply.vmmc))
/* Errors ignored... */
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
@@ -1241,11 +1259,14 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
struct sh_mmcif_host *host = dev_id;
- u32 state;
+ u32 state, mask;
state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
- sh_mmcif_writel(host->addr, MMCIF_CE_INT,
- ~(state & sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK)));
+ mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK);
+ if (host->ccs_enable)
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask));
+ else
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask));
sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);
if (state & ~MASK_CLEAN)
@@ -1379,6 +1400,8 @@ static int sh_mmcif_probe(struct platform_device *pdev)
host->mmc = mmc;
host->addr = reg;
host->timeout = msecs_to_jiffies(1000);
+ host->ccs_enable = !pd || !pd->ccs_unsupported;
+ host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present;
host->pd = pdev;
@@ -1436,7 +1459,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
}
if (pd && pd->use_cd_gpio) {
- ret = mmc_gpio_request_cd(mmc, pd->cd_gpio);
+ ret = mmc_gpio_request_cd(mmc, pd->cd_gpio, 0);
if (ret < 0)
goto erqcd;
}
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index ebea749..87ed3fb 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -70,20 +70,6 @@ static void sh_mobile_sdhi_clk_disable(struct platform_device *pdev)
clk_disable(priv->clk);
}
-static void sh_mobile_sdhi_set_pwr(struct platform_device *pdev, int state)
-{
- struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
-
- p->set_pwr(pdev, state);
-}
-
-static int sh_mobile_sdhi_get_cd(struct platform_device *pdev)
-{
- struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
-
- return p->get_cd(pdev);
-}
-
static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host)
{
int timeout = 1000;
@@ -129,7 +115,12 @@ static const struct sh_mobile_sdhi_ops sdhi_ops = {
static const struct of_device_id sh_mobile_sdhi_of_match[] = {
{ .compatible = "renesas,shmobile-sdhi" },
{ .compatible = "renesas,sh7372-sdhi" },
+ { .compatible = "renesas,sh73a0-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
+ { .compatible = "renesas,r8a73a4-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
{ .compatible = "renesas,r8a7740-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
+ { .compatible = "renesas,r8a7778-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
+ { .compatible = "renesas,r8a7779-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
+ { .compatible = "renesas,r8a7790-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], },
{},
};
MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
@@ -180,10 +171,6 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
mmc_data->capabilities |= p->tmio_caps;
mmc_data->capabilities2 |= p->tmio_caps2;
mmc_data->cd_gpio = p->cd_gpio;
- if (p->set_pwr)
- mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
- if (p->get_cd)
- mmc_data->get_cd = sh_mobile_sdhi_get_cd;
if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) {
/*
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 47bdb8f..65edb4a 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -104,6 +104,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
pio:
if (!desc) {
/* DMA failed, fall back to PIO */
+ tmio_mmc_enable_dma(host, false);
if (ret >= 0)
ret = -EIO;
host->chan_rx = NULL;
@@ -116,7 +117,6 @@ pio:
}
dev_warn(&host->pdev->dev,
"DMA failed: %d, falling back to PIO\n", ret);
- tmio_mmc_enable_dma(host, false);
}
dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
@@ -185,6 +185,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
pio:
if (!desc) {
/* DMA failed, fall back to PIO */
+ tmio_mmc_enable_dma(host, false);
if (ret >= 0)
ret = -EIO;
host->chan_tx = NULL;
@@ -197,7 +198,6 @@ pio:
}
dev_warn(&host->pdev->dev,
"DMA failed: %d, falling back to PIO\n", ret);
- tmio_mmc_enable_dma(host, false);
}
dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index b72edb7..b380225 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -795,9 +795,13 @@ static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
* omap_hsmmc.c driver does.
*/
if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
- regulator_enable(mmc->supply.vqmmc);
+ ret = regulator_enable(mmc->supply.vqmmc);
udelay(200);
}
+
+ if (ret < 0)
+ dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
+ ret);
}
static void tmio_mmc_power_off(struct tmio_mmc_host *host)
@@ -932,25 +936,11 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
}
-static int tmio_mmc_get_cd(struct mmc_host *mmc)
-{
- struct tmio_mmc_host *host = mmc_priv(mmc);
- struct tmio_mmc_data *pdata = host->pdata;
- int ret = mmc_gpio_get_cd(mmc);
- if (ret >= 0)
- return ret;
-
- if (!pdata->get_cd)
- return -ENOSYS;
- else
- return pdata->get_cd(host->pdev);
-}
-
static const struct mmc_host_ops tmio_mmc_ops = {
.request = tmio_mmc_request,
.set_ios = tmio_mmc_set_ios,
.get_ro = tmio_mmc_get_ro,
- .get_cd = tmio_mmc_get_cd,
+ .get_cd = mmc_gpio_get_cd,
.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};
@@ -1106,7 +1096,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host,
dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
- ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio);
+ ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
if (ret < 0) {
tmio_mmc_host_remove(_host);
return ret;
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index cb9f361..e9028ad 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -2079,7 +2079,7 @@ static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable)
kref_put(&vub300->kref, vub300_delete);
}
-void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card)
+static void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card)
{ /* NOT irq */
struct vub300_mmc_host *vub300 = mmc_priv(mmc);
dev_info(&vub300->udev->dev, "NO host QUIRKS for this card\n");
diff --git a/drivers/of/base.c b/drivers/of/base.c
index e486e41..865d3f6 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1176,65 +1176,10 @@ int of_property_count_strings(struct device_node *np, const char *propname)
}
EXPORT_SYMBOL_GPL(of_property_count_strings);
-/**
- * of_parse_phandle - Resolve a phandle property to a device_node pointer
- * @np: Pointer to device node holding phandle property
- * @phandle_name: Name of property holding a phandle value
- * @index: For properties holding a table of phandles, this is the index into
- * the table
- *
- * Returns the device_node pointer with refcount incremented. Use
- * of_node_put() on it when done.
- */
-struct device_node *of_parse_phandle(const struct device_node *np,
- const char *phandle_name, int index)
-{
- const __be32 *phandle;
- int size;
-
- phandle = of_get_property(np, phandle_name, &size);
- if ((!phandle) || (size < sizeof(*phandle) * (index + 1)))
- return NULL;
-
- return of_find_node_by_phandle(be32_to_cpup(phandle + index));
-}
-EXPORT_SYMBOL(of_parse_phandle);
-
-/**
- * of_parse_phandle_with_args() - Find a node pointed by phandle in a list
- * @np: pointer to a device tree node containing a list
- * @list_name: property name that contains a list
- * @cells_name: property name that specifies phandles' arguments count
- * @index: index of a phandle to parse out
- * @out_args: optional pointer to output arguments structure (will be filled)
- *
- * This function is useful to parse lists of phandles and their arguments.
- * Returns 0 on success and fills out_args, on error returns appropriate
- * errno value.
- *
- * Caller is responsible to call of_node_put() on the returned out_args->node
- * pointer.
- *
- * Example:
- *
- * phandle1: node1 {
- * #list-cells = <2>;
- * }
- *
- * phandle2: node2 {
- * #list-cells = <1>;
- * }
- *
- * node3 {
- * list = <&phandle1 1 2 &phandle2 3>;
- * }
- *
- * To get a device_node of the `node2' node you may call this:
- * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
- */
static int __of_parse_phandle_with_args(const struct device_node *np,
const char *list_name,
- const char *cells_name, int index,
+ const char *cells_name,
+ int cell_count, int index,
struct of_phandle_args *out_args)
{
const __be32 *list, *list_end;
@@ -1262,19 +1207,32 @@ static int __of_parse_phandle_with_args(const struct device_node *np,
if (phandle) {
/*
* Find the provider node and parse the #*-cells
- * property to determine the argument length
+ * property to determine the argument length.
+ *
+ * This is not needed if the cell count is hard-coded
+ * (i.e. cells_name not set, but cell_count is set),
+ * except when we're going to return the found node
+ * below.
*/
- node = of_find_node_by_phandle(phandle);
- if (!node) {
- pr_err("%s: could not find phandle\n",
- np->full_name);
- goto err;
+ if (cells_name || cur_index == index) {
+ node = of_find_node_by_phandle(phandle);
+ if (!node) {
+ pr_err("%s: could not find phandle\n",
+ np->full_name);
+ goto err;
+ }
}
- if (of_property_read_u32(node, cells_name, &count)) {
- pr_err("%s: could not get %s for %s\n",
- np->full_name, cells_name,
- node->full_name);
- goto err;
+
+ if (cells_name) {
+ if (of_property_read_u32(node, cells_name,
+ &count)) {
+ pr_err("%s: could not get %s for %s\n",
+ np->full_name, cells_name,
+ node->full_name);
+ goto err;
+ }
+ } else {
+ count = cell_count;
}
/*
@@ -1334,17 +1292,117 @@ static int __of_parse_phandle_with_args(const struct device_node *np,
return rc;
}
+/**
+ * of_parse_phandle - Resolve a phandle property to a device_node pointer
+ * @np: Pointer to device node holding phandle property
+ * @phandle_name: Name of property holding a phandle value
+ * @index: For properties holding a table of phandles, this is the index into
+ * the table
+ *
+ * Returns the device_node pointer with refcount incremented. Use
+ * of_node_put() on it when done.
+ */
+struct device_node *of_parse_phandle(const struct device_node *np,
+ const char *phandle_name, int index)
+{
+ struct of_phandle_args args;
+
+ if (index < 0)
+ return NULL;
+
+ if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0,
+ index, &args))
+ return NULL;
+
+ return args.np;
+}
+EXPORT_SYMBOL(of_parse_phandle);
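As a quick illustration (not part of this patch), a caller resolving the first phandle in a property would use the rewritten wrapper as sketched below; "np" and the "list" property name are placeholders:

	struct device_node *target;

	/* Resolve entry 0 of "list"; the refcount is taken for us */
	target = of_parse_phandle(np, "list", 0);
	if (target) {
		/* ... use target ... */
		of_node_put(target);	/* drop the reference when done */
	}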
+
+/**
+ * of_parse_phandle_with_args() - Find a node pointed by phandle in a list
+ * @np: pointer to a device tree node containing a list
+ * @list_name: property name that contains a list
+ * @cells_name: property name that specifies phandles' arguments count
+ * @index: index of a phandle to parse out
+ * @out_args: optional pointer to output arguments structure (will be filled)
+ *
+ * This function is useful to parse lists of phandles and their arguments.
+ * Returns 0 on success and fills out_args, on error returns appropriate
+ * errno value.
+ *
+ * Caller is responsible to call of_node_put() on the returned out_args->node
+ * pointer.
+ *
+ * Example:
+ *
+ * phandle1: node1 {
+ * #list-cells = <2>;
+ * }
+ *
+ * phandle2: node2 {
+ * #list-cells = <1>;
+ * }
+ *
+ * node3 {
+ * list = <&phandle1 1 2 &phandle2 3>;
+ * }
+ *
+ * To get a device_node of the `node2' node you may call this:
+ * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
+ */
int of_parse_phandle_with_args(const struct device_node *np, const char *list_name,
const char *cells_name, int index,
struct of_phandle_args *out_args)
{
if (index < 0)
return -EINVAL;
- return __of_parse_phandle_with_args(np, list_name, cells_name, index, out_args);
+ return __of_parse_phandle_with_args(np, list_name, cells_name, 0,
+ index, out_args);
}
EXPORT_SYMBOL(of_parse_phandle_with_args);
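For reference only (not part of the patch): with the DT snippet from the kernel-doc above, asking for index 1 yields node2 and its single argument, roughly as follows ("node3" is assumed to have been looked up already):

	struct of_phandle_args args;
	int err;

	err = of_parse_phandle_with_args(node3, "list", "#list-cells",
					 1, &args);
	if (!err) {
		/* args.np is node2, args.args_count == 1, args.args[0] == 3 */
		of_node_put(args.np);	/* caller owns the node reference */
	}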
/**
+ * of_parse_phandle_with_fixed_args() - Find a node pointed by phandle in a list
+ * @np: pointer to a device tree node containing a list
+ * @list_name: property name that contains a list
+ * @cell_count: number of argument cells following the phandle
+ * @index: index of a phandle to parse out
+ * @out_args: optional pointer to output arguments structure (will be filled)
+ *
+ * This function is useful to parse lists of phandles and their arguments.
+ * Returns 0 on success and fills out_args, on error returns appropriate
+ * errno value.
+ *
+ * Caller is responsible to call of_node_put() on the returned out_args->node
+ * pointer.
+ *
+ * Example:
+ *
+ * phandle1: node1 {
+ * }
+ *
+ * phandle2: node2 {
+ * }
+ *
+ * node3 {
+ * list = <&phandle1 0 2 &phandle2 2 3>;
+ * }
+ *
+ * To get a device_node of the `node2' node you may call this:
+ * of_parse_phandle_with_fixed_args(node3, "list", 2, 1, &args);
+ */
+int of_parse_phandle_with_fixed_args(const struct device_node *np,
+ const char *list_name, int cell_count,
+ int index, struct of_phandle_args *out_args)
+{
+ if (index < 0)
+ return -EINVAL;
+ return __of_parse_phandle_with_args(np, list_name, NULL, cell_count,
+ index, out_args);
+}
+EXPORT_SYMBOL(of_parse_phandle_with_fixed_args);
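Illustration only: the kernel-doc example above would be driven as below, with the two argument cells hard-coded by the caller rather than read from a "#list-cells" property:

	struct of_phandle_args args;
	int err;

	err = of_parse_phandle_with_fixed_args(node3, "list", 2, 1, &args);
	if (!err) {
		/* args.np is node2, args.args_count == 2, args == { 2, 3 } */
		of_node_put(args.np);
	}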
+
+/**
* of_count_phandle_with_args() - Find the number of phandles references in a property
* @np: pointer to a device tree node containing a list
* @list_name: property name that contains a list
@@ -1362,7 +1420,8 @@ EXPORT_SYMBOL(of_parse_phandle_with_args);
int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
const char *cells_name)
{
- return __of_parse_phandle_with_args(np, list_name, cells_name, -1, NULL);
+ return __of_parse_phandle_with_args(np, list_name, cells_name, 0, -1,
+ NULL);
}
EXPORT_SYMBOL(of_count_phandle_with_args);
@@ -1734,6 +1793,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
ap = dt_alloc(sizeof(*ap) + len + 1, 4);
if (!ap)
continue;
+ memset(ap, 0, sizeof(*ap) + len + 1);
ap->alias = start;
of_alias_add(ap, np, id, start, len);
}
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 4fb06f3..229dd9d 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -11,12 +11,14 @@
#include <linux/kernel.h>
#include <linux/initrd.h>
+#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
+#include <linux/random.h>
#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
#ifdef CONFIG_PPC
@@ -125,13 +127,13 @@ int of_fdt_match(struct boot_param_header *blob, unsigned long node,
return score;
}
-static void *unflatten_dt_alloc(unsigned long *mem, unsigned long size,
+static void *unflatten_dt_alloc(void **mem, unsigned long size,
unsigned long align)
{
void *res;
- *mem = ALIGN(*mem, align);
- res = (void *)*mem;
+ *mem = PTR_ALIGN(*mem, align);
+ res = *mem;
*mem += size;
return res;
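A side note on how the void * conversion plays out (illustrative, not from the patch): the sizing pass walks the tree with *mem starting at NULL, so when the walk finishes, the pointer's integer value is exactly the number of aligned bytes the second pass will consume:

	void *mem = NULL;	/* sizing pass: start at 0 */

	unflatten_dt_alloc(&mem, sizeof(struct device_node),
			   __alignof__(struct device_node));
	unflatten_dt_alloc(&mem, 17, 4);	/* e.g. a small property value */
	/* (unsigned long)mem now equals the total space to dt_alloc() */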
@@ -146,9 +148,9 @@ static void *unflatten_dt_alloc(unsigned long *mem, unsigned long size,
* @allnextpp: pointer to ->allnext from last allocated device_node
* @fpsize: Size of the node path up at the current depth.
*/
-static unsigned long unflatten_dt_node(struct boot_param_header *blob,
- unsigned long mem,
- unsigned long *p,
+static void * unflatten_dt_node(struct boot_param_header *blob,
+ void *mem,
+ void **p,
struct device_node *dad,
struct device_node ***allnextpp,
unsigned long fpsize)
@@ -161,15 +163,15 @@ static unsigned long unflatten_dt_node(struct boot_param_header *blob,
int has_name = 0;
int new_format = 0;
- tag = be32_to_cpup((__be32 *)(*p));
+ tag = be32_to_cpup(*p);
if (tag != OF_DT_BEGIN_NODE) {
pr_err("Weird tag at start of node: %x\n", tag);
return mem;
}
*p += 4;
- pathp = (char *)*p;
+ pathp = *p;
l = allocl = strlen(pathp) + 1;
- *p = ALIGN(*p + l, 4);
+ *p = PTR_ALIGN(*p + l, 4);
/* version 0x10 has a more compact unit name here instead of the full
* path. we accumulate the full path size using "fpsize", we'll rebuild
@@ -201,7 +203,6 @@ static unsigned long unflatten_dt_node(struct boot_param_header *blob,
__alignof__(struct device_node));
if (allnextpp) {
char *fn;
- memset(np, 0, sizeof(*np));
np->full_name = fn = ((char *)np) + sizeof(*np);
if (new_format) {
/* rebuild full path for new format */
@@ -239,7 +240,7 @@ static unsigned long unflatten_dt_node(struct boot_param_header *blob,
u32 sz, noff;
char *pname;
- tag = be32_to_cpup((__be32 *)(*p));
+ tag = be32_to_cpup(*p);
if (tag == OF_DT_NOP) {
*p += 4;
continue;
@@ -247,11 +248,11 @@ static unsigned long unflatten_dt_node(struct boot_param_header *blob,
if (tag != OF_DT_PROP)
break;
*p += 4;
- sz = be32_to_cpup((__be32 *)(*p));
- noff = be32_to_cpup((__be32 *)((*p) + 4));
+ sz = be32_to_cpup(*p);
+ noff = be32_to_cpup(*p + 4);
*p += 8;
if (be32_to_cpu(blob->version) < 0x10)
- *p = ALIGN(*p, sz >= 8 ? 8 : 4);
+ *p = PTR_ALIGN(*p, sz >= 8 ? 8 : 4);
pname = of_fdt_get_string(blob, noff);
if (pname == NULL) {
@@ -281,11 +282,11 @@ static unsigned long unflatten_dt_node(struct boot_param_header *blob,
np->phandle = be32_to_cpup((__be32 *)*p);
pp->name = pname;
pp->length = sz;
- pp->value = (void *)*p;
+ pp->value = *p;
*prev_pp = pp;
prev_pp = &pp->next;
}
- *p = ALIGN((*p) + sz, 4);
+ *p = PTR_ALIGN((*p) + sz, 4);
}
/* with version 0x10 we may not have the name property, recreate
* it here from the unit name if absent
@@ -334,7 +335,7 @@ static unsigned long unflatten_dt_node(struct boot_param_header *blob,
else
mem = unflatten_dt_node(blob, mem, p, np, allnextpp,
fpsize);
- tag = be32_to_cpup((__be32 *)(*p));
+ tag = be32_to_cpup(*p);
}
if (tag != OF_DT_END_NODE) {
pr_err("Weird tag at end of node: %x\n", tag);
@@ -360,7 +361,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob,
struct device_node **mynodes,
void * (*dt_alloc)(u64 size, u64 align))
{
- unsigned long start, mem, size;
+ unsigned long size;
+ void *start, *mem;
struct device_node **allnextp = mynodes;
pr_debug(" -> unflatten_device_tree()\n");
@@ -381,32 +383,28 @@ static void __unflatten_device_tree(struct boot_param_header *blob,
}
/* First pass, scan for size */
- start = ((unsigned long)blob) +
- be32_to_cpu(blob->off_dt_struct);
- size = unflatten_dt_node(blob, 0, &start, NULL, NULL, 0);
- size = (size | 3) + 1;
+ start = ((void *)blob) + be32_to_cpu(blob->off_dt_struct);
+ size = (unsigned long)unflatten_dt_node(blob, 0, &start, NULL, NULL, 0);
+ size = ALIGN(size, 4);
pr_debug(" size is %lx, allocating...\n", size);
/* Allocate memory for the expanded device tree */
- mem = (unsigned long)
- dt_alloc(size + 4, __alignof__(struct device_node));
+ mem = dt_alloc(size + 4, __alignof__(struct device_node));
+ memset(mem, 0, size);
- memset((void *)mem, 0, size);
+ *(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);
- ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
-
- pr_debug(" unflattening %lx...\n", mem);
+ pr_debug(" unflattening %p...\n", mem);
/* Second pass, do actual unflattening */
- start = ((unsigned long)blob) +
- be32_to_cpu(blob->off_dt_struct);
+ start = ((void *)blob) + be32_to_cpu(blob->off_dt_struct);
unflatten_dt_node(blob, mem, &start, NULL, &allnextp, 0);
- if (be32_to_cpup((__be32 *)start) != OF_DT_END)
- pr_warning("Weird tag at end of tree: %08x\n", *((u32 *)start));
- if (be32_to_cpu(((__be32 *)mem)[size / 4]) != 0xdeadbeef)
+ if (be32_to_cpup(start) != OF_DT_END)
+ pr_warning("Weird tag at end of tree: %08x\n", be32_to_cpup(start));
+ if (be32_to_cpup(mem + size) != 0xdeadbeef)
pr_warning("End of tree marker overwritten: %08x\n",
- be32_to_cpu(((__be32 *)mem)[size / 4]));
+ be32_to_cpup(mem + size));
*allnextp = NULL;
pr_debug(" <- unflatten_device_tree()\n");
@@ -628,7 +626,8 @@ int __init of_scan_flat_dt_by_path(const char *path,
*/
void __init early_init_dt_check_for_initrd(unsigned long node)
{
- unsigned long start, end, len;
+ u64 start, end;
+ unsigned long len;
__be32 *prop;
pr_debug("Looking for initrd properties... ");
@@ -636,15 +635,16 @@ void __init early_init_dt_check_for_initrd(unsigned long node)
prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
if (!prop)
return;
- start = of_read_ulong(prop, len/4);
+ start = of_read_number(prop, len/4);
prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
if (!prop)
return;
- end = of_read_ulong(prop, len/4);
+ end = of_read_number(prop, len/4);
early_init_dt_setup_initrd_arch(start, end);
- pr_debug("initrd_start=0x%lx initrd_end=0x%lx\n", start, end);
+ pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n",
+ (unsigned long long)start, (unsigned long long)end);
}
#else
inline void early_init_dt_check_for_initrd(unsigned long node)
@@ -774,6 +774,17 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
return 1;
}
+#ifdef CONFIG_HAVE_MEMBLOCK
+/*
+ * Called from unflatten_device_tree() to bootstrap the device tree itself.
+ * Architectures can override this definition if memblock isn't used
+ */
+void * __init __weak early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+ return __va(memblock_alloc(size, align));
+}
+#endif
+
/**
* unflatten_device_tree - create tree of device_nodes from flat blob
*
@@ -792,3 +803,14 @@ void __init unflatten_device_tree(void)
}
#endif /* CONFIG_OF_EARLY_FLATTREE */
+
+/* Feed entire flattened device tree into the random pool */
+static int __init add_fdt_randomness(void)
+{
+ if (initial_boot_params)
+ add_device_randomness(initial_boot_params,
+ be32_to_cpu(initial_boot_params->totalsize));
+
+ return 0;
+}
+core_initcall(add_fdt_randomness);
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 1264923..1752988 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -28,7 +28,7 @@
/**
* irq_of_parse_and_map - Parse and map an interrupt into linux virq space
- * @device: Device node of the device whose interrupt is to be mapped
+ * @dev: Device node of the device whose interrupt is to be mapped
* @index: Index of the interrupt to map
*
* This function is a wrapper that chains of_irq_map_one() and
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index ea174c8..8f9be2e 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -39,7 +39,7 @@ static const char *phy_modes[] = {
* The function gets phy interface string from property 'phy-mode',
* and return its index in phy_modes table, or errno in error case.
*/
-const int of_get_phy_mode(struct device_node *np)
+int of_get_phy_mode(struct device_node *np)
{
const char *pm;
int err, i;
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index eeca8a5..9b439ac 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -197,7 +197,7 @@ EXPORT_SYMBOL(of_device_alloc);
* Returns pointer to created platform device, or NULL if a device was not
* registered. Unavailable devices will not get registered.
*/
-struct platform_device *of_platform_device_create_pdata(
+static struct platform_device *of_platform_device_create_pdata(
struct device_node *np,
const char *bus_id,
void *platform_data,
@@ -268,8 +268,11 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
return NULL;
dev = amba_device_alloc(NULL, 0, 0);
- if (!dev)
+ if (!dev) {
+ pr_err("%s(): amba_device_alloc() failed for %s\n",
+ __func__, node->full_name);
return NULL;
+ }
/* setup generic device info */
dev->dev.coherent_dma_mask = ~0;
@@ -294,12 +297,18 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
dev->irq[i] = irq_of_parse_and_map(node, i);
ret = of_address_to_resource(node, 0, &dev->res);
- if (ret)
+ if (ret) {
+ pr_err("%s(): of_address_to_resource() failed (%d) for %s\n",
+ __func__, ret, node->full_name);
goto err_free;
+ }
ret = amba_device_add(dev, &iomem_resource);
- if (ret)
+ if (ret) {
+ pr_err("%s(): amba_device_add() failed (%d) for %s\n",
+ __func__, ret, node->full_name);
goto err_free;
+ }
return dev;
@@ -378,6 +387,10 @@ static int of_platform_bus_create(struct device_node *bus,
}
if (of_device_is_compatible(bus, "arm,primecell")) {
+ /*
+ * Don't return an error here to keep compatibility with older
+ * device tree files.
+ */
of_amba_device_create(bus, bus_id, platform_data, parent);
return 0;
}
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 7b8979c..bb49ab6 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -216,6 +216,13 @@ config BATTERY_S3C_ADC
help
Say Y here to enable support for iPAQ h1930/h1940/rx1950 battery
+config BATTERY_TWL4030_MADC
+ tristate "TWL4030 MADC battery driver"
+ depends on TWL4030_MADC
+ help
+ Say Y here to enable this dumb driver for batteries managed
+ through the TWL4030 MADC.
+
config CHARGER_88PM860X
tristate "Marvell 88PM860x Charger driver"
depends on MFD_88PM860X && BATTERY_88PM860X
@@ -334,6 +341,12 @@ config CHARGER_BQ2415X
You'll need this driver to charge batteries on e.g. Nokia
RX-51/N900.
+config CHARGER_BQ24190
+ tristate "TI BQ24190 battery charger driver"
+ depends on I2C && GPIOLIB
+ help
+ Say Y to enable support for the TI BQ24190 battery charger.
+
config CHARGER_SMB347
tristate "Summit Microelectronics SMB347 Battery Charger"
depends on I2C
@@ -357,7 +370,7 @@ config AB8500_BM
config BATTERY_GOLDFISH
tristate "Goldfish battery driver"
- depends on GENERIC_HARDIRQS
+ depends on GENERIC_HARDIRQS && (GOLDFISH || COMPILE_TEST)
help
Say Y to enable support for the battery and AC power in the
Goldfish emulator.
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 653bf6c..a4b7417 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
obj-$(CONFIG_BATTERY_MAX17042) += max17042_battery.o
obj-$(CONFIG_BATTERY_Z2) += z2_battery.o
obj-$(CONFIG_BATTERY_S3C_ADC) += s3c_adc_battery.o
+obj-$(CONFIG_BATTERY_TWL4030_MADC) += twl4030_madc_battery.o
obj-$(CONFIG_CHARGER_88PM860X) += 88pm860x_charger.o
obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o
@@ -50,6 +51,7 @@ obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
obj-$(CONFIG_CHARGER_BQ2415X) += bq2415x_charger.o
+obj-$(CONFIG_CHARGER_BQ24190) += bq24190_charger.o
obj-$(CONFIG_POWER_AVS) += avs/
obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
obj-$(CONFIG_CHARGER_TPS65090) += tps65090-charger.o
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
index f098fda..a4c4a10 100644
--- a/drivers/power/ab8500_charger.c
+++ b/drivers/power/ab8500_charger.c
@@ -774,6 +774,7 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P5;
dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d", link_status,
di->max_usb_in_curr.usb_type_max);
+ break;
case USB_STAT_NOT_VALID_LINK:
dev_err(di->dev, "USB Type invalid - try charging anyway\n");
di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P5;
diff --git a/drivers/power/bq24190_charger.c b/drivers/power/bq24190_charger.c
new file mode 100644
index 0000000..ad3ff8f
--- /dev/null
+++ b/drivers/power/bq24190_charger.c
@@ -0,0 +1,1549 @@
+/*
+ * Driver for the TI bq24190 battery charger.
+ *
+ * Author: Mark A. Greer <mgreer@animalcreek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/power_supply.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+
+#include <linux/power/bq24190_charger.h>
+
+
+#define BQ24190_MANUFACTURER "Texas Instruments"
+
+#define BQ24190_REG_ISC 0x00 /* Input Source Control */
+#define BQ24190_REG_ISC_EN_HIZ_MASK BIT(7)
+#define BQ24190_REG_ISC_EN_HIZ_SHIFT 7
+#define BQ24190_REG_ISC_VINDPM_MASK (BIT(6) | BIT(5) | BIT(4) | \
+ BIT(3))
+#define BQ24190_REG_ISC_VINDPM_SHIFT 3
+#define BQ24190_REG_ISC_IINLIM_MASK (BIT(2) | BIT(1) | BIT(0))
+#define BQ24190_REG_ISC_IINLIM_SHIFT 0
+
+#define BQ24190_REG_POC 0x01 /* Power-On Configuration */
+#define BQ24190_REG_POC_RESET_MASK BIT(7)
+#define BQ24190_REG_POC_RESET_SHIFT 7
+#define BQ24190_REG_POC_WDT_RESET_MASK BIT(6)
+#define BQ24190_REG_POC_WDT_RESET_SHIFT 6
+#define BQ24190_REG_POC_CHG_CONFIG_MASK (BIT(5) | BIT(4))
+#define BQ24190_REG_POC_CHG_CONFIG_SHIFT 4
+#define BQ24190_REG_POC_SYS_MIN_MASK (BIT(3) | BIT(2) | BIT(1))
+#define BQ24190_REG_POC_SYS_MIN_SHIFT 1
+#define BQ24190_REG_POC_BOOST_LIM_MASK BIT(0)
+#define BQ24190_REG_POC_BOOST_LIM_SHIFT 0
+
+#define BQ24190_REG_CCC 0x02 /* Charge Current Control */
+#define BQ24190_REG_CCC_ICHG_MASK (BIT(7) | BIT(6) | BIT(5) | \
+ BIT(4) | BIT(3) | BIT(2))
+#define BQ24190_REG_CCC_ICHG_SHIFT 2
+#define BQ24190_REG_CCC_FORCE_20PCT_MASK BIT(0)
+#define BQ24190_REG_CCC_FORCE_20PCT_SHIFT 0
+
+#define BQ24190_REG_PCTCC 0x03 /* Pre-charge/Termination Current Cntl */
+#define BQ24190_REG_PCTCC_IPRECHG_MASK (BIT(7) | BIT(6) | BIT(5) | \
+ BIT(4))
+#define BQ24190_REG_PCTCC_IPRECHG_SHIFT 4
+#define BQ24190_REG_PCTCC_ITERM_MASK (BIT(3) | BIT(2) | BIT(1) | \
+ BIT(0))
+#define BQ24190_REG_PCTCC_ITERM_SHIFT 0
+
+#define BQ24190_REG_CVC 0x04 /* Charge Voltage Control */
+#define BQ24190_REG_CVC_VREG_MASK (BIT(7) | BIT(6) | BIT(5) | \
+ BIT(4) | BIT(3) | BIT(2))
+#define BQ24190_REG_CVC_VREG_SHIFT 2
+#define BQ24190_REG_CVC_BATLOWV_MASK BIT(1)
+#define BQ24190_REG_CVC_BATLOWV_SHIFT 1
+#define BQ24190_REG_CVC_VRECHG_MASK BIT(0)
+#define BQ24190_REG_CVC_VRECHG_SHIFT 0
+
+#define BQ24190_REG_CTTC 0x05 /* Charge Term/Timer Control */
+#define BQ24190_REG_CTTC_EN_TERM_MASK BIT(7)
+#define BQ24190_REG_CTTC_EN_TERM_SHIFT 7
+#define BQ24190_REG_CTTC_TERM_STAT_MASK BIT(6)
+#define BQ24190_REG_CTTC_TERM_STAT_SHIFT 6
+#define BQ24190_REG_CTTC_WATCHDOG_MASK (BIT(5) | BIT(4))
+#define BQ24190_REG_CTTC_WATCHDOG_SHIFT 4
+#define BQ24190_REG_CTTC_EN_TIMER_MASK BIT(3)
+#define BQ24190_REG_CTTC_EN_TIMER_SHIFT 3
+#define BQ24190_REG_CTTC_CHG_TIMER_MASK (BIT(2) | BIT(1))
+#define BQ24190_REG_CTTC_CHG_TIMER_SHIFT 1
+#define BQ24190_REG_CTTC_JEITA_ISET_MASK BIT(0)
+#define BQ24190_REG_CTTC_JEITA_ISET_SHIFT 0
+
+#define BQ24190_REG_ICTRC 0x06 /* IR Comp/Thermal Regulation Control */
+#define BQ24190_REG_ICTRC_BAT_COMP_MASK (BIT(7) | BIT(6) | BIT(5))
+#define BQ24190_REG_ICTRC_BAT_COMP_SHIFT 5
+#define BQ24190_REG_ICTRC_VCLAMP_MASK (BIT(4) | BIT(3) | BIT(2))
+#define BQ24190_REG_ICTRC_VCLAMP_SHIFT 2
+#define BQ24190_REG_ICTRC_TREG_MASK (BIT(1) | BIT(0))
+#define BQ24190_REG_ICTRC_TREG_SHIFT 0
+
+#define BQ24190_REG_MOC 0x07 /* Misc. Operation Control */
+#define BQ24190_REG_MOC_DPDM_EN_MASK BIT(7)
+#define BQ24190_REG_MOC_DPDM_EN_SHIFT 7
+#define BQ24190_REG_MOC_TMR2X_EN_MASK BIT(6)
+#define BQ24190_REG_MOC_TMR2X_EN_SHIFT 6
+#define BQ24190_REG_MOC_BATFET_DISABLE_MASK BIT(5)
+#define BQ24190_REG_MOC_BATFET_DISABLE_SHIFT 5
+#define BQ24190_REG_MOC_JEITA_VSET_MASK BIT(4)
+#define BQ24190_REG_MOC_JEITA_VSET_SHIFT 4
+#define BQ24190_REG_MOC_INT_MASK_MASK (BIT(1) | BIT(0))
+#define BQ24190_REG_MOC_INT_MASK_SHIFT 0
+
+#define BQ24190_REG_SS 0x08 /* System Status */
+#define BQ24190_REG_SS_VBUS_STAT_MASK (BIT(7) | BIT(6))
+#define BQ24190_REG_SS_VBUS_STAT_SHIFT 6
+#define BQ24190_REG_SS_CHRG_STAT_MASK (BIT(5) | BIT(4))
+#define BQ24190_REG_SS_CHRG_STAT_SHIFT 4
+#define BQ24190_REG_SS_DPM_STAT_MASK BIT(3)
+#define BQ24190_REG_SS_DPM_STAT_SHIFT 3
+#define BQ24190_REG_SS_PG_STAT_MASK BIT(2)
+#define BQ24190_REG_SS_PG_STAT_SHIFT 2
+#define BQ24190_REG_SS_THERM_STAT_MASK BIT(1)
+#define BQ24190_REG_SS_THERM_STAT_SHIFT 1
+#define BQ24190_REG_SS_VSYS_STAT_MASK BIT(0)
+#define BQ24190_REG_SS_VSYS_STAT_SHIFT 0
+
+#define BQ24190_REG_F 0x09 /* Fault */
+#define BQ24190_REG_F_WATCHDOG_FAULT_MASK BIT(7)
+#define BQ24190_REG_F_WATCHDOG_FAULT_SHIFT 7
+#define BQ24190_REG_F_BOOST_FAULT_MASK BIT(6)
+#define BQ24190_REG_F_BOOST_FAULT_SHIFT 6
+#define BQ24190_REG_F_CHRG_FAULT_MASK (BIT(5) | BIT(4))
+#define BQ24190_REG_F_CHRG_FAULT_SHIFT 4
+#define BQ24190_REG_F_BAT_FAULT_MASK BIT(3)
+#define BQ24190_REG_F_BAT_FAULT_SHIFT 3
+#define BQ24190_REG_F_NTC_FAULT_MASK (BIT(2) | BIT(1) | BIT(0))
+#define BQ24190_REG_F_NTC_FAULT_SHIFT 0
+
+#define BQ24190_REG_VPRS 0x0A /* Vendor/Part/Revision Status */
+#define BQ24190_REG_VPRS_PN_MASK (BIT(5) | BIT(4) | BIT(3))
+#define BQ24190_REG_VPRS_PN_SHIFT 3
+#define BQ24190_REG_VPRS_PN_24190 0x4
+#define BQ24190_REG_VPRS_PN_24192 0x5 /* Also 24193 */
+#define BQ24190_REG_VPRS_PN_24192I 0x3
+#define BQ24190_REG_VPRS_TS_PROFILE_MASK BIT(2)
+#define BQ24190_REG_VPRS_TS_PROFILE_SHIFT 2
+#define BQ24190_REG_VPRS_DEV_REG_MASK (BIT(1) | BIT(0))
+#define BQ24190_REG_VPRS_DEV_REG_SHIFT 0
+
+/*
+ * The FAULT register is latched by the bq24190 (except for NTC_FAULT)
+ * so the first read after a fault returns the latched value and subsequent
+ * reads return the current value. In order to return the fault status
+ * to the user, have the interrupt handler save the reg's value and retrieve
+ * it in the appropriate health/status routine. Each routine has its own
+ * flag indicating whether it should use the value stored by the last run
+ * of the interrupt handler or do an actual reg read. That way each routine
+ * can report back whatever fault may have occurred.
+ */
+struct bq24190_dev_info {
+ struct i2c_client *client;
+ struct device *dev;
+ struct power_supply charger;
+ struct power_supply battery;
+ char model_name[I2C_NAME_SIZE];
+ kernel_ulong_t model;
+ unsigned int gpio_int;
+ unsigned int irq;
+ struct mutex f_reg_lock;
+ bool first_time;
+ bool charger_health_valid;
+ bool battery_health_valid;
+ bool battery_status_valid;
+ u8 f_reg;
+ u8 ss_reg;
+ u8 watchdog;
+};
+
+/*
+ * The tables below provide a 2-way mapping for the value that goes in
+ * the register field and the real-world value that it represents.
+ * The index of the array is the value that goes in the register; the
+ * number at that index in the array is the real-world value that it
+ * represents.
+ */
+/* REG02[7:2] (ICHG) in uA */
+static const int bq24190_ccc_ichg_values[] = {
+ 512000, 576000, 640000, 704000, 768000, 832000, 896000, 960000,
+ 1024000, 1088000, 1152000, 1216000, 1280000, 1344000, 1408000, 1472000,
+ 1536000, 1600000, 1664000, 1728000, 1792000, 1856000, 1920000, 1984000,
+ 2048000, 2112000, 2176000, 2240000, 2304000, 2368000, 2432000, 2496000,
+ 2560000, 2624000, 2688000, 2752000, 2816000, 2880000, 2944000, 3008000,
+ 3072000, 3136000, 3200000, 3264000, 3328000, 3392000, 3456000, 3520000,
+ 3584000, 3648000, 3712000, 3776000, 3840000, 3904000, 3968000, 4032000,
+ 4096000, 4160000, 4224000, 4288000, 4352000, 4416000, 4480000, 4544000
+};
+
+/* REG04[7:2] (VREG) in uV */
+static const int bq24190_cvc_vreg_values[] = {
+ 3504000, 3520000, 3536000, 3552000, 3568000, 3584000, 3600000, 3616000,
+ 3632000, 3648000, 3664000, 3680000, 3696000, 3712000, 3728000, 3744000,
+ 3760000, 3776000, 3792000, 3808000, 3824000, 3840000, 3856000, 3872000,
+ 3888000, 3904000, 3920000, 3936000, 3952000, 3968000, 3984000, 4000000,
+ 4016000, 4032000, 4048000, 4064000, 4080000, 4096000, 4112000, 4128000,
+ 4144000, 4160000, 4176000, 4192000, 4208000, 4224000, 4240000, 4256000,
+ 4272000, 4288000, 4304000, 4320000, 4336000, 4352000, 4368000, 4384000,
+ 4400000
+};
+
+/* REG06[1:0] (TREG) in tenths of degrees Celsius */
+static const int bq24190_ictrc_treg_values[] = {
+ 600, 800, 1000, 1200
+};
+
+/*
+ * Return the index in 'tbl' of the greatest value that is less than or equal to
+ * 'val'. The index range returned is 0 to 'tbl_size' - 1. Assumes that
+ * the values in 'tbl' are sorted from smallest to largest and 'tbl_size'
+ * is less than 2^8.
+ */
+static u8 bq24190_find_idx(const int tbl[], int tbl_size, int v)
+{
+ int i;
+
+ for (i = 1; i < tbl_size; i++)
+ if (v < tbl[i])
+ break;
+
+ return i - 1;
+}
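For example (illustrative request, using the ICHG table above): 1500000 uA sits between the entries 1472000 and 1536000, so the index of the largest entry not exceeding it comes back:

	u8 idx;

	idx = bq24190_find_idx(bq24190_ccc_ichg_values,
			       ARRAY_SIZE(bq24190_ccc_ichg_values), 1500000);
	/* idx == 15; bq24190_ccc_ichg_values[15] == 1472000 */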
+
+/* Basic driver I/O routines */
+
+static int bq24190_read(struct bq24190_dev_info *bdi, u8 reg, u8 *data)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(bdi->client, reg);
+ if (ret < 0)
+ return ret;
+
+ *data = ret;
+ return 0;
+}
+
+static int bq24190_write(struct bq24190_dev_info *bdi, u8 reg, u8 data)
+{
+ return i2c_smbus_write_byte_data(bdi->client, reg, data);
+}
+
+static int bq24190_read_mask(struct bq24190_dev_info *bdi, u8 reg,
+ u8 mask, u8 shift, u8 *data)
+{
+ u8 v;
+ int ret;
+
+ ret = bq24190_read(bdi, reg, &v);
+ if (ret < 0)
+ return ret;
+
+ v &= mask;
+ v >>= shift;
+ *data = v;
+
+ return 0;
+}
+
+static int bq24190_write_mask(struct bq24190_dev_info *bdi, u8 reg,
+ u8 mask, u8 shift, u8 data)
+{
+ u8 v;
+ int ret;
+
+ ret = bq24190_read(bdi, reg, &v);
+ if (ret < 0)
+ return ret;
+
+ v &= ~mask;
+ v |= ((data << shift) & mask);
+
+ return bq24190_write(bdi, reg, v);
+}
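A minimal sketch (not in the driver itself) of the read-modify-write pair above, setting the FORCE_20PCT bit and reading it back:

	u8 v;
	int ret;

	ret = bq24190_write_mask(bdi, BQ24190_REG_CCC,
				 BQ24190_REG_CCC_FORCE_20PCT_MASK,
				 BQ24190_REG_CCC_FORCE_20PCT_SHIFT, 0x1);
	if (!ret)
		ret = bq24190_read_mask(bdi, BQ24190_REG_CCC,
					BQ24190_REG_CCC_FORCE_20PCT_MASK,
					BQ24190_REG_CCC_FORCE_20PCT_SHIFT, &v);
	/* on success, v == 0x1: the field is masked and shifted down for us */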
+
+static int bq24190_get_field_val(struct bq24190_dev_info *bdi,
+ u8 reg, u8 mask, u8 shift,
+ const int tbl[], int tbl_size,
+ int *val)
+{
+ u8 v;
+ int ret;
+
+ ret = bq24190_read_mask(bdi, reg, mask, shift, &v);
+ if (ret < 0)
+ return ret;
+
+ v = (v >= tbl_size) ? (tbl_size - 1) : v;
+ *val = tbl[v];
+
+ return 0;
+}
+
+static int bq24190_set_field_val(struct bq24190_dev_info *bdi,
+ u8 reg, u8 mask, u8 shift,
+ const int tbl[], int tbl_size,
+ int val)
+{
+ u8 idx;
+
+ idx = bq24190_find_idx(tbl, tbl_size, val);
+
+ return bq24190_write_mask(bdi, reg, mask, shift, idx);
+}
+
+#ifdef CONFIG_SYSFS
+/*
+ * There are numerous options configurable on the bq24190
+ * that go well beyond what the power_supply properties provide access to.
+ * Provide sysfs access to them so they can be examined and possibly modified
+ * on the fly. They will be provided for the charger power_supply object only
+ * and will be prefixed by 'f_' to make them easier to recognize.
+ */
+
+#define BQ24190_SYSFS_FIELD(_name, r, f, m, store) \
+{ \
+ .attr = __ATTR(f_##_name, m, bq24190_sysfs_show, store), \
+ .reg = BQ24190_REG_##r, \
+ .mask = BQ24190_REG_##r##_##f##_MASK, \
+ .shift = BQ24190_REG_##r##_##f##_SHIFT, \
+}
+
+#define BQ24190_SYSFS_FIELD_RW(_name, r, f) \
+ BQ24190_SYSFS_FIELD(_name, r, f, S_IWUSR | S_IRUGO, \
+ bq24190_sysfs_store)
+
+#define BQ24190_SYSFS_FIELD_RO(_name, r, f) \
+ BQ24190_SYSFS_FIELD(_name, r, f, S_IRUGO, NULL)
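For reference, a hand expansion of one entry in the table further below (illustrative only): BQ24190_SYSFS_FIELD_RW(ichg, CCC, ICHG) becomes

	{
		.attr	= __ATTR(f_ichg, S_IWUSR | S_IRUGO,
				 bq24190_sysfs_show, bq24190_sysfs_store),
		.reg	= BQ24190_REG_CCC,
		.mask	= BQ24190_REG_CCC_ICHG_MASK,
		.shift	= BQ24190_REG_CCC_ICHG_SHIFT,
	},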
+
+static ssize_t bq24190_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t bq24190_sysfs_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+struct bq24190_sysfs_field_info {
+ struct device_attribute attr;
+ u8 reg;
+ u8 mask;
+ u8 shift;
+};
+
+/* On i386 ptrace-abi.h defines SS that breaks the macro calls below. */
+#undef SS
+
+static struct bq24190_sysfs_field_info bq24190_sysfs_field_tbl[] = {
+ /* sysfs name reg field in reg */
+ BQ24190_SYSFS_FIELD_RW(en_hiz, ISC, EN_HIZ),
+ BQ24190_SYSFS_FIELD_RW(vindpm, ISC, VINDPM),
+ BQ24190_SYSFS_FIELD_RW(iinlim, ISC, IINLIM),
+ BQ24190_SYSFS_FIELD_RW(chg_config, POC, CHG_CONFIG),
+ BQ24190_SYSFS_FIELD_RW(sys_min, POC, SYS_MIN),
+ BQ24190_SYSFS_FIELD_RW(boost_lim, POC, BOOST_LIM),
+ BQ24190_SYSFS_FIELD_RW(ichg, CCC, ICHG),
+ BQ24190_SYSFS_FIELD_RW(force_20_pct, CCC, FORCE_20PCT),
+ BQ24190_SYSFS_FIELD_RW(iprechg, PCTCC, IPRECHG),
+ BQ24190_SYSFS_FIELD_RW(iterm, PCTCC, ITERM),
+ BQ24190_SYSFS_FIELD_RW(vreg, CVC, VREG),
+ BQ24190_SYSFS_FIELD_RW(batlowv, CVC, BATLOWV),
+ BQ24190_SYSFS_FIELD_RW(vrechg, CVC, VRECHG),
+ BQ24190_SYSFS_FIELD_RW(en_term, CTTC, EN_TERM),
+ BQ24190_SYSFS_FIELD_RW(term_stat, CTTC, TERM_STAT),
+ BQ24190_SYSFS_FIELD_RO(watchdog, CTTC, WATCHDOG),
+ BQ24190_SYSFS_FIELD_RW(en_timer, CTTC, EN_TIMER),
+ BQ24190_SYSFS_FIELD_RW(chg_timer, CTTC, CHG_TIMER),
+ BQ24190_SYSFS_FIELD_RW(jeta_iset, CTTC, JEITA_ISET),
+ BQ24190_SYSFS_FIELD_RW(bat_comp, ICTRC, BAT_COMP),
+ BQ24190_SYSFS_FIELD_RW(vclamp, ICTRC, VCLAMP),
+ BQ24190_SYSFS_FIELD_RW(treg, ICTRC, TREG),
+ BQ24190_SYSFS_FIELD_RW(dpdm_en, MOC, DPDM_EN),
+ BQ24190_SYSFS_FIELD_RW(tmr2x_en, MOC, TMR2X_EN),
+ BQ24190_SYSFS_FIELD_RW(batfet_disable, MOC, BATFET_DISABLE),
+ BQ24190_SYSFS_FIELD_RW(jeita_vset, MOC, JEITA_VSET),
+ BQ24190_SYSFS_FIELD_RO(int_mask, MOC, INT_MASK),
+ BQ24190_SYSFS_FIELD_RO(vbus_stat, SS, VBUS_STAT),
+ BQ24190_SYSFS_FIELD_RO(chrg_stat, SS, CHRG_STAT),
+ BQ24190_SYSFS_FIELD_RO(dpm_stat, SS, DPM_STAT),
+ BQ24190_SYSFS_FIELD_RO(pg_stat, SS, PG_STAT),
+ BQ24190_SYSFS_FIELD_RO(therm_stat, SS, THERM_STAT),
+ BQ24190_SYSFS_FIELD_RO(vsys_stat, SS, VSYS_STAT),
+ BQ24190_SYSFS_FIELD_RO(watchdog_fault, F, WATCHDOG_FAULT),
+ BQ24190_SYSFS_FIELD_RO(boost_fault, F, BOOST_FAULT),
+ BQ24190_SYSFS_FIELD_RO(chrg_fault, F, CHRG_FAULT),
+ BQ24190_SYSFS_FIELD_RO(bat_fault, F, BAT_FAULT),
+ BQ24190_SYSFS_FIELD_RO(ntc_fault, F, NTC_FAULT),
+ BQ24190_SYSFS_FIELD_RO(pn, VPRS, PN),
+ BQ24190_SYSFS_FIELD_RO(ts_profile, VPRS, TS_PROFILE),
+ BQ24190_SYSFS_FIELD_RO(dev_reg, VPRS, DEV_REG),
+};
+
+static struct attribute *
+ bq24190_sysfs_attrs[ARRAY_SIZE(bq24190_sysfs_field_tbl) + 1];
+
+static const struct attribute_group bq24190_sysfs_attr_group = {
+ .attrs = bq24190_sysfs_attrs,
+};
+
+static void bq24190_sysfs_init_attrs(void)
+{
+ int i, limit = ARRAY_SIZE(bq24190_sysfs_field_tbl);
+
+ for (i = 0; i < limit; i++)
+ bq24190_sysfs_attrs[i] = &bq24190_sysfs_field_tbl[i].attr.attr;
+
+	bq24190_sysfs_attrs[limit] = NULL;	/* the attrs array has one extra slot for this NULL */
+}
+
+static struct bq24190_sysfs_field_info *bq24190_sysfs_field_lookup(
+ const char *name)
+{
+ int i, limit = ARRAY_SIZE(bq24190_sysfs_field_tbl);
+
+ for (i = 0; i < limit; i++)
+ if (!strcmp(name, bq24190_sysfs_field_tbl[i].attr.attr.name))
+ break;
+
+ if (i >= limit)
+ return NULL;
+
+ return &bq24190_sysfs_field_tbl[i];
+}
+
+static ssize_t bq24190_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq24190_dev_info *bdi =
+ container_of(psy, struct bq24190_dev_info, charger);
+ struct bq24190_sysfs_field_info *info;
+ int ret;
+ u8 v;
+
+ info = bq24190_sysfs_field_lookup(attr->attr.name);
+ if (!info)
+ return -EINVAL;
+
+ ret = bq24190_read_mask(bdi, info->reg, info->mask, info->shift, &v);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%hhx\n", v);
+}
+
+static ssize_t bq24190_sysfs_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct bq24190_dev_info *bdi =
+ container_of(psy, struct bq24190_dev_info, charger);
+ struct bq24190_sysfs_field_info *info;
+ int ret;
+ u8 v;
+
+ info = bq24190_sysfs_field_lookup(attr->attr.name);
+ if (!info)
+ return -EINVAL;
+
+ ret = kstrtou8(buf, 0, &v);
+ if (ret < 0)
+ return ret;
+
+ ret = bq24190_write_mask(bdi, info->reg, info->mask, info->shift, v);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static int bq24190_sysfs_create_group(struct bq24190_dev_info *bdi)
+{
+ bq24190_sysfs_init_attrs();
+
+ return sysfs_create_group(&bdi->charger.dev->kobj,
+ &bq24190_sysfs_attr_group);
+}
+
+static void bq24190_sysfs_remove_group(struct bq24190_dev_info *bdi)
+{
+ sysfs_remove_group(&bdi->charger.dev->kobj, &bq24190_sysfs_attr_group);
+}
+#else
+static int bq24190_sysfs_create_group(struct bq24190_dev_info *bdi)
+{
+ return 0;
+}
+
+static inline void bq24190_sysfs_remove_group(struct bq24190_dev_info *bdi) {}
+#endif
+
+/*
+ * According to the "Host Mode and default Mode" section of the
+ * manual, a write to any register causes the bq24190 to switch
+ * from default mode to host mode. It will switch back to default
+ * mode after a WDT timeout unless the WDT is turned off as well.
+ * So, by simply turning off the WDT, we accomplish both with the
+ * same write.
+ */
+static int bq24190_set_mode_host(struct bq24190_dev_info *bdi)
+{
+ int ret;
+ u8 v;
+
+ ret = bq24190_read(bdi, BQ24190_REG_CTTC, &v);
+ if (ret < 0)
+ return ret;
+
+ bdi->watchdog = ((v & BQ24190_REG_CTTC_WATCHDOG_MASK) >>
+ BQ24190_REG_CTTC_WATCHDOG_SHIFT);
+ v &= ~BQ24190_REG_CTTC_WATCHDOG_MASK;
+
+ return bq24190_write(bdi, BQ24190_REG_CTTC, v);
+}
+
+static int bq24190_register_reset(struct bq24190_dev_info *bdi)
+{
+ int ret, limit = 100;
+ u8 v;
+
+ /* Reset the registers */
+ ret = bq24190_write_mask(bdi, BQ24190_REG_POC,
+ BQ24190_REG_POC_RESET_MASK,
+ BQ24190_REG_POC_RESET_SHIFT,
+ 0x1);
+ if (ret < 0)
+ return ret;
+
+ /* Reset bit will be cleared by hardware so poll until it is */
+ do {
+ ret = bq24190_read_mask(bdi, BQ24190_REG_POC,
+ BQ24190_REG_POC_RESET_MASK,
+ BQ24190_REG_POC_RESET_SHIFT,
+ &v);
+ if (ret < 0)
+ return ret;
+
+ if (!v)
+ break;
+
+ udelay(10);
+ } while (--limit);
+
+ if (!limit)
+ return -EIO;
+
+ return 0;
+}
+
+/* Charger power supply property routines */
+
+static int bq24190_charger_get_charge_type(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ u8 v;
+ int type, ret;
+
+ ret = bq24190_read_mask(bdi, BQ24190_REG_POC,
+ BQ24190_REG_POC_CHG_CONFIG_MASK,
+ BQ24190_REG_POC_CHG_CONFIG_SHIFT,
+ &v);
+ if (ret < 0)
+ return ret;
+
+ /* If POC[CHG_CONFIG] (REG01[5:4]) == 0, charge is disabled */
+ if (!v) {
+ type = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ } else {
+ ret = bq24190_read_mask(bdi, BQ24190_REG_CCC,
+ BQ24190_REG_CCC_FORCE_20PCT_MASK,
+ BQ24190_REG_CCC_FORCE_20PCT_SHIFT,
+ &v);
+ if (ret < 0)
+ return ret;
+
+ type = (v) ? POWER_SUPPLY_CHARGE_TYPE_TRICKLE :
+ POWER_SUPPLY_CHARGE_TYPE_FAST;
+ }
+
+ val->intval = type;
+
+ return 0;
+}
+
+static int bq24190_charger_set_charge_type(struct bq24190_dev_info *bdi,
+ const union power_supply_propval *val)
+{
+ u8 chg_config, force_20pct, en_term;
+ int ret;
+
+ /*
+ * According to the "Termination when REG02[0] = 1" section of
+ * the bq24190 manual, the trickle charge could be less than the
+ * termination current so it recommends turning off the termination
+ * function.
+ *
+ * Note: AFAICT from the datasheet, the user will have to manually
+ * turn off the charging when in 20% mode. If it's not turned off,
+ * there could be battery damage. So, use this mode at your own risk.
+ */
+ switch (val->intval) {
+ case POWER_SUPPLY_CHARGE_TYPE_NONE:
+ chg_config = 0x0;
+ break;
+ case POWER_SUPPLY_CHARGE_TYPE_TRICKLE:
+ chg_config = 0x1;
+ force_20pct = 0x1;
+ en_term = 0x0;
+ break;
+ case POWER_SUPPLY_CHARGE_TYPE_FAST:
+ chg_config = 0x1;
+ force_20pct = 0x0;
+ en_term = 0x1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (chg_config) { /* Enabling the charger */
+ ret = bq24190_write_mask(bdi, BQ24190_REG_CCC,
+ BQ24190_REG_CCC_FORCE_20PCT_MASK,
+ BQ24190_REG_CCC_FORCE_20PCT_SHIFT,
+ force_20pct);
+ if (ret < 0)
+ return ret;
+
+ ret = bq24190_write_mask(bdi, BQ24190_REG_CTTC,
+ BQ24190_REG_CTTC_EN_TERM_MASK,
+ BQ24190_REG_CTTC_EN_TERM_SHIFT,
+ en_term);
+ if (ret < 0)
+ return ret;
+ }
+
+ return bq24190_write_mask(bdi, BQ24190_REG_POC,
+ BQ24190_REG_POC_CHG_CONFIG_MASK,
+ BQ24190_REG_POC_CHG_CONFIG_SHIFT, chg_config);
+}
+
+static int bq24190_charger_get_health(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ u8 v;
+ int health, ret;
+
+ mutex_lock(&bdi->f_reg_lock);
+
+ if (bdi->charger_health_valid) {
+ v = bdi->f_reg;
+ bdi->charger_health_valid = false;
+ mutex_unlock(&bdi->f_reg_lock);
+ } else {
+ mutex_unlock(&bdi->f_reg_lock);
+
+ ret = bq24190_read(bdi, BQ24190_REG_F, &v);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (v & BQ24190_REG_F_BOOST_FAULT_MASK) {
+ /*
+ * This could be over-current or over-voltage but there's
+ * no way to tell which. Return 'OVERVOLTAGE' since there
+ * isn't an 'OVERCURRENT' value defined that we can return
+ * even if it was over-current.
+ */
+ health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ } else {
+ v &= BQ24190_REG_F_CHRG_FAULT_MASK;
+ v >>= BQ24190_REG_F_CHRG_FAULT_SHIFT;
+
+ switch (v) {
+ case 0x0: /* Normal */
+ health = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case 0x1: /* Input Fault (VBUS OVP or VBAT<VBUS<3.8V) */
+ /*
+ * This could be over-voltage or under-voltage
+ * and there's no way to tell which. Instead
+ * of looking foolish and returning 'OVERVOLTAGE'
+ * when it's really under-voltage, just return
+ * 'UNSPEC_FAILURE'.
+ */
+ health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ break;
+ case 0x2: /* Thermal Shutdown */
+ health = POWER_SUPPLY_HEALTH_OVERHEAT;
+ break;
+ case 0x3: /* Charge Safety Timer Expiration */
+ health = POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE;
+ break;
+ default:
+ health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+ }
+
+ val->intval = health;
+
+ return 0;
+}
+
+static int bq24190_charger_get_online(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ u8 v;
+ int ret;
+
+ ret = bq24190_read_mask(bdi, BQ24190_REG_SS,
+ BQ24190_REG_SS_PG_STAT_MASK,
+ BQ24190_REG_SS_PG_STAT_SHIFT, &v);
+ if (ret < 0)
+ return ret;
+
+ val->intval = v;
+ return 0;
+}
+
+static int bq24190_charger_get_current(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ u8 v;
+ int curr, ret;
+
+ ret = bq24190_get_field_val(bdi, BQ24190_REG_CCC,
+ BQ24190_REG_CCC_ICHG_MASK, BQ24190_REG_CCC_ICHG_SHIFT,
+ bq24190_ccc_ichg_values,
+ ARRAY_SIZE(bq24190_ccc_ichg_values), &curr);
+ if (ret < 0)
+ return ret;
+
+ ret = bq24190_read_mask(bdi, BQ24190_REG_CCC,
+ BQ24190_REG_CCC_FORCE_20PCT_MASK,
+ BQ24190_REG_CCC_FORCE_20PCT_SHIFT, &v);
+ if (ret < 0)
+ return ret;
+
+ /* If FORCE_20PCT is enabled, then current is 20% of ICHG value */
+ if (v)
+ curr /= 5;
+
+ val->intval = curr;
+ return 0;
+}
+
+static int bq24190_charger_get_current_max(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ int idx = ARRAY_SIZE(bq24190_ccc_ichg_values) - 1;
+
+ val->intval = bq24190_ccc_ichg_values[idx];
+ return 0;
+}
+
+static int bq24190_charger_set_current(struct bq24190_dev_info *bdi,
+ const union power_supply_propval *val)
+{
+ u8 v;
+ int ret, curr = val->intval;
+
+ ret = bq24190_read_mask(bdi, BQ24190_REG_CCC,
+ BQ24190_REG_CCC_FORCE_20PCT_MASK,
+ BQ24190_REG_CCC_FORCE_20PCT_SHIFT, &v);
+ if (ret < 0)
+ return ret;
+
+ /* If FORCE_20PCT is enabled, multiply the value passed in by 5 */
+ if (v)
+ curr *= 5;
+
+ return bq24190_set_field_val(bdi, BQ24190_REG_CCC,
+ BQ24190_REG_CCC_ICHG_MASK, BQ24190_REG_CCC_ICHG_SHIFT,
+ bq24190_ccc_ichg_values,
+ ARRAY_SIZE(bq24190_ccc_ichg_values), curr);
+}
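
For illustration only (not part of the patch): a minimal standalone sketch of the FORCE_20PCT round-trip implemented by the two routines above. The 2048000 uA ICHG entry and the 409600 uA request are assumed example values, not figures from this patch.

#include <stdio.h>

int main(void)
{
	int force_20pct = 1;
	int ichg_ua = 2048000;		/* assumed ICHG register value, in uA */
	int requested_ua = 409600;	/* assumed current requested by userspace */

	/* get path: in 20% mode the effective current is ICHG / 5 */
	if (force_20pct)
		printf("effective current: %d uA\n", ichg_ua / 5);

	/* set path: scale the request back up before the ICHG table lookup */
	if (force_20pct)
		requested_ua *= 5;
	printf("value to look up in ICHG table: %d uA\n", requested_ua);
	return 0;
}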
+
+static int bq24190_charger_get_voltage(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ int voltage, ret;
+
+ ret = bq24190_get_field_val(bdi, BQ24190_REG_CVC,
+ BQ24190_REG_CVC_VREG_MASK, BQ24190_REG_CVC_VREG_SHIFT,
+ bq24190_cvc_vreg_values,
+ ARRAY_SIZE(bq24190_cvc_vreg_values), &voltage);
+ if (ret < 0)
+ return ret;
+
+ val->intval = voltage;
+ return 0;
+}
+
+static int bq24190_charger_get_voltage_max(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ int idx = ARRAY_SIZE(bq24190_cvc_vreg_values) - 1;
+
+ val->intval = bq24190_cvc_vreg_values[idx];
+ return 0;
+}
+
+static int bq24190_charger_set_voltage(struct bq24190_dev_info *bdi,
+ const union power_supply_propval *val)
+{
+ return bq24190_set_field_val(bdi, BQ24190_REG_CVC,
+ BQ24190_REG_CVC_VREG_MASK, BQ24190_REG_CVC_VREG_SHIFT,
+ bq24190_cvc_vreg_values,
+ ARRAY_SIZE(bq24190_cvc_vreg_values), val->intval);
+}
+
+static int bq24190_charger_get_property(struct power_supply *psy,
+ enum power_supply_property psp, union power_supply_propval *val)
+{
+ struct bq24190_dev_info *bdi =
+ container_of(psy, struct bq24190_dev_info, charger);
+ int ret;
+
+ dev_dbg(bdi->dev, "prop: %d\n", psp);
+
+ pm_runtime_get_sync(bdi->dev);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ ret = bq24190_charger_get_charge_type(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = bq24190_charger_get_health(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ ret = bq24190_charger_get_online(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ ret = bq24190_charger_get_current(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ ret = bq24190_charger_get_current_max(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ ret = bq24190_charger_get_voltage(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ ret = bq24190_charger_get_voltage_max(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_SYSTEM;
+ ret = 0;
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = bdi->model_name;
+ ret = 0;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = BQ24190_MANUFACTURER;
+ ret = 0;
+ break;
+ default:
+ ret = -ENODATA;
+ }
+
+ pm_runtime_put_sync(bdi->dev);
+ return ret;
+}
+
+static int bq24190_charger_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct bq24190_dev_info *bdi =
+ container_of(psy, struct bq24190_dev_info, charger);
+ int ret;
+
+ dev_dbg(bdi->dev, "prop: %d\n", psp);
+
+ pm_runtime_get_sync(bdi->dev);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ ret = bq24190_charger_set_charge_type(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ ret = bq24190_charger_set_current(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ ret = bq24190_charger_set_voltage(bdi, val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ pm_runtime_put_sync(bdi->dev);
+ return ret;
+}
+
+static int bq24190_charger_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ ret = 1;
+ break;
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static enum power_supply_property bq24190_charger_properties[] = {
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_SCOPE,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static char *bq24190_charger_supplied_to[] = {
+ "main-battery",
+};
+
+static void bq24190_charger_init(struct power_supply *charger)
+{
+ charger->name = "bq24190-charger";
+ charger->type = POWER_SUPPLY_TYPE_USB;
+ charger->properties = bq24190_charger_properties;
+ charger->num_properties = ARRAY_SIZE(bq24190_charger_properties);
+ charger->supplied_to = bq24190_charger_supplied_to;
+ charger->num_supplies = ARRAY_SIZE(bq24190_charger_supplied_to);
+ charger->get_property = bq24190_charger_get_property;
+ charger->set_property = bq24190_charger_set_property;
+ charger->property_is_writeable = bq24190_charger_property_is_writeable;
+}
+
+/* Battery power supply property routines */
+
+static int bq24190_battery_get_status(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ u8 ss_reg, chrg_fault;
+ int status, ret;
+
+ mutex_lock(&bdi->f_reg_lock);
+
+ if (bdi->battery_status_valid) {
+ chrg_fault = bdi->f_reg;
+ bdi->battery_status_valid = false;
+ mutex_unlock(&bdi->f_reg_lock);
+ } else {
+ mutex_unlock(&bdi->f_reg_lock);
+
+ ret = bq24190_read(bdi, BQ24190_REG_F, &chrg_fault);
+ if (ret < 0)
+ return ret;
+ }
+
+ chrg_fault &= BQ24190_REG_F_CHRG_FAULT_MASK;
+ chrg_fault >>= BQ24190_REG_F_CHRG_FAULT_SHIFT;
+
+ ret = bq24190_read(bdi, BQ24190_REG_SS, &ss_reg);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The battery must be discharging when any of these are true:
+ * - there is no good power source;
+ * - there is a charge fault.
+ * Could also be discharging when in "supplement mode" but
+ * there is no way to tell when it's in that mode.
+ */
+ if (!(ss_reg & BQ24190_REG_SS_PG_STAT_MASK) || chrg_fault) {
+ status = POWER_SUPPLY_STATUS_DISCHARGING;
+ } else {
+ ss_reg &= BQ24190_REG_SS_CHRG_STAT_MASK;
+ ss_reg >>= BQ24190_REG_SS_CHRG_STAT_SHIFT;
+
+ switch (ss_reg) {
+ case 0x0: /* Not Charging */
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ case 0x1: /* Pre-charge */
+ case 0x2: /* Fast Charging */
+ status = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case 0x3: /* Charge Termination Done */
+ status = POWER_SUPPLY_STATUS_FULL;
+ break;
+ default:
+ ret = -EIO;
+ }
+ }
+
+ if (!ret)
+ val->intval = status;
+
+ return ret;
+}
+
+static int bq24190_battery_get_health(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ u8 v;
+ int health, ret;
+
+ mutex_lock(&bdi->f_reg_lock);
+
+ if (bdi->battery_health_valid) {
+ v = bdi->f_reg;
+ bdi->battery_health_valid = false;
+ mutex_unlock(&bdi->f_reg_lock);
+ } else {
+ mutex_unlock(&bdi->f_reg_lock);
+
+ ret = bq24190_read(bdi, BQ24190_REG_F, &v);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (v & BQ24190_REG_F_BAT_FAULT_MASK) {
+ health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ } else {
+ v &= BQ24190_REG_F_NTC_FAULT_MASK;
+ v >>= BQ24190_REG_F_NTC_FAULT_SHIFT;
+
+ switch (v) {
+ case 0x0: /* Normal */
+ health = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case 0x1: /* TS1 Cold */
+ case 0x3: /* TS2 Cold */
+ case 0x5: /* Both Cold */
+ health = POWER_SUPPLY_HEALTH_COLD;
+ break;
+ case 0x2: /* TS1 Hot */
+ case 0x4: /* TS2 Hot */
+ case 0x6: /* Both Hot */
+ health = POWER_SUPPLY_HEALTH_OVERHEAT;
+ break;
+ default:
+ health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+ }
+
+ val->intval = health;
+ return 0;
+}
+
+static int bq24190_battery_get_online(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ u8 batfet_disable;
+ int ret;
+
+ ret = bq24190_read_mask(bdi, BQ24190_REG_MOC,
+ BQ24190_REG_MOC_BATFET_DISABLE_MASK,
+ BQ24190_REG_MOC_BATFET_DISABLE_SHIFT, &batfet_disable);
+ if (ret < 0)
+ return ret;
+
+ val->intval = !batfet_disable;
+ return 0;
+}
+
+static int bq24190_battery_set_online(struct bq24190_dev_info *bdi,
+ const union power_supply_propval *val)
+{
+ return bq24190_write_mask(bdi, BQ24190_REG_MOC,
+ BQ24190_REG_MOC_BATFET_DISABLE_MASK,
+ BQ24190_REG_MOC_BATFET_DISABLE_SHIFT, !val->intval);
+}
+
+static int bq24190_battery_get_temp_alert_max(struct bq24190_dev_info *bdi,
+ union power_supply_propval *val)
+{
+ int temp, ret;
+
+ ret = bq24190_get_field_val(bdi, BQ24190_REG_ICTRC,
+ BQ24190_REG_ICTRC_TREG_MASK,
+ BQ24190_REG_ICTRC_TREG_SHIFT,
+ bq24190_ictrc_treg_values,
+ ARRAY_SIZE(bq24190_ictrc_treg_values), &temp);
+ if (ret < 0)
+ return ret;
+
+ val->intval = temp;
+ return 0;
+}
+
+static int bq24190_battery_set_temp_alert_max(struct bq24190_dev_info *bdi,
+ const union power_supply_propval *val)
+{
+ return bq24190_set_field_val(bdi, BQ24190_REG_ICTRC,
+ BQ24190_REG_ICTRC_TREG_MASK,
+ BQ24190_REG_ICTRC_TREG_SHIFT,
+ bq24190_ictrc_treg_values,
+ ARRAY_SIZE(bq24190_ictrc_treg_values), val->intval);
+}
+
+static int bq24190_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp, union power_supply_propval *val)
+{
+ struct bq24190_dev_info *bdi =
+ container_of(psy, struct bq24190_dev_info, battery);
+ int ret;
+
+ dev_dbg(bdi->dev, "prop: %d\n", psp);
+
+ pm_runtime_get_sync(bdi->dev);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ ret = bq24190_battery_get_status(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = bq24190_battery_get_health(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ ret = bq24190_battery_get_online(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ /* Could be Li-ion or Li-polymer but no way to tell which */
+ val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
+ ret = 0;
+ break;
+ case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
+ ret = bq24190_battery_get_temp_alert_max(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_SYSTEM;
+ ret = 0;
+ break;
+ default:
+ ret = -ENODATA;
+ }
+
+ pm_runtime_put_sync(bdi->dev);
+ return ret;
+}
+
+static int bq24190_battery_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct bq24190_dev_info *bdi =
+ container_of(psy, struct bq24190_dev_info, battery);
+ int ret;
+
+ dev_dbg(bdi->dev, "prop: %d\n", psp);
+
+ pm_runtime_get_sync(bdi->dev);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ ret = bq24190_battery_set_online(bdi, val);
+ break;
+ case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
+ ret = bq24190_battery_set_temp_alert_max(bdi, val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ pm_runtime_put_sync(bdi->dev);
+ return ret;
+}
+
+static int bq24190_battery_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
+ ret = 1;
+ break;
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static enum power_supply_property bq24190_battery_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_TEMP_ALERT_MAX,
+ POWER_SUPPLY_PROP_SCOPE,
+};
+
+static void bq24190_battery_init(struct power_supply *battery)
+{
+ battery->name = "bq24190-battery";
+ battery->type = POWER_SUPPLY_TYPE_BATTERY;
+ battery->properties = bq24190_battery_properties;
+ battery->num_properties = ARRAY_SIZE(bq24190_battery_properties);
+ battery->get_property = bq24190_battery_get_property;
+ battery->set_property = bq24190_battery_set_property;
+ battery->property_is_writeable = bq24190_battery_property_is_writeable;
+}
+
+static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
+{
+ struct bq24190_dev_info *bdi = data;
+ bool alert_userspace = false;
+ u8 ss_reg, f_reg;
+ int ret;
+
+ pm_runtime_get_sync(bdi->dev);
+
+ ret = bq24190_read(bdi, BQ24190_REG_SS, &ss_reg);
+ if (ret < 0) {
+ dev_err(bdi->dev, "Can't read SS reg: %d\n", ret);
+ goto out;
+ }
+
+ if (ss_reg != bdi->ss_reg) {
+ /*
+ * The device is in host mode so when PG_STAT goes from 1->0
+ * (i.e., power removed) HIZ needs to be disabled.
+ */
+ if ((bdi->ss_reg & BQ24190_REG_SS_PG_STAT_MASK) &&
+ !(ss_reg & BQ24190_REG_SS_PG_STAT_MASK)) {
+ ret = bq24190_write_mask(bdi, BQ24190_REG_ISC,
+ BQ24190_REG_ISC_EN_HIZ_MASK,
+ BQ24190_REG_ISC_EN_HIZ_SHIFT,
+ 0);
+ if (ret < 0)
+ dev_err(bdi->dev, "Can't access ISC reg: %d\n",
+ ret);
+ }
+
+ bdi->ss_reg = ss_reg;
+ alert_userspace = true;
+ }
+
+ mutex_lock(&bdi->f_reg_lock);
+
+ ret = bq24190_read(bdi, BQ24190_REG_F, &f_reg);
+ if (ret < 0) {
+ mutex_unlock(&bdi->f_reg_lock);
+ dev_err(bdi->dev, "Can't read F reg: %d\n", ret);
+ goto out;
+ }
+
+ if (f_reg != bdi->f_reg) {
+ bdi->f_reg = f_reg;
+ bdi->charger_health_valid = true;
+ bdi->battery_health_valid = true;
+ bdi->battery_status_valid = true;
+
+ alert_userspace = true;
+ }
+
+ mutex_unlock(&bdi->f_reg_lock);
+
+ /*
+ * Sometimes bq24190 gives a steady trickle of interrupts even
+ * though the watchdog timer is turned off and neither the STATUS
+ * nor FAULT registers have changed. Weed out these spurious
+ * interrupts so userspace isn't alerted for no reason.
+ * In addition, the chip always generates an interrupt after
+ * register reset so we should ignore that one (the very first
+ * interrupt received).
+ */
+ if (alert_userspace) {
+ if (!bdi->first_time) {
+ power_supply_changed(&bdi->charger);
+ power_supply_changed(&bdi->battery);
+ } else {
+ bdi->first_time = false;
+ }
+ }
+
+out:
+ pm_runtime_put_sync(bdi->dev);
+
+ dev_dbg(bdi->dev, "ss_reg: 0x%02x, f_reg: 0x%02x\n", ss_reg, f_reg);
+
+ return IRQ_HANDLED;
+}
+
+static int bq24190_hw_init(struct bq24190_dev_info *bdi)
+{
+ u8 v;
+ int ret;
+
+ pm_runtime_get_sync(bdi->dev);
+
+ /* First check that the device really is what it's supposed to be */
+ ret = bq24190_read_mask(bdi, BQ24190_REG_VPRS,
+ BQ24190_REG_VPRS_PN_MASK,
+ BQ24190_REG_VPRS_PN_SHIFT,
+ &v);
+ if (ret < 0)
+ goto out;
+
+ if (v != bdi->model) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = bq24190_register_reset(bdi);
+ if (ret < 0)
+ goto out;
+
+ ret = bq24190_set_mode_host(bdi);
+out:
+ pm_runtime_put_sync(bdi->dev);
+ return ret;
+}
+
+#ifdef CONFIG_OF
+static int bq24190_setup_dt(struct bq24190_dev_info *bdi)
+{
+ bdi->irq = irq_of_parse_and_map(bdi->dev->of_node, 0);
+ if (bdi->irq <= 0)
+ return -1;
+
+ return 0;
+}
+#else
+static int bq24190_setup_dt(struct bq24190_dev_info *bdi)
+{
+ return -1;
+}
+#endif
+
+static int bq24190_setup_pdata(struct bq24190_dev_info *bdi,
+ struct bq24190_platform_data *pdata)
+{
+ int ret;
+
+ if (!gpio_is_valid(pdata->gpio_int))
+ return -1;
+
+ ret = gpio_request(pdata->gpio_int, dev_name(bdi->dev));
+ if (ret < 0)
+ return -1;
+
+ ret = gpio_direction_input(pdata->gpio_int);
+ if (ret < 0)
+ goto out;
+
+ bdi->irq = gpio_to_irq(pdata->gpio_int);
+ if (bdi->irq <= 0)
+ goto out;
+
+ bdi->gpio_int = pdata->gpio_int;
+ return 0;
+
+out:
+ gpio_free(pdata->gpio_int);
+ return -1;
+}
+
+static int bq24190_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct device *dev = &client->dev;
+ struct bq24190_platform_data *pdata = client->dev.platform_data;
+ struct bq24190_dev_info *bdi;
+ int ret;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(dev, "No support for SMBUS_BYTE_DATA\n");
+ return -ENODEV;
+ }
+
+ bdi = devm_kzalloc(dev, sizeof(*bdi), GFP_KERNEL);
+ if (!bdi) {
+ dev_err(dev, "Can't alloc bdi struct\n");
+ return -ENOMEM;
+ }
+
+ bdi->client = client;
+ bdi->dev = dev;
+ bdi->model = id->driver_data;
+ strncpy(bdi->model_name, id->name, I2C_NAME_SIZE);
+ mutex_init(&bdi->f_reg_lock);
+ bdi->first_time = true;
+ bdi->charger_health_valid = false;
+ bdi->battery_health_valid = false;
+ bdi->battery_status_valid = false;
+
+ i2c_set_clientdata(client, bdi);
+
+ if (dev->of_node)
+ ret = bq24190_setup_dt(bdi);
+ else
+ ret = bq24190_setup_pdata(bdi, pdata);
+
+ if (ret) {
+ dev_err(dev, "Can't get irq info\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_threaded_irq(dev, bdi->irq, NULL,
+ bq24190_irq_handler_thread,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "bq24190-charger", bdi);
+ if (ret < 0) {
+ dev_err(dev, "Can't set up irq handler\n");
+ goto out1;
+ }
+
+ pm_runtime_enable(dev);
+ pm_runtime_resume(dev);
+
+ ret = bq24190_hw_init(bdi);
+ if (ret < 0) {
+ dev_err(dev, "Hardware init failed\n");
+ goto out2;
+ }
+
+ bq24190_charger_init(&bdi->charger);
+
+ ret = power_supply_register(dev, &bdi->charger);
+ if (ret) {
+ dev_err(dev, "Can't register charger\n");
+ goto out2;
+ }
+
+ bq24190_battery_init(&bdi->battery);
+
+ ret = power_supply_register(dev, &bdi->battery);
+ if (ret) {
+ dev_err(dev, "Can't register battery\n");
+ goto out3;
+ }
+
+ ret = bq24190_sysfs_create_group(bdi);
+ if (ret) {
+ dev_err(dev, "Can't create sysfs entries\n");
+ goto out4;
+ }
+
+ return 0;
+
+out4:
+ power_supply_unregister(&bdi->battery);
+out3:
+ power_supply_unregister(&bdi->charger);
+out2:
+ pm_runtime_disable(dev);
+out1:
+ if (bdi->gpio_int)
+ gpio_free(bdi->gpio_int);
+
+ return ret;
+}
+
+static int bq24190_remove(struct i2c_client *client)
+{
+ struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
+
+ pm_runtime_get_sync(bdi->dev);
+ bq24190_register_reset(bdi);
+ pm_runtime_put_sync(bdi->dev);
+
+ bq24190_sysfs_remove_group(bdi);
+ power_supply_unregister(&bdi->battery);
+ power_supply_unregister(&bdi->charger);
+ pm_runtime_disable(bdi->dev);
+
+ if (bdi->gpio_int)
+ gpio_free(bdi->gpio_int);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bq24190_pm_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
+
+ pm_runtime_get_sync(bdi->dev);
+ bq24190_register_reset(bdi);
+ pm_runtime_put_sync(bdi->dev);
+
+ return 0;
+}
+
+static int bq24190_pm_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
+
+ bdi->charger_health_valid = false;
+ bdi->battery_health_valid = false;
+ bdi->battery_status_valid = false;
+
+ pm_runtime_get_sync(bdi->dev);
+ bq24190_register_reset(bdi);
+ pm_runtime_put_sync(bdi->dev);
+
+ /* Things may have changed while suspended so alert upper layer */
+ power_supply_changed(&bdi->charger);
+ power_supply_changed(&bdi->battery);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(bq24190_pm_ops, bq24190_pm_suspend, bq24190_pm_resume);
+
+/*
+ * Only support the bq24190 right now. The bq24192, bq24192i, and bq24193
+ * are similar but not identical so the driver needs to be extended to
+ * support them.
+ */
+static const struct i2c_device_id bq24190_i2c_ids[] = {
+ { "bq24190", BQ24190_REG_VPRS_PN_24190 },
+ { },
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id bq24190_of_match[] = {
+ { .compatible = "ti,bq24190", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, bq24190_of_match);
+#else
+static const struct of_device_id bq24190_of_match[] = {
+ { },
+};
+#endif
+
+static struct i2c_driver bq24190_driver = {
+ .probe = bq24190_probe,
+ .remove = bq24190_remove,
+ .id_table = bq24190_i2c_ids,
+ .driver = {
+ .name = "bq24190-charger",
+ .owner = THIS_MODULE,
+ .pm = &bq24190_pm_ops,
+ .of_match_table = of_match_ptr(bq24190_of_match),
+ },
+};
+module_i2c_driver(bq24190_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mark A. Greer <mgreer@animalcreek.com>");
+MODULE_ALIAS("i2c:bq24190-charger");
+MODULE_DESCRIPTION("TI BQ24190 Charger Driver");
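
As a usage illustration only: userspace can switch between the two charging modes through the power-supply class's charge_type sysfs attribute. The path below is assumed from the "bq24190-charger" name registered above, and the "Trickle"/"Fast" strings are the generic power-supply class spellings, not something defined in this patch.

#include <stdio.h>

int main(void)
{
	/* Assumed path, derived from the supply name registered above. */
	const char *attr =
		"/sys/class/power_supply/bq24190-charger/charge_type";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* "Trickle" selects FORCE_20PCT mode; "Fast" is normal charging. */
	fputs("Trickle", f);
	return fclose(f) ? 1 : 0;
}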
diff --git a/drivers/power/collie_battery.c b/drivers/power/collie_battery.c
index c58d0e3..d02ae02 100644
--- a/drivers/power/collie_battery.c
+++ b/drivers/power/collie_battery.c
@@ -287,7 +287,7 @@ static struct gpio collie_batt_gpios[] = {
};
#ifdef CONFIG_PM
-static int collie_bat_suspend(struct ucb1x00_dev *dev, pm_message_t state)
+static int collie_bat_suspend(struct ucb1x00_dev *dev)
{
/* flush all pending status updates */
flush_work(&bat_work);
diff --git a/drivers/power/max8925_power.c b/drivers/power/max8925_power.c
index 0ee1e14..b4513f2 100644
--- a/drivers/power/max8925_power.c
+++ b/drivers/power/max8925_power.c
@@ -458,6 +458,7 @@ max8925_power_dt_init(struct platform_device *pdev)
of_property_read_u32(np, "fast-charge", &fast_charge);
of_property_read_u32(np, "no-insert-detect", &no_insert_detect);
of_property_read_u32(np, "no-temp-support", &no_temp_support);
+ of_node_put(np);
pdata->batt_detect = batt_detect;
pdata->fast_charge = fast_charge;
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 3b2d5df..00e6672 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -67,23 +67,42 @@ static int __power_supply_changed_work(struct device *dev, void *data)
static void power_supply_changed_work(struct work_struct *work)
{
+ unsigned long flags;
struct power_supply *psy = container_of(work, struct power_supply,
changed_work);
dev_dbg(psy->dev, "%s\n", __func__);
- class_for_each_device(power_supply_class, NULL, psy,
- __power_supply_changed_work);
-
- power_supply_update_leds(psy);
-
- kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
+ spin_lock_irqsave(&psy->changed_lock, flags);
+ if (psy->changed) {
+ psy->changed = false;
+ spin_unlock_irqrestore(&psy->changed_lock, flags);
+ class_for_each_device(power_supply_class, NULL, psy,
+ __power_supply_changed_work);
+ power_supply_update_leds(psy);
+ kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
+ spin_lock_irqsave(&psy->changed_lock, flags);
+ }
+ /*
+ * Dependent power supplies (e.g. battery) may have changed state
+ * as a result of this event, so poll again and hold the
+ * wakeup_source until all events are processed.
+ */
+ if (!psy->changed)
+ pm_relax(psy->dev);
+ spin_unlock_irqrestore(&psy->changed_lock, flags);
}
void power_supply_changed(struct power_supply *psy)
{
+ unsigned long flags;
+
dev_dbg(psy->dev, "%s\n", __func__);
+ spin_lock_irqsave(&psy->changed_lock, flags);
+ psy->changed = true;
+ pm_stay_awake(psy->dev);
+ spin_unlock_irqrestore(&psy->changed_lock, flags);
schedule_work(&psy->changed_work);
}
EXPORT_SYMBOL_GPL(power_supply_changed);
@@ -500,6 +519,11 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
goto check_supplies_failed;
}
+ spin_lock_init(&psy->changed_lock);
+ rc = device_init_wakeup(dev, true);
+ if (rc)
+ goto wakeup_init_failed;
+
rc = kobject_set_name(&dev->kobj, "%s", psy->name);
if (rc)
goto kobject_set_name_failed;
@@ -529,6 +553,7 @@ create_triggers_failed:
register_cooler_failed:
psy_unregister_thermal(psy);
register_thermal_failed:
+wakeup_init_failed:
device_del(dev);
kobject_set_name_failed:
device_add_failed:
@@ -546,6 +571,7 @@ void power_supply_unregister(struct power_supply *psy)
power_supply_remove_triggers(psy);
psy_unregister_cooler(psy);
psy_unregister_thermal(psy);
+ device_init_wakeup(psy->dev, false);
device_unregister(psy->dev);
}
EXPORT_SYMBOL_GPL(power_supply_unregister);
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 29178f7..44420d1 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -118,7 +118,7 @@ static ssize_t power_supply_store_property(struct device *dev,
long long_val;
/* TODO: support other types than int */
- ret = strict_strtol(buf, 10, &long_val);
+ ret = kstrtol(buf, 10, &long_val);
if (ret < 0)
return ret;
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index ee039dc..9b3ea53 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -14,6 +14,12 @@ config POWER_RESET_GPIO
If your board needs a GPIO high/low to power down, say Y and
create a binding in your devicetree.
+config POWER_RESET_MSM
+ bool "Qualcomm MSM power-off driver"
+ depends on POWER_RESET && ARCH_MSM
+ help
+ Power off and restart support for Qualcomm boards.
+
config POWER_RESET_QNAP
bool "QNAP power-off driver"
depends on OF_GPIO && POWER_RESET && PLAT_ORION
@@ -34,7 +40,14 @@ config POWER_RESET_RESTART
config POWER_RESET_VEXPRESS
bool "ARM Versatile Express power-off and reset driver"
depends on ARM || ARM64
- depends on POWER_RESET
+ depends on POWER_RESET && VEXPRESS_CONFIG
help
Power off and reset support for the ARM Ltd. Versatile
Express boards.
+
+config POWER_RESET_XGENE
+ bool "APM SoC X-Gene reset driver"
+ depends on ARM64
+ depends on POWER_RESET
+ help
+ Reboot support for the APM SoC X-Gene Eval boards.
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 372807f..3e6ed88 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -1,4 +1,6 @@
obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
+obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
obj-$(CONFIG_POWER_RESET_VEXPRESS) += vexpress-poweroff.o
+obj-$(CONFIG_POWER_RESET_XGENE) += xgene-reboot.o
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
new file mode 100644
index 0000000..774f9a3
--- /dev/null
+++ b/drivers/power/reset/msm-poweroff.c
@@ -0,0 +1,73 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+
+#include <asm/system_misc.h>
+
+static void __iomem *msm_ps_hold;
+
+static void do_msm_restart(enum reboot_mode reboot_mode, const char *cmd)
+{
+ writel(0, msm_ps_hold);
+ mdelay(10000);
+}
+
+static void do_msm_poweroff(void)
+{
+ /* TODO: Add poweroff capability */
+ do_msm_restart(REBOOT_HARD, NULL);
+}
+
+static int msm_restart_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *mem;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ msm_ps_hold = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(msm_ps_hold))
+ return PTR_ERR(msm_ps_hold);
+
+ pm_power_off = do_msm_poweroff;
+ arm_pm_restart = do_msm_restart;
+ return 0;
+}
+
+static const struct of_device_id of_msm_restart_match[] = {
+ { .compatible = "qcom,pshold", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_msm_restart_match);
+
+static struct platform_driver msm_restart_driver = {
+ .probe = msm_restart_probe,
+ .driver = {
+ .name = "msm-restart",
+ .of_match_table = of_match_ptr(of_msm_restart_match),
+ },
+};
+
+static int __init msm_restart_init(void)
+{
+ return platform_driver_register(&msm_restart_driver);
+}
+device_initcall(msm_restart_init);
diff --git a/drivers/power/reset/xgene-reboot.c b/drivers/power/reset/xgene-reboot.c
new file mode 100644
index 0000000..ecd55f8
--- /dev/null
+++ b/drivers/power/reset/xgene-reboot.c
@@ -0,0 +1,103 @@
+/*
+ * AppliedMicro X-Gene SoC Reboot Driver
+ *
+ * Copyright (c) 2013, Applied Micro Circuits Corporation
+ * Author: Feng Kan <fkan@apm.com>
+ * Author: Loc Ho <lho@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ * This driver provides system reboot functionality for APM X-Gene SoC.
+ * For system shutdown, this is board specific. If a board designer
+ * implements GPIO shutdown, use the gpio-poweroff.c driver.
+ */
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <asm/system_misc.h>
+
+struct xgene_reboot_context {
+ struct platform_device *pdev;
+ void __iomem *csr;
+ u32 mask;
+};
+
+static struct xgene_reboot_context *xgene_restart_ctx;
+
+static void xgene_restart(enum reboot_mode mode, const char *cmd)
+{
+ struct xgene_reboot_context *ctx = xgene_restart_ctx;
+ unsigned long timeout;
+
+ /* Issue the reboot */
+ if (ctx)
+ writel(ctx->mask, ctx->csr);
+
+ timeout = jiffies + HZ;
+ while (time_before(jiffies, timeout))
+ cpu_relax();
+
+ dev_emerg(&ctx->pdev->dev, "Unable to restart system\n");
+}
+
+static int xgene_reboot_probe(struct platform_device *pdev)
+{
+ struct xgene_reboot_context *ctx;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ dev_err(&pdev->dev, "out of memory for context\n");
+ return -ENOMEM;
+ }
+
+ ctx->csr = of_iomap(pdev->dev.of_node, 0);
+ if (!ctx->csr) {
+ devm_kfree(&pdev->dev, ctx);
+ dev_err(&pdev->dev, "can not map resource\n");
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32(pdev->dev.of_node, "mask", &ctx->mask))
+ ctx->mask = 0xFFFFFFFF;
+
+ ctx->pdev = pdev;
+ arm_pm_restart = xgene_restart;
+ xgene_restart_ctx = ctx;
+
+ return 0;
+}
+
+static struct of_device_id xgene_reboot_of_match[] = {
+ { .compatible = "apm,xgene-reboot" },
+ {}
+};
+
+static struct platform_driver xgene_reboot_driver = {
+ .probe = xgene_reboot_probe,
+ .driver = {
+ .name = "xgene-reboot",
+ .of_match_table = xgene_reboot_of_match,
+ },
+};
+
+static int __init xgene_reboot_init(void)
+{
+ return platform_driver_register(&xgene_reboot_driver);
+}
+device_initcall(xgene_reboot_init);
diff --git a/drivers/power/rx51_battery.c b/drivers/power/rx51_battery.c
index 8a6288d..1bc5857 100644
--- a/drivers/power/rx51_battery.c
+++ b/drivers/power/rx51_battery.c
@@ -25,6 +25,10 @@
#include <linux/slab.h>
#include <linux/i2c/twl4030-madc.h>
+/* RX51 specific channels */
+#define TWL4030_MADC_BTEMP_RX51 TWL4030_MADC_ADCIN0
+#define TWL4030_MADC_BCI_RX51 TWL4030_MADC_ADCIN4
+
struct rx51_device_info {
struct device *dev;
struct power_supply bat;
@@ -37,7 +41,7 @@ static int rx51_battery_read_adc(int channel)
{
struct twl4030_madc_request req;
- req.channels = 1 << channel;
+ req.channels = channel;
req.do_avg = 1;
req.method = TWL4030_MADC_SW1;
req.func_cb = NULL;
@@ -47,7 +51,7 @@ static int rx51_battery_read_adc(int channel)
if (twl4030_madc_conversion(&req) <= 0)
return -ENODATA;
- return req.rbuf[channel];
+ return req.rbuf[ffs(channel) - 1];
}
/*
@@ -56,7 +60,7 @@ static int rx51_battery_read_adc(int channel)
*/
static int rx51_battery_read_voltage(struct rx51_device_info *di)
{
- int voltage = rx51_battery_read_adc(12);
+ int voltage = rx51_battery_read_adc(TWL4030_MADC_VBAT);
if (voltage < 0)
return voltage;
@@ -108,7 +112,7 @@ static int rx51_battery_read_temperature(struct rx51_device_info *di)
{
int min = 0;
int max = ARRAY_SIZE(rx51_temp_table2) - 1;
- int raw = rx51_battery_read_adc(0);
+ int raw = rx51_battery_read_adc(TWL4030_MADC_BTEMP_RX51);
/* Zero and negative values are undefined */
if (raw <= 0)
@@ -142,7 +146,7 @@ static int rx51_battery_read_temperature(struct rx51_device_info *di)
*/
static int rx51_battery_read_capacity(struct rx51_device_info *di)
{
- int capacity = rx51_battery_read_adc(4);
+ int capacity = rx51_battery_read_adc(TWL4030_MADC_BCI_RX51);
if (capacity < 0)
return capacity;
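
A short sketch of the convention this change adopts: callers pass a channel bitmask (1 << n) in req.channels and recover the rbuf index with ffs(). The ADCINx mask values below are assumed to mirror the TWL4030_MADC_ADCINx defines referenced above.

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define ADCIN0	(1 << 0)	/* RX51 battery temperature */
#define ADCIN4	(1 << 4)	/* RX51 battery capacity */
#define ADCIN12	(1 << 12)	/* battery voltage (VBAT) */

int main(void)
{
	int channels[] = { ADCIN0, ADCIN4, ADCIN12 };
	int i;

	/* req.channels takes the mask; rbuf is indexed by channel number */
	for (i = 0; i < 3; i++)
		printf("mask 0x%04x -> rbuf index %d\n",
		       (unsigned)channels[i], ffs(channels[i]) - 1);
	return 0;
}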
diff --git a/drivers/power/tosa_battery.c b/drivers/power/tosa_battery.c
index 0224de5..f4d80df 100644
--- a/drivers/power/tosa_battery.c
+++ b/drivers/power/tosa_battery.c
@@ -150,7 +150,7 @@ static void tosa_bat_external_power_changed(struct power_supply *psy)
static irqreturn_t tosa_bat_gpio_isr(int irq, void *data)
{
- pr_info("tosa_bat_gpio irq: %d\n", gpio_get_value(irq_to_gpio(irq)));
+ pr_info("tosa_bat_gpio irq\n");
schedule_work(&bat_work);
return IRQ_HANDLED;
}
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
index be98e70..d98abe9 100644
--- a/drivers/power/twl4030_charger.c
+++ b/drivers/power/twl4030_charger.c
@@ -189,7 +189,12 @@ static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable)
/* Need to keep regulator on */
if (!bci->usb_enabled) {
- regulator_enable(bci->usb_reg);
+ ret = regulator_enable(bci->usb_reg);
+ if (ret) {
+ dev_err(bci->dev,
+ "Failed to enable regulator\n");
+ return ret;
+ }
bci->usb_enabled = 1;
}
diff --git a/drivers/power/twl4030_madc_battery.c b/drivers/power/twl4030_madc_battery.c
new file mode 100644
index 0000000..7ef445a
--- /dev/null
+++ b/drivers/power/twl4030_madc_battery.c
@@ -0,0 +1,245 @@
+/*
+ * Dumb driver for LiIon batteries using TWL4030 madc.
+ *
+ * Copyright 2013 Golden Delicious Computers
+ * Lukas Märdian <lukas@goldelico.com>
+ *
+ * Based on dumb driver for gta01 battery
+ * Copyright 2009 Openmoko, Inc
+ * Balaji Rao <balajirrao@openmoko.org>
+ */
+
+#include <linux/module.h>
+#include <linux/param.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/i2c/twl4030-madc.h>
+#include <linux/power/twl4030_madc_battery.h>
+
+struct twl4030_madc_battery {
+ struct power_supply psy;
+ struct twl4030_madc_bat_platform_data *pdata;
+};
+
+static enum power_supply_property twl4030_madc_bat_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+};
+
+static int madc_read(int index)
+{
+ struct twl4030_madc_request req;
+ int val;
+
+ req.channels = index;
+ req.method = TWL4030_MADC_SW2;
+ req.type = TWL4030_MADC_WAIT;
+ req.do_avg = 0;
+ req.raw = false;
+ req.func_cb = NULL;
+
+ val = twl4030_madc_conversion(&req);
+ if (val < 0)
+ return val;
+
+ return req.rbuf[ffs(index) - 1];
+}
+
+static int twl4030_madc_bat_get_charging_status(void)
+{
+ return (madc_read(TWL4030_MADC_ICHG) > 0) ? 1 : 0;
+}
+
+static int twl4030_madc_bat_get_voltage(void)
+{
+ return madc_read(TWL4030_MADC_VBAT);
+}
+
+static int twl4030_madc_bat_get_current(void)
+{
+ return madc_read(TWL4030_MADC_ICHG) * 1000;
+}
+
+static int twl4030_madc_bat_get_temp(void)
+{
+ return madc_read(TWL4030_MADC_BTEMP) * 10;
+}
+
+static int twl4030_madc_bat_voltscale(struct twl4030_madc_battery *bat,
+ int volt)
+{
+ struct twl4030_madc_bat_calibration *calibration;
+ int i, res = 0;
+
+ /* choose charging curve */
+ if (twl4030_madc_bat_get_charging_status())
+ calibration = bat->pdata->charging;
+ else
+ calibration = bat->pdata->discharging;
+
+ if (volt > calibration[0].voltage) {
+ res = calibration[0].level;
+ } else {
+ for (i = 0; calibration[i+1].voltage >= 0; i++) {
+ if (volt <= calibration[i].voltage &&
+ volt >= calibration[i+1].voltage) {
+ /* interval found - interpolate within range */
+ res = calibration[i].level -
+ ((calibration[i].voltage - volt) *
+ (calibration[i].level -
+ calibration[i+1].level)) /
+ (calibration[i].voltage -
+ calibration[i+1].voltage);
+ break;
+ }
+ }
+ }
+ return res;
+}
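
A worked run of the interpolation above as a standalone sketch; the calibration points are made-up example numbers, not from any real board file.

#include <stdio.h>

struct cal { int voltage, level; };

int main(void)
{
	/* hypothetical discharge curve, sorted by falling voltage */
	struct cal c[] = { { 4200, 100 }, { 3600, 30 }, { 3300, 0 },
			   { -1, 0 } };
	int volt = 3450, res = 0, i;

	for (i = 0; c[i + 1].voltage >= 0; i++) {
		if (volt <= c[i].voltage && volt >= c[i + 1].voltage) {
			/* same linear interpolation as voltscale() above */
			res = c[i].level - ((c[i].voltage - volt) *
			      (c[i].level - c[i + 1].level)) /
			      (c[i].voltage - c[i + 1].voltage);
			break;
		}
	}
	printf("%d mV -> %d%%\n", volt, res);	/* prints "3450 mV -> 15%" */
	return 0;
}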
+
+static int twl4030_madc_bat_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct twl4030_madc_battery *bat = container_of(psy,
+ struct twl4030_madc_battery, psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (twl4030_madc_bat_voltscale(bat,
+ twl4030_madc_bat_get_voltage()) > 95)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else {
+ if (twl4030_madc_bat_get_charging_status())
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ }
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = twl4030_madc_bat_get_voltage() * 1000;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = twl4030_madc_bat_get_current();
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ /* assume battery is always present */
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW: {
+ int percent = twl4030_madc_bat_voltscale(bat,
+ twl4030_madc_bat_get_voltage());
+ val->intval = (percent * bat->pdata->capacity) / 100;
+ break;
+ }
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = twl4030_madc_bat_voltscale(bat,
+ twl4030_madc_bat_get_voltage());
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = bat->pdata->capacity;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = twl4030_madc_bat_get_temp();
+ break;
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW: {
+ int percent = twl4030_madc_bat_voltscale(bat,
+ twl4030_madc_bat_get_voltage());
+ /* in mAh */
+ int chg = (percent * (bat->pdata->capacity/1000))/100;
+
+ /* assume discharge with 400 mA (ca. 1.5W) */
+ val->intval = (3600L * chg) / 400;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
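
And the TIME_TO_EMPTY_NOW arithmetic above with sample numbers; the capacity is an assumed platform-data value, and 400 mA is the discharge rate the driver comment assumes.

#include <stdio.h>

int main(void)
{
	int capacity_uah = 1200000;	/* assumed pdata->capacity, in uAh */
	int percent = 50;
	int chg = (percent * (capacity_uah / 1000)) / 100;	/* 600 mAh */

	/* at the assumed 400 mA drain: 3600 * 600 / 400 = 5400 seconds */
	printf("time to empty: %ld s\n", (3600L * chg) / 400);
	return 0;
}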
+
+static void twl4030_madc_bat_ext_changed(struct power_supply *psy)
+{
+ struct twl4030_madc_battery *bat = container_of(psy,
+ struct twl4030_madc_battery, psy);
+
+ power_supply_changed(&bat->psy);
+}
+
+static int twl4030_cmp(const void *a, const void *b)
+{
+ return ((struct twl4030_madc_bat_calibration *)b)->voltage -
+ ((struct twl4030_madc_bat_calibration *)a)->voltage;
+}
+
+static int twl4030_madc_battery_probe(struct platform_device *pdev)
+{
+ struct twl4030_madc_battery *twl4030_madc_bat;
+ struct twl4030_madc_bat_platform_data *pdata = pdev->dev.platform_data;
+
+ twl4030_madc_bat = kzalloc(sizeof(*twl4030_madc_bat), GFP_KERNEL);
+ if (!twl4030_madc_bat)
+ return -ENOMEM;
+
+ twl4030_madc_bat->psy.name = "twl4030_battery";
+ twl4030_madc_bat->psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ twl4030_madc_bat->psy.properties = twl4030_madc_bat_props;
+ twl4030_madc_bat->psy.num_properties =
+ ARRAY_SIZE(twl4030_madc_bat_props);
+ twl4030_madc_bat->psy.get_property = twl4030_madc_bat_get_property;
+ twl4030_madc_bat->psy.external_power_changed =
+ twl4030_madc_bat_ext_changed;
+
+ /* sort charging and discharging calibration data */
+ sort(pdata->charging, pdata->charging_size,
+ sizeof(struct twl4030_madc_bat_calibration),
+ twl4030_cmp, NULL);
+ sort(pdata->discharging, pdata->discharging_size,
+ sizeof(struct twl4030_madc_bat_calibration),
+ twl4030_cmp, NULL);
+
+ twl4030_madc_bat->pdata = pdata;
+ platform_set_drvdata(pdev, twl4030_madc_bat);
+ power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
+
+ return 0;
+}
+
+static int twl4030_madc_battery_remove(struct platform_device *pdev)
+{
+ struct twl4030_madc_battery *bat = platform_get_drvdata(pdev);
+
+ power_supply_unregister(&bat->psy);
+ kfree(bat);
+
+ return 0;
+}
+
+static struct platform_driver twl4030_madc_battery_driver = {
+ .driver = {
+ .name = "twl4030_madc_battery",
+ },
+ .probe = twl4030_madc_battery_probe,
+ .remove = twl4030_madc_battery_remove,
+};
+module_platform_driver(twl4030_madc_battery_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lukas Märdian <lukas@goldelico.com>");
+MODULE_DESCRIPTION("twl4030_madc battery driver");
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index feca317..92bd22c 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -645,7 +645,7 @@ dasd_diag_init(void)
}
ASCEBC(dasd_diag_discipline.ebcname, 4);
- service_subclass_irq_register();
+ irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
register_external_interrupt(0x2603, dasd_ext_handler);
dasd_diag_discipline_pointer = &dasd_diag_discipline;
return 0;
@@ -655,7 +655,7 @@ static void __exit
dasd_diag_cleanup(void)
{
unregister_external_interrupt(0x2603, dasd_ext_handler);
- service_subclass_irq_unregister();
+ irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
dasd_diag_discipline_pointer = NULL;
}
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 96e52bf..f93cc32 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -524,20 +524,20 @@ static const struct file_operations fs3270_fops = {
.llseek = no_llseek,
};
-void fs3270_create_cb(int minor)
+static void fs3270_create_cb(int minor)
{
__register_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub", &fs3270_fops);
device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor),
NULL, "3270/tub%d", minor);
}
-void fs3270_destroy_cb(int minor)
+static void fs3270_destroy_cb(int minor)
{
device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, minor));
__unregister_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub");
}
-struct raw3270_notifier fs3270_notifier =
+static struct raw3270_notifier fs3270_notifier =
{
.create = fs3270_create_cb,
.destroy = fs3270_destroy_cb,
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 3e4fb4e..a3aa374 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -910,12 +910,12 @@ sclp_check_interface(void)
spin_unlock_irqrestore(&sclp_lock, flags);
/* Enable service-signal interruption - needs to happen
* with IRQs enabled. */
- service_subclass_irq_register();
+ irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
/* Wait for signal from interrupt or timeout */
sclp_sync_wait();
/* Disable service-signal interruption - needs to happen
* with IRQs enabled. */
- service_subclass_irq_unregister();
+ irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
spin_lock_irqsave(&sclp_lock, flags);
del_timer(&sclp_request_timer);
if (sclp_init_req.status == SCLP_REQ_DONE &&
@@ -1131,7 +1131,7 @@ sclp_init(void)
spin_unlock_irqrestore(&sclp_lock, flags);
/* Enable service-signal external interruption - needs to happen with
* IRQs enabled. */
- service_subclass_irq_register();
+ irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
sclp_init_mask(1);
return 0;
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index cee69da..a0f47c8 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -1845,17 +1845,17 @@ static const struct tty_operations tty3270_ops = {
.set_termios = tty3270_set_termios
};
-void tty3270_create_cb(int minor)
+static void tty3270_create_cb(int minor)
{
tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL);
}
-void tty3270_destroy_cb(int minor)
+static void tty3270_destroy_cb(int minor)
{
tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR);
}
-struct raw3270_notifier tty3270_notifier =
+static struct raw3270_notifier tty3270_notifier =
{
.create = tty3270_create_cb,
.destroy = tty3270_destroy_cb,
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index d4174b8..02300dc 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -413,7 +413,7 @@ __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
register unsigned long reg2 asm ("2") = (unsigned long) msg;
register unsigned long reg3 asm ("3") = (unsigned long) length;
register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
- register unsigned long reg5 asm ("5") = (unsigned int) psmid;
+ register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
if (special == 1)
reg0 |= 0x400000UL;
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 2ea6165..af2166f 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -472,7 +472,7 @@ static int __init kvm_devices_init(void)
INIT_WORK(&hotplug_work, hotplug_devices);
- service_subclass_irq_register();
+ irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
register_external_interrupt(0x2603, kvm_extint_handler);
scan_devices();
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index dbfe2c1..b269abd 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -952,7 +952,7 @@ static struct fb_ops ps3fb_ops = {
.fb_compat_ioctl = ps3fb_ioctl
};
-static struct fb_fix_screeninfo ps3fb_fix __initdata = {
+static struct fb_fix_screeninfo ps3fb_fix = {
.id = DEVICE_NAME,
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_TRUECOLOR,