Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-20 15:17:07 +07:00)
62e59c4e69
Now that we've gotten rid of clk_readl() we can remove io.h from the
clk-provider header and push out the io.h include to any code that isn't
already including the io.h header but using things like readl/writel, etc.

Found with this grep:

  git grep -l clk-provider.h | grep '.c$' | xargs git grep -L 'linux/io.h' | \
  xargs git grep -l \
    -e '\<__iowrite32_copy\>' --or -e '\<__ioread32_copy\>' --or -e '\<__iowrite64_copy\>' --or \
    -e '\<ioremap_page_range\>' --or -e '\<ioremap_huge_init\>' --or \
    -e '\<arch_ioremap_pud_supported\>' --or -e '\<arch_ioremap_pmd_supported\>' --or \
    -e '\<devm_ioport_map\>' --or -e '\<devm_ioport_unmap\>' --or -e '\<IOMEM_ERR_PTR\>' --or \
    -e '\<devm_ioremap\>' --or -e '\<devm_ioremap_nocache\>' --or -e '\<devm_ioremap_wc\>' --or \
    -e '\<devm_iounmap\>' --or -e '\<devm_ioremap_release\>' --or \
    -e '\<devm_memremap\>' --or -e '\<devm_memunmap\>' --or -e '\<__devm_memremap_pages\>' --or \
    -e '\<pci_remap_cfgspace\>' --or -e '\<arch_has_dev_port\>' --or \
    -e '\<arch_phys_wc_add\>' --or -e '\<arch_phys_wc_del\>' --or \
    -e '\<memremap\>' --or -e '\<memunmap\>' --or \
    -e '\<arch_io_reserve_memtype_wc\>' --or -e '\<arch_io_free_memtype_wc\>' --or \
    -e '\<__io_aw\>' --or -e '\<__io_pbw\>' --or -e '\<__io_paw\>' --or \
    -e '\<__io_pbr\>' --or -e '\<__io_par\>' --or \
    -e '\<__raw_readb\>' --or -e '\<__raw_readw\>' --or -e '\<__raw_readl\>' --or -e '\<__raw_readq\>' --or \
    -e '\<__raw_writeb\>' --or -e '\<__raw_writew\>' --or -e '\<__raw_writel\>' --or -e '\<__raw_writeq\>' --or \
    -e '\<readb\>' --or -e '\<readw\>' --or -e '\<readl\>' --or -e '\<readq\>' --or \
    -e '\<writeb\>' --or -e '\<writew\>' --or -e '\<writel\>' --or -e '\<writeq\>' --or \
    -e '\<readb_relaxed\>' --or -e '\<readw_relaxed\>' --or -e '\<readl_relaxed\>' --or -e '\<readq_relaxed\>' --or \
    -e '\<writeb_relaxed\>' --or -e '\<writew_relaxed\>' --or -e '\<writel_relaxed\>' --or -e '\<writeq_relaxed\>' --or \
    -e '\<readsb\>' --or -e '\<readsw\>' --or -e '\<readsl\>' --or -e '\<readsq\>' --or \
    -e '\<writesb\>' --or -e '\<writesw\>' --or -e '\<writesl\>' --or -e '\<writesq\>' --or \
    -e '\<inb\>' --or -e '\<inw\>' --or -e '\<inl\>' --or \
    -e '\<outb\>' --or -e '\<outw\>' --or -e '\<outl\>' --or \
    -e '\<inb_p\>' --or -e '\<inw_p\>' --or -e '\<inl_p\>' --or \
    -e '\<outb_p\>' --or -e '\<outw_p\>' --or -e '\<outl_p\>' --or \
    -e '\<insb\>' --or -e '\<insw\>' --or -e '\<insl\>' --or \
    -e '\<outsb\>' --or -e '\<outsw\>' --or -e '\<outsl\>' --or \
    -e '\<insb_p\>' --or -e '\<insw_p\>' --or -e '\<insl_p\>' --or \
    -e '\<outsb_p\>' --or -e '\<outsw_p\>' --or -e '\<outsl_p\>' --or \
    -e '\<ioread8\>' --or -e '\<ioread16\>' --or -e '\<ioread32\>' --or -e '\<ioread64\>' --or \
    -e '\<iowrite8\>' --or -e '\<iowrite16\>' --or -e '\<iowrite32\>' --or -e '\<iowrite64\>' --or \
    -e '\<ioread16be\>' --or -e '\<ioread32be\>' --or -e '\<ioread64be\>' --or \
    -e '\<iowrite16be\>' --or -e '\<iowrite32be\>' --or -e '\<iowrite64be\>' --or \
    -e '\<ioread8_rep\>' --or -e '\<ioread16_rep\>' --or -e '\<ioread32_rep\>' --or -e '\<ioread64_rep\>' --or \
    -e '\<iowrite8_rep\>' --or -e '\<iowrite16_rep\>' --or -e '\<iowrite32_rep\>' --or -e '\<iowrite64_rep\>' --or \
    -e '\<__io_virt\>' --or -e '\<pci_iounmap\>' --or \
    -e '\<virt_to_phys\>' --or -e '\<phys_to_virt\>' --or \
    -e '\<ioremap_uc\>' --or -e '\<ioremap\>' --or -e '\<__ioremap\>' --or -e '\<iounmap\>' --or \
    -e '\<ioremap\>' --or -e '\<ioremap_nocache\>' --or -e '\<ioremap_uc\>' --or \
    -e '\<ioremap_wc\>' --or -e '\<ioremap_wc\>' --or -e '\<ioremap_wt\>' --or \
    -e '\<ioport_map\>' --or -e '\<ioport_unmap\>' --or -e '\<ioport_map\>' --or -e '\<ioport_unmap\>' --or \
    -e '\<xlate_dev_kmem_ptr\>' --or -e '\<xlate_dev_mem_ptr\>' --or -e '\<unxlate_dev_mem_ptr\>' --or \
    -e '\<virt_to_bus\>' --or -e '\<bus_to_virt\>' --or \
    -e '\<memset_io\>' --or -e '\<memcpy_fromio\>' --or -e '\<memcpy_toio\>'

I also reordered a couple includes when they weren't alphabetical and removed
clk.h from kona, replacing it with clk-provider.h because that driver doesn't
use clk consumer APIs.

Acked-by: Geert Uytterhoeven <geert+renesas@glider.be>
Cc: Chen-Yu Tsai <wens@csie.org>
Acked-by: Maxime Ripard <maxime.ripard@bootlin.com>
Acked-by: Tero Kristo <t-kristo@ti.com>
Acked-by: Sekhar Nori <nsekhar@ti.com>
Cc: Krzysztof Kozlowski <krzk@kernel.org>
Acked-by: Mark Brown <broonie@kernel.org>
Cc: Chris Zankel <chris@zankel.net>
Acked-by: Max Filippov <jcmvbkbc@gmail.com>
Acked-by: John Crispin <john@phrozen.org>
Acked-by: Heiko Stuebner <heiko@sntech.de>
Signed-off-by: Stephen Boyd <sboyd@kernel.org>
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Fuzhou Rockchip Electronics Co., Ltd
 */

#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/slab.h>
#include "clk.h"

#define div_mask(width) ((1 << (width)) - 1)

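/*
 * A half-divider register field N encodes a divisor of N + 1.5, so
 * rate = parent_rate * 2 / (2 * N + 3).  The helpers below do all of
 * their math in this doubled domain to stay in integer arithmetic.
 * For example, parent_rate = 100 MHz with N = 1 gives a divisor of
 * 2.5 and an output rate of 200 MHz / 5 = 40 MHz.
 */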
static bool _is_best_half_div(unsigned long rate, unsigned long now,
			      unsigned long best, unsigned long flags)
{
	if (flags & CLK_DIVIDER_ROUND_CLOSEST)
		return abs(rate - now) < abs(rate - best);

	return now <= rate && now > best;
}

static unsigned long clk_half_divider_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned int val;

	val = readl(divider->reg) >> divider->shift;
	val &= div_mask(divider->width);
	val = val * 2 + 3;

	return DIV_ROUND_UP_ULL(((u64)parent_rate * 2), val);
}

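/*
 * Pick the best divider setting.  When the parent rate is fixed, compute
 * the field value directly from parent_rate / rate.  Otherwise walk every
 * divider setting, ask the parent how close it can get to the matching
 * rate, and keep the best combination as judged by _is_best_half_div().
 */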
static int clk_half_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
				    unsigned long *best_parent_rate, u8 width,
				    unsigned long flags)
{
	unsigned int i, bestdiv = 0;
	unsigned long parent_rate, best = 0, now, maxdiv;
	unsigned long parent_rate_saved = *best_parent_rate;

	if (!rate)
		rate = 1;

	maxdiv = div_mask(width);

	if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
		parent_rate = *best_parent_rate;
		bestdiv = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate);
		if (bestdiv < 3)
			bestdiv = 0;
		else
			bestdiv = (bestdiv - 3) / 2;
		bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
		return bestdiv;
	}

	/*
	 * The maximum divider we can use without overflowing
	 * unsigned long in rate * i below
	 */
	maxdiv = min(ULONG_MAX / rate, maxdiv);

	for (i = 0; i <= maxdiv; i++) {
		if (((u64)rate * (i * 2 + 3)) == ((u64)parent_rate_saved * 2)) {
			/*
			 * It's the most ideal case if the requested rate can be
			 * divided from parent clock without needing to change
			 * parent rate, so return the divider immediately.
			 */
			*best_parent_rate = parent_rate_saved;
			return i;
		}
		parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
						((u64)rate * (i * 2 + 3)) / 2);
		now = DIV_ROUND_UP_ULL(((u64)parent_rate * 2),
				       (i * 2 + 3));

		if (_is_best_half_div(rate, now, best, flags)) {
			bestdiv = i;
			best = now;
			*best_parent_rate = parent_rate;
		}
	}

	if (!bestdiv) {
		bestdiv = div_mask(width);
		*best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), 1);
	}

	return bestdiv;
}

static long clk_half_divider_round_rate(struct clk_hw *hw, unsigned long rate,
					unsigned long *prate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	int div;

	div = clk_half_divider_bestdiv(hw, rate, prate,
				       divider->width,
				       divider->flags);

	return DIV_ROUND_UP_ULL(((u64)*prate * 2), div * 2 + 3);
}

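/*
 * Note on CLK_DIVIDER_HIWORD_MASK below: on these SoCs the upper 16 bits
 * of the register act as a write-enable mask for the lower 16, so the
 * divider field can be updated with a single write instead of a
 * read-modify-write sequence.
 */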
static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned int value;
	unsigned long flags = 0;
	u32 val;

	value = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate);
	value = (value - 3) / 2;
	value = min_t(unsigned int, value, div_mask(divider->width));

	if (divider->lock)
		spin_lock_irqsave(divider->lock, flags);
	else
		__acquire(divider->lock);

	if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
		val = div_mask(divider->width) << (divider->shift + 16);
	} else {
		val = readl(divider->reg);
		val &= ~(div_mask(divider->width) << divider->shift);
	}
	val |= value << divider->shift;
	writel(val, divider->reg);

	if (divider->lock)
		spin_unlock_irqrestore(divider->lock, flags);
	else
		__release(divider->lock);

	return 0;
}

const struct clk_ops clk_half_divider_ops = {
	.recalc_rate = clk_half_divider_recalc_rate,
	.round_rate = clk_half_divider_round_rate,
	.set_rate = clk_half_divider_set_rate,
};
EXPORT_SYMBOL_GPL(clk_half_divider_ops);

/**
 * Register a clock branch.
 * Most clock branches have a form like
 *
 * src1 --|--\
 *        |M |--[GATE]-[DIV]-
 * src2 --|--/
 *
 * sometimes without one of those components.
 */
struct clk *rockchip_clk_register_halfdiv(const char *name,
					  const char *const *parent_names,
					  u8 num_parents, void __iomem *base,
					  int muxdiv_offset, u8 mux_shift,
					  u8 mux_width, u8 mux_flags,
					  u8 div_shift, u8 div_width,
					  u8 div_flags, int gate_offset,
					  u8 gate_shift, u8 gate_flags,
					  unsigned long flags,
					  spinlock_t *lock)
{
	struct clk *clk;
	struct clk_mux *mux = NULL;
	struct clk_gate *gate = NULL;
	struct clk_divider *div = NULL;
	const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
			     *gate_ops = NULL;

	if (num_parents > 1) {
		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return ERR_PTR(-ENOMEM);

		mux->reg = base + muxdiv_offset;
		mux->shift = mux_shift;
		mux->mask = BIT(mux_width) - 1;
		mux->flags = mux_flags;
		mux->lock = lock;
		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
							  : &clk_mux_ops;
	}

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			goto err_gate;

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	if (div_width > 0) {
		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div)
			goto err_div;

		div->flags = div_flags;
		div->reg = base + muxdiv_offset;
		div->shift = div_shift;
		div->width = div_width;
		div->lock = lock;
		div_ops = &clk_half_divider_ops;
	}

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     mux ? &mux->hw : NULL, mux_ops,
				     div ? &div->hw : NULL, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags);

	return clk;
err_div:
	kfree(gate);
err_gate:
	kfree(mux);
	return ERR_PTR(-ENOMEM);
}
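
/*
 * Usage sketch (editor's illustration, not part of the upstream file): the
 * per-SoC Rockchip clock drivers normally reach this helper through their
 * clock branch tables rather than calling it directly.  A direct call would
 * look roughly like this; the clock name, parent names, register offsets,
 * shifts, widths and the spinlock are hypothetical placeholders:
 *
 *	static DEFINE_SPINLOCK(demo_lock);
 *	static const char *const demo_parents[] = { "xin24m", "gpll" };
 *	struct clk *clk;
 *
 *	// muxdiv offset 0x100: mux at bit 14 (1 bit wide), half divider at bits [12:8]
 *	// gate offset 0x200: gate at bit 3
 *	clk = rockchip_clk_register_halfdiv("hclk_demo", demo_parents,
 *					    ARRAY_SIZE(demo_parents), reg_base,
 *					    0x100, 14, 1, 0,
 *					    8, 5, 0,
 *					    0x200, 3, 0,
 *					    CLK_SET_RATE_PARENT, &demo_lock);
 *	if (IS_ERR(clk))
 *		pr_err("failed to register hclk_demo\n");
 */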