// SPDX-License-Identifier: GPL-2.0
/*
 * OMAP hardware spinlock driver
 *
 * Copyright (C) 2010-2015 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Simon Que <sque@ti.com>
 *          Hari Kanigeri <h-kanigeri2@ti.com>
 *          Ohad Ben-Cohen <ohad@wizery.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/hwspinlock.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include "hwspinlock_internal.h"

/* Spinlock register offsets */
#define SYSSTATUS_OFFSET		0x0014
#define LOCK_BASE_OFFSET		0x0800

#define SPINLOCK_NUMLOCKS_BIT_OFFSET	(24)

/* Possible values of SPINLOCK_LOCK_REG */
#define SPINLOCK_NOTTAKEN		(0)	/* free */
#define SPINLOCK_TAKEN			(1)	/* locked */

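/*
 * On this IP, reading a lock register returns its previous value and marks
 * the lock taken, so a read of SPINLOCK_NOTTAKEN means the caller has just
 * acquired the lock.
 */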
static int omap_hwspinlock_trylock(struct hwspinlock *lock)
{
	void __iomem *lock_addr = lock->priv;

	/* attempt to acquire the lock by reading its value */
	return (SPINLOCK_NOTTAKEN == readl(lock_addr));
}

static void omap_hwspinlock_unlock(struct hwspinlock *lock)
{
	void __iomem *lock_addr = lock->priv;

	/* release the lock by writing 0 to it */
	writel(SPINLOCK_NOTTAKEN, lock_addr);
}

/*
 * Relax the OMAP interconnect while spinning on it.
 *
 * The specs recommend that the retry delay be just over half of the
 * time that a requester is expected to hold the lock.
 *
 * The number below is taken from a hardware specs example; it is
 * somewhat arbitrary.
 */
static void omap_hwspinlock_relax(struct hwspinlock *lock)
{
	ndelay(50);
}

static const struct hwspinlock_ops omap_hwspinlock_ops = {
	.trylock = omap_hwspinlock_trylock,
	.unlock = omap_hwspinlock_unlock,
	.relax = omap_hwspinlock_relax,
};

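/*
 * These callbacks are never called directly; the hwspinlock core invokes
 * them on behalf of users of the public hwspinlock API. Illustrative sketch
 * of typical client usage (not part of this driver):
 *
 *	struct hwspinlock *hwlock;
 *	unsigned long flags;
 *	int ret;
 *
 *	hwlock = hwspin_lock_request_specific(0);
 *	if (!hwlock)
 *		return -EBUSY;
 *	ret = hwspin_lock_timeout_irqsave(hwlock, 100, &flags);
 *	if (!ret) {
 *		... access the resource shared with the remote core ...
 *		hwspin_unlock_irqrestore(hwlock, &flags);
 *	}
 *	hwspin_lock_free(hwlock);
 */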
static int omap_hwspinlock_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct hwspinlock_device *bank;
	struct hwspinlock *hwlock;
	struct resource *res;
	void __iomem *io_base;
	int num_locks, i, ret;
	/* Only a single hwspinlock block device is supported */
	int base_id = 0;

	if (!node)
		return -ENODEV;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	io_base = ioremap(res->start, resource_size(res));
	if (!io_base)
		return -ENOMEM;

	/*
	 * make sure the module is enabled and clocked before reading
	 * the module SYSSTATUS register
	 */
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		goto iounmap_base;
	}

	/* Determine number of locks */
	i = readl(io_base + SYSSTATUS_OFFSET);
	i >>= SPINLOCK_NUMLOCKS_BIT_OFFSET;
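	/* NUMLOCKS is one-hot: 1, 2, 4 or 8, i.e. 32, 64, 128 or 256 locks */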

	/*
	 * runtime PM will make sure the clock of this module is
	 * enabled again iff at least one lock is requested
	 */
	ret = pm_runtime_put(&pdev->dev);
	if (ret < 0)
		goto iounmap_base;

	/* one of the four lsb's must be set, and nothing else */
	if (hweight_long(i & 0xf) != 1 || i > 8) {
		ret = -EINVAL;
		goto iounmap_base;
	}

	num_locks = i * 32; /* actual number of locks in this device */

	bank = kzalloc(struct_size(bank, lock, num_locks), GFP_KERNEL);
	if (!bank) {
		ret = -ENOMEM;
		goto iounmap_base;
	}

	platform_set_drvdata(pdev, bank);

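	/* each lock is a 32-bit register at LOCK_BASE_OFFSET + 4 * its index */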
	for (i = 0, hwlock = &bank->lock[0]; i < num_locks; i++, hwlock++)
		hwlock->priv = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;

	ret = hwspin_lock_register(bank, &pdev->dev, &omap_hwspinlock_ops,
				   base_id, num_locks);
	if (ret)
		goto reg_fail;

	dev_dbg(&pdev->dev, "Registered %d locks with HwSpinlock core\n",
		num_locks);

	return 0;

reg_fail:
	kfree(bank);
iounmap_base:
	pm_runtime_disable(&pdev->dev);
	iounmap(io_base);
	return ret;
}

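/*
 * The bank must be unregistered from the hwspinlock core before the
 * I/O region is unmapped and the bank memory is freed.
 */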
static int omap_hwspinlock_remove(struct platform_device *pdev)
{
	struct hwspinlock_device *bank = platform_get_drvdata(pdev);
	/* the first lock's register address encodes the base of the block */
	void __iomem *io_base = bank->lock[0].priv - LOCK_BASE_OFFSET;
	int ret;

	ret = hwspin_lock_unregister(bank);
	if (ret) {
		dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
		return ret;
	}

	pm_runtime_disable(&pdev->dev);
	iounmap(io_base);
	kfree(bank);

	return 0;
}

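/*
 * Both supported compatibles describe the same programming model, so the
 * driver does not need to distinguish between them.
 */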
static const struct of_device_id omap_hwspinlock_of_match[] = {
	{ .compatible = "ti,omap4-hwspinlock", },
	{ .compatible = "ti,am654-hwspinlock", },
	{ /* end */ },
};
MODULE_DEVICE_TABLE(of, omap_hwspinlock_of_match);

static struct platform_driver omap_hwspinlock_driver = {
	.probe = omap_hwspinlock_probe,
	.remove = omap_hwspinlock_remove,
	.driver = {
		.name = "omap_hwspinlock",
		.of_match_table = of_match_ptr(omap_hwspinlock_of_match),
	},
};

static int __init omap_hwspinlock_init(void)
{
	return platform_driver_register(&omap_hwspinlock_driver);
}
/* board init code might need to reserve hwspinlocks for predefined purposes */
postcore_initcall(omap_hwspinlock_init);

static void __exit omap_hwspinlock_exit(void)
{
	platform_driver_unregister(&omap_hwspinlock_driver);
}
module_exit(omap_hwspinlock_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock driver for OMAP");
MODULE_AUTHOR("Simon Que <sque@ti.com>");
MODULE_AUTHOR("Hari Kanigeri <h-kanigeri2@ti.com>");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");