mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-23 08:33:45 +07:00
752ade68cb
There are many code paths opencoding kvmalloc. Let's use the helper instead. The main difference to kvmalloc is that those users are usually not considering all the aspects of the memory allocator. E.g. allocation requests <= 32kB (with 4kB pages) are basically never failing and invoke OOM killer to satisfy the allocation. This sounds too disruptive for something that has a reasonable fallback - the vmalloc. On the other hand those requests might fallback to vmalloc even when the memory allocator would succeed after several more reclaim/compaction attempts previously. There is no guarantee something like that happens though.

This patch converts many of those places to kv[mz]alloc* helpers because they are more conservative.

Link: http://lkml.kernel.org/r/20170306103327.2766-2-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> # Xen bits
Acked-by: Kees Cook <keescook@chromium.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Andreas Dilger <andreas.dilger@intel.com> # Lustre
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com> # KVM/s390
Acked-by: Dan Williams <dan.j.williams@intel.com> # nvdim
Acked-by: David Sterba <dsterba@suse.com> # btrfs
Acked-by: Ilya Dryomov <idryomov@gmail.com> # Ceph
Acked-by: Tariq Toukan <tariqt@mellanox.com> # mlx4
Acked-by: Leon Romanovsky <leonro@mellanox.com> # mlx5
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Anton Vorontsov <anton@enomsg.org>
Cc: Colin Cross <ccross@android.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: Kent Overstreet <kent.overstreet@gmail.com>
Cc: Santosh Raspatur <santosh@chelsio.com>
Cc: Hariprasad S <hariprasad@chelsio.com>
Cc: Yishai Hadas <yishaih@mellanox.com>
Cc: Oleg Drokin <oleg.drokin@intel.com>
Cc: "Yan, Zheng" <zyan@redhat.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: David Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
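As a concrete illustration of the conversion (a minimal sketch, not code from this file or the patch; buf and size are hypothetical), an open-coded variant and its kvzalloc() replacement look like this:

	/* Before: open-coded fallback from the page allocator to vmalloc. */
	buf = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!buf)
		buf = vzalloc(size);

	/* After: one helper call with a more conservative fallback policy. */
	buf = kvzalloc(size, GFP_KERNEL);

	/* kvfree() releases memory obtained from either allocator. */
	kvfree(buf);

t4_init_clip_tbl() below is one of the converted call sites: both of its allocations now go through kvzalloc() and are freed with kvfree().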
334 lines | 8.4 KiB | C
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (C) 2003-2014 Chelsio Communications. All rights reserved.
 *
 * Written by Deepak (deepak.s@chelsio.com)
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
 * release for licensing terms and conditions.
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/if_vlan.h>
#include <net/addrconf.h>
#include "cxgb4.h"
#include "clip_tbl.h"

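/* The CLIP (Compressed Local IP) hash table is split in two halves:
 * IPv4 addresses hash into the lower clipt_size/2 buckets and IPv6
 * addresses into the upper half, so the two address families never
 * share a bucket.
 */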
static inline unsigned int ipv4_clip_hash(struct clip_tbl *c, const u32 *key)
{
	unsigned int clipt_size_half = c->clipt_size / 2;

	return jhash_1word(*key, 0) % clipt_size_half;
}

static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key)
{
	unsigned int clipt_size_half = d->clipt_size / 2;
	u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];

	return clipt_size_half +
		(jhash_1word(xor, 0) % clipt_size_half);
}

static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
				   u8 v6)
{
	return v6 ? ipv6_clip_hash(ctbl, addr) :
			ipv4_clip_hash(ctbl, addr);
}

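/* clip6_get_mbox() and clip6_release_mbox() issue FW_CLIP_CMD requests
 * over the firmware mailbox to allocate and free, respectively, a
 * hardware CLIP entry for the given IPv6 address.
 */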
static int clip6_get_mbox(const struct net_device *dev,
			  const struct in6_addr *lip)
{
	struct adapter *adap = netdev2adap(dev);
	struct fw_clip_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
			      FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	c.alloc_to_len16 = htonl(FW_CLIP_CMD_ALLOC_F | FW_LEN16(c));
	*(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
	*(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
}

static int clip6_release_mbox(const struct net_device *dev,
			      const struct in6_addr *lip)
{
	struct adapter *adap = netdev2adap(dev);
	struct fw_clip_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
			      FW_CMD_REQUEST_F | FW_CMD_READ_F);
	c.alloc_to_len16 = htonl(FW_CLIP_CMD_FREE_F | FW_LEN16(c));
	*(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
	*(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
}

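/* Look up the address in the hash table under the read lock; on a miss,
 * take the write lock, pull an entry from the free list and, for IPv6,
 * program it into hardware via the mailbox. Each successful call takes
 * a reference that cxgb4_clip_release() must drop.
 */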
int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
{
	struct adapter *adap = netdev2adap(dev);
	struct clip_tbl *ctbl = adap->clipt;
	struct clip_entry *ce, *cte;
	u32 *addr = (u32 *)lip;
	int hash;
	int ret = -1;

	if (!ctbl)
		return 0;

	hash = clip_addr_hash(ctbl, addr, v6);

	read_lock_bh(&ctbl->lock);
	list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
		if (cte->addr6.sin6_family == AF_INET6 && v6)
			ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
				     sizeof(struct in6_addr));
		else if (cte->addr.sin_family == AF_INET && !v6)
			ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
				     sizeof(struct in_addr));
		if (!ret) {
			ce = cte;
			read_unlock_bh(&ctbl->lock);
			goto found;
		}
	}
	read_unlock_bh(&ctbl->lock);

	write_lock_bh(&ctbl->lock);
	if (!list_empty(&ctbl->ce_free_head)) {
		ce = list_first_entry(&ctbl->ce_free_head,
				      struct clip_entry, list);
		list_del(&ce->list);
		INIT_LIST_HEAD(&ce->list);
		spin_lock_init(&ce->lock);
		atomic_set(&ce->refcnt, 0);
		atomic_dec(&ctbl->nfree);
		list_add_tail(&ce->list, &ctbl->hash_list[hash]);
		if (v6) {
			ce->addr6.sin6_family = AF_INET6;
			memcpy(ce->addr6.sin6_addr.s6_addr,
			       lip, sizeof(struct in6_addr));
			ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
			if (ret) {
				write_unlock_bh(&ctbl->lock);
				dev_err(adap->pdev_dev,
					"CLIP FW cmd failed with error %d, "
					"Connections using %pI6c won't be "
					"offloaded",
					ret, ce->addr6.sin6_addr.s6_addr);
				return ret;
			}
		} else {
			ce->addr.sin_family = AF_INET;
			memcpy((char *)(&ce->addr.sin_addr), lip,
			       sizeof(struct in_addr));
		}
	} else {
		write_unlock_bh(&ctbl->lock);
		dev_info(adap->pdev_dev, "CLIP table overflow, "
			 "Connections using %pI6c won't be offloaded",
			 (void *)lip);
		return -ENOMEM;
	}
	write_unlock_bh(&ctbl->lock);
found:
	atomic_inc(&ce->refcnt);

	return 0;
}
EXPORT_SYMBOL(cxgb4_clip_get);

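/* Drop one reference on a CLIP entry; when the last reference goes away,
 * return the entry to the free list and, for IPv6, free the hardware
 * entry via the mailbox.
 */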
void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
{
	struct adapter *adap = netdev2adap(dev);
	struct clip_tbl *ctbl = adap->clipt;
	struct clip_entry *ce, *cte;
	u32 *addr = (u32 *)lip;
	int hash;
	int ret = -1;

	if (!ctbl)
		return;

	hash = clip_addr_hash(ctbl, addr, v6);

	read_lock_bh(&ctbl->lock);
	list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
		if (cte->addr6.sin6_family == AF_INET6 && v6)
			ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
				     sizeof(struct in6_addr));
		else if (cte->addr.sin_family == AF_INET && !v6)
			ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
				     sizeof(struct in_addr));
		if (!ret) {
			ce = cte;
			read_unlock_bh(&ctbl->lock);
			goto found;
		}
	}
	read_unlock_bh(&ctbl->lock);

	return;
found:
	write_lock_bh(&ctbl->lock);
	spin_lock_bh(&ce->lock);
	if (atomic_dec_and_test(&ce->refcnt)) {
		list_del(&ce->list);
		INIT_LIST_HEAD(&ce->list);
		list_add_tail(&ce->list, &ctbl->ce_free_head);
		atomic_inc(&ctbl->nfree);
		if (v6)
			clip6_release_mbox(dev, (const struct in6_addr *)lip);
	}
	spin_unlock_bh(&ce->lock);
	write_unlock_bh(&ctbl->lock);
}
EXPORT_SYMBOL(cxgb4_clip_release);

/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
 * a physical device.
 * The physical device reference is needed to send the actual CLIP command.
 */
static int cxgb4_update_dev_clip(struct net_device *root_dev,
				 struct net_device *dev)
{
	struct inet6_dev *idev = NULL;
	struct inet6_ifaddr *ifa;
	int ret = 0;

	idev = __in6_dev_get(root_dev);
	if (!idev)
		return ret;

	read_lock_bh(&idev->lock);
	list_for_each_entry(ifa, &idev->addr_list, if_list) {
		ret = cxgb4_clip_get(dev, (const u32 *)ifa->addr.s6_addr, 1);
		if (ret < 0)
			break;
	}
	read_unlock_bh(&idev->lock);

	return ret;
}

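/* Install CLIP entries for the device's own IPv6 addresses, then for its
 * master (e.g. bond) device and for every VLAN stacked on top of it.
 */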
int cxgb4_update_root_dev_clip(struct net_device *dev)
{
	struct net_device *root_dev = NULL;
	int i, ret = 0;

	/* First populate the real net device's IPv6 addresses */
	ret = cxgb4_update_dev_clip(dev, dev);
	if (ret)
		return ret;

	/* Parse all bond and vlan devices layered on top of the physical dev */
	root_dev = netdev_master_upper_dev_get_rcu(dev);
	if (root_dev) {
		ret = cxgb4_update_dev_clip(root_dev, dev);
		if (ret)
			return ret;
	}

	for (i = 0; i < VLAN_N_VID; i++) {
		root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
		if (!root_dev)
			continue;

		ret = cxgb4_update_dev_clip(root_dev, dev);
		if (ret)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(cxgb4_update_root_dev_clip);

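/* seq_file handler that dumps every CLIP entry and its reference count,
 * used by the driver's debugfs support.
 */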
int clip_tbl_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	struct clip_tbl *ctbl = adapter->clipt;
	struct clip_entry *ce;
	char ip[60];
	int i;

	read_lock_bh(&ctbl->lock);

	seq_puts(seq, "IP Address                  Users\n");
	for (i = 0 ; i < ctbl->clipt_size; ++i) {
		list_for_each_entry(ce, &ctbl->hash_list[i], list) {
			ip[0] = '\0';
			sprintf(ip, "%pISc", &ce->addr);
			seq_printf(seq, "%-25s   %u\n", ip,
				   atomic_read(&ce->refcnt));
		}
	}
	seq_printf(seq, "Free clip entries : %d\n", atomic_read(&ctbl->nfree));

	read_unlock_bh(&ctbl->lock);

	return 0;
}

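/* Allocate the CLIP table: one struct clip_tbl with clipt_size hash-bucket
 * heads appended, plus an array of clipt_size entries that initially all
 * sit on the free list. Both allocations use kvzalloc() so that large
 * tables can fall back to vmalloc() - this is the call site converted by
 * the commit above.
 */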
struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
				  unsigned int clipt_end)
{
	struct clip_entry *cl_list;
	struct clip_tbl *ctbl;
	unsigned int clipt_size;
	int i;

	if (clipt_start >= clipt_end)
		return NULL;
	clipt_size = clipt_end - clipt_start + 1;
	if (clipt_size < CLIPT_MIN_HASH_BUCKETS)
		return NULL;

	ctbl = kvzalloc(sizeof(*ctbl) +
			clipt_size*sizeof(struct list_head), GFP_KERNEL);
	if (!ctbl)
		return NULL;

	ctbl->clipt_start = clipt_start;
	ctbl->clipt_size = clipt_size;
	INIT_LIST_HEAD(&ctbl->ce_free_head);

	atomic_set(&ctbl->nfree, clipt_size);
	rwlock_init(&ctbl->lock);

	for (i = 0; i < ctbl->clipt_size; ++i)
		INIT_LIST_HEAD(&ctbl->hash_list[i]);

	cl_list = kvzalloc(clipt_size*sizeof(struct clip_entry), GFP_KERNEL);
	if (!cl_list) {
		kvfree(ctbl);
		return NULL;
	}
	ctbl->cl_list = (void *)cl_list;

	for (i = 0; i < clipt_size; i++) {
		INIT_LIST_HEAD(&cl_list[i].list);
		list_add_tail(&cl_list[i].list, &ctbl->ce_free_head);
	}

	return ctbl;
}

void t4_cleanup_clip_tbl(struct adapter *adap)
{
	struct clip_tbl *ctbl = adap->clipt;

	if (ctbl) {
		if (ctbl->cl_list)
			kvfree(ctbl->cl_list);
		kvfree(ctbl);
	}
}
EXPORT_SYMBOL(t4_cleanup_clip_tbl);