Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

 1) Fix sk_psock reference count leak on receive, from Xiyu Yang.

 2) CONFIG_HNS should be invisible, from Geert Uytterhoeven.

 3) Don't allow locking route MTUs in ipv6, RFCs actually forbid this,
    from Maciej Żenczykowski.

 4) ipv4 route redirect backoff wasn't actually enforced, from Paolo
    Abeni.

 5) Fix netprio cgroup v2 leak, from Zefan Li.

 6) Fix infinite loop on rmmod in conntrack, from Florian Westphal.

 7) Fix tcp SO_RCVLOWAT hangs, from Eric Dumazet.

 8) Various bpf probe handling fixes, from Daniel Borkmann.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (68 commits)
  selftests: mptcp: pm: rm the right tmp file
  dpaa2-eth: properly handle buffer size restrictions
  bpf: Restrict bpf_trace_printk()'s %s usage and add %pks, %pus specifier
  bpf: Add bpf_probe_read_{user, kernel}_str() to do_refine_retval_range
  bpf: Restrict bpf_probe_read{, str}() only to archs where they work
  MAINTAINERS: Mark networking drivers as Maintained.
  ipmr: Add lockdep expression to ipmr_for_each_table macro
  ipmr: Fix RCU list debugging warning
  drivers: net: hamradio: Fix suspicious RCU usage warning in bpqether.c
  net: phy: broadcom: fix BCM54XX_SHD_SCR3_TRDDAPD value for BCM54810
  tcp: fix error recovery in tcp_zerocopy_receive()
  MAINTAINERS: Add Jakub to networking drivers.
  MAINTAINERS: another add of Karsten Graul for S390 networking
  drivers: ipa: fix typos for ipa_smp2p structure doc
  pppoe: only process PADT targeted at local interfaces
  selftests/bpf: Enforce returning 0 for fentry/fexit programs
  bpf: Enforce returning 0 for fentry/fexit progs
  net: stmmac: fix num_por initialization
  security: Fix the default value of secid_to_secctx hook
  libbpf: Fix register naming in PT_REGS s390 macros
  ...
commit f85c1598dd
Linus Torvalds, 2020-05-15 13:10:06 -07:00
78 changed files with 460 additions and 205 deletions


@ -112,6 +112,20 @@ used when printing stack backtraces. The specifier takes into
consideration the effect of compiler optimisations which may occur
when tail-calls are used and marked with the noreturn GCC attribute.
Probed Pointers from BPF / tracing
----------------------------------
::
%pks kernel string
%pus user string
The ``k`` and ``u`` specifiers are used for printing prior probed memory from
either kernel memory (k) or user memory (u). The subsequent ``s`` specifier
results in printing a string. For direct use in regular vsnprintf() the (k)
and (u) annotation is ignored, however, when used out of BPF's bpf_trace_printk(),
for example, it reads the memory it is pointing to without faulting.
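
For illustration (not part of the patch), a minimal fentry-based BPF
program using the new specifier might look like this; the attach point
and identifiers here are assumptions, not taken from this commit::

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	SEC("fentry/do_unlinkat")
	int BPF_PROG(trace_unlink, int dfd, struct filename *name)
	{
		/* name->name points at kernel memory, hence %pks */
		char fmt[] = "unlinking %pks\n";

		bpf_trace_printk(fmt, sizeof(fmt), name->name);
		return 0;
	}

	char _license[] SEC("license") = "GPL";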
Kernel Pointers
---------------


@ -11710,8 +11710,9 @@ F: net/core/drop_monitor.c
NETWORKING DRIVERS
M: "David S. Miller" <davem@davemloft.net>
M: Jakub Kicinski <kuba@kernel.org>
L: netdev@vger.kernel.org
S: Odd Fixes
S: Maintained
W: http://www.linuxfoundation.org/en/Net
Q: http://patchwork.ozlabs.org/project/netdev/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
@ -14634,6 +14635,7 @@ F: drivers/iommu/s390-iommu.c
S390 IUCV NETWORK LAYER
M: Julian Wiedmann <jwi@linux.ibm.com>
M: Karsten Graul <kgraul@linux.ibm.com>
M: Ursula Braun <ubraun@linux.ibm.com>
L: linux-s390@vger.kernel.org
S: Supported
@ -14644,6 +14646,7 @@ F: net/iucv/
S390 NETWORK DRIVERS
M: Julian Wiedmann <jwi@linux.ibm.com>
M: Karsten Graul <kgraul@linux.ibm.com>
M: Ursula Braun <ubraun@linux.ibm.com>
L: linux-s390@vger.kernel.org
S: Supported


@ -12,6 +12,7 @@ config ARM
select ARCH_HAS_KEEPINITRD
select ARCH_HAS_KCOV
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PTE_SPECIAL if ARM_LPAE
select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_SETUP_DMA_OPS


@ -147,6 +147,7 @@ CONFIG_I2C_DAVINCI=y
CONFIG_SPI=y
CONFIG_SPI_DAVINCI=y
CONFIG_SPI_SPIDEV=y
CONFIG_PTP_1588_CLOCK=y
CONFIG_PINCTRL_SINGLE=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y


@ -274,6 +274,7 @@ CONFIG_SPI_TI_QSPI=m
CONFIG_HSI=m
CONFIG_OMAP_SSI=m
CONFIG_SSI_PROTOCOL=m
CONFIG_PTP_1588_CLOCK=y
CONFIG_PINCTRL_SINGLE=y
CONFIG_DEBUG_GPIO=y
CONFIG_GPIO_SYSFS=y


@ -20,6 +20,7 @@ config ARM64
select ARCH_HAS_KCOV
select ARCH_HAS_KEEPINITRD
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PTE_DEVMAP
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SETUP_DMA_OPS


@ -68,6 +68,7 @@ config X86
select ARCH_HAS_KCOV if X86_64
select ARCH_HAS_MEM_ENCRYPT
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PMEM_API if X86_64
select ARCH_HAS_PTE_DEVMAP if X86_64
select ARCH_HAS_PTE_SPECIAL


@ -136,25 +136,21 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
oiph = skb_network_header(skb);
skb_reset_network_header(skb);
if (family == AF_INET)
if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
else
err = IP6_ECN_decapsulate(oiph, skb);
#endif
if (unlikely(err)) {
if (log_ecn_error) {
if (family == AF_INET)
if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
net_info_ratelimited("non-ECT from %pI4 "
"with TOS=%#x\n",
&((struct iphdr *)oiph)->saddr,
((struct iphdr *)oiph)->tos);
#if IS_ENABLED(CONFIG_IPV6)
else
net_info_ratelimited("non-ECT from %pI6\n",
&((struct ipv6hdr *)oiph)->saddr);
#endif
}
if (err > 1) {
++bareudp->dev->stats.rx_frame_errors;
@ -350,7 +346,6 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
return err;
}
#if IS_ENABLED(CONFIG_IPV6)
static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct bareudp_dev *bareudp,
const struct ip_tunnel_info *info)
@ -411,7 +406,6 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
dst_release(dst);
return err;
}
#endif
static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
{
@ -435,11 +429,9 @@ static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
}
rcu_read_lock();
#if IS_ENABLED(CONFIG_IPV6)
if (info->mode & IP_TUNNEL_INFO_IPV6)
if (IS_ENABLED(CONFIG_IPV6) && info->mode & IP_TUNNEL_INFO_IPV6)
err = bareudp6_xmit_skb(skb, dev, bareudp, info);
else
#endif
err = bareudp_xmit_skb(skb, dev, bareudp, info);
rcu_read_unlock();
@ -467,7 +459,7 @@ static int bareudp_fill_metadata_dst(struct net_device *dev,
use_cache = ip_tunnel_dst_cache_usable(skb, info);
if (ip_tunnel_info_af(info) == AF_INET) {
if (!IS_ENABLED(CONFIG_IPV6) || ip_tunnel_info_af(info) == AF_INET) {
struct rtable *rt;
__be32 saddr;
@ -478,7 +470,6 @@ static int bareudp_fill_metadata_dst(struct net_device *dev,
ip_rt_put(rt);
info->key.u.ipv4.src = saddr;
#if IS_ENABLED(CONFIG_IPV6)
} else if (ip_tunnel_info_af(info) == AF_INET6) {
struct dst_entry *dst;
struct in6_addr saddr;
@ -492,7 +483,6 @@ static int bareudp_fill_metadata_dst(struct net_device *dev,
dst_release(dst);
info->key.u.ipv6.src = saddr;
#endif
} else {
return -EINVAL;
}
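
The rewrite above relies on the IS_ENABLED() idiom: both branches stay
visible to the compiler, so the IPv6 path is still build-tested even
when CONFIG_IPV6 is off, and dead-code elimination then drops the
unused branch. A minimal sketch of the pattern, with hypothetical
helpers:

	if (IS_ENABLED(CONFIG_IPV6) && wants_v6)
		err = handle_v6(skb);	/* compiled, then discarded if !CONFIG_IPV6 */
	else
		err = handle_v4(skb);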


@ -360,6 +360,7 @@ static void __exit dsa_loop_exit(void)
}
module_exit(dsa_loop_exit);
MODULE_SOFTDEP("pre: dsa_loop_bdinfo");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Fainelli");
MODULE_DESCRIPTION("DSA loopback driver");


@ -69,6 +69,7 @@ config BCMGENET
select BCM7XXX_PHY
select MDIO_BCM_UNIMAC
select DIMLIB
select BROADCOM_PHY if ARCH_BCM2835
help
This driver supports the built-in Ethernet MACs found in the
Broadcom BCM7xxx Set Top Box family chipset.


@ -77,6 +77,7 @@ config UCC_GETH
depends on QUICC_ENGINE && PPC32
select FSL_PQ_MDIO
select PHYLIB
select FIXED_PHY
---help---
This driver supports the Gigabit Ethernet mode of the QUICC Engine,
which is available on some Freescale SOCs.
@ -90,6 +91,7 @@ config GIANFAR
depends on HAS_DMA
select FSL_PQ_MDIO
select PHYLIB
select FIXED_PHY
select CRC32
---help---
This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,


@ -3,6 +3,7 @@ menuconfig FSL_DPAA_ETH
tristate "DPAA Ethernet"
depends on FSL_DPAA && FSL_FMAN
select PHYLIB
select FIXED_PHY
select FSL_FMAN_MAC
---help---
Data Path Acceleration Architecture Ethernet driver,


@ -86,7 +86,7 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv,
for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
addr = dpaa2_sg_get_addr(&sgt[i]);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
free_pages((unsigned long)sg_vaddr, 0);
@ -144,7 +144,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
/* Get the address and length from the S/G entry */
sg_addr = dpaa2_sg_get_addr(sge);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
sg_length = dpaa2_sg_get_len(sge);
@ -185,7 +185,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
(page_address(page) - page_address(head_page));
skb_add_rx_frag(skb, i - 1, head_page, page_offset,
sg_length, DPAA2_ETH_RX_BUF_SIZE);
sg_length, priv->rx_buf_size);
}
if (dpaa2_sg_is_final(sge))
@ -211,7 +211,7 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
for (i = 0; i < count; i++) {
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
DMA_BIDIRECTIONAL);
free_pages((unsigned long)vaddr, 0);
}
@ -335,7 +335,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
break;
case XDP_REDIRECT:
dma_unmap_page(priv->net_dev->dev.parent, addr,
DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
priv->rx_buf_size, DMA_BIDIRECTIONAL);
ch->buf_count--;
xdp.data_hard_start = vaddr;
err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
@ -374,7 +374,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
trace_dpaa2_rx_fd(priv->net_dev, fd);
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
fas = dpaa2_get_fas(vaddr, false);
@ -393,13 +393,13 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
return;
}
dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
skb = build_linear_skb(ch, fd, vaddr);
} else if (fd_format == dpaa2_fd_sg) {
WARN_ON(priv->xdp_prog);
dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
skb = build_frag_skb(priv, ch, buf_data);
free_pages((unsigned long)vaddr, 0);
@ -974,7 +974,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
if (!page)
goto err_alloc;
addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, addr)))
goto err_map;
@ -984,7 +984,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
/* tracing point */
trace_dpaa2_eth_buf_seed(priv->net_dev,
page, DPAA2_ETH_RX_BUF_RAW_SIZE,
addr, DPAA2_ETH_RX_BUF_SIZE,
addr, priv->rx_buf_size,
bpid);
}
@ -1720,7 +1720,7 @@ static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
int mfl, linear_mfl;
mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE -
linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
if (mfl > linear_mfl) {
@ -2462,6 +2462,11 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
else
rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
/* We need to ensure that the buffer size seen by WRIOP is a multiple
* of 64 or 256 bytes depending on the WRIOP version.
*/
priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);
/* tx buffer */
buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
buf_layout.pass_timestamp = true;
@ -3126,7 +3131,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
pools_params.num_dpbp = 1;
pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
pools_params.pools[0].backup_pool = 0;
pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
pools_params.pools[0].buffer_size = priv->rx_buf_size;
err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
if (err) {
dev_err(dev, "dpni_set_pools() failed\n");


@ -382,6 +382,7 @@ struct dpaa2_eth_priv {
u16 tx_data_offset;
struct fsl_mc_device *dpbp_dev;
u16 rx_buf_size;
u16 bpid;
struct iommu_domain *iommu_domain;


@ -635,7 +635,7 @@ static int num_rules(struct dpaa2_eth_priv *priv)
static int update_cls_rule(struct net_device *net_dev,
struct ethtool_rx_flow_spec *new_fs,
int location)
unsigned int location)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct dpaa2_eth_cls_rule *rule;


@ -64,7 +64,7 @@ config HNS_MDIO
the PHY
config HNS
tristate "Hisilicon Network Subsystem Support (Framework)"
tristate
---help---
This selects the framework support for Hisilicon Network Subsystem. It
is needed by any driver which provides HNS acceleration engine or make


@ -45,6 +45,8 @@
#define MGMT_MSG_TIMEOUT 5000
#define SET_FUNC_PORT_MGMT_TIMEOUT 25000
#define mgmt_to_pfhwdev(pf_mgmt) \
container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt)
@ -238,12 +240,13 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
u8 *buf_in, u16 in_size,
u8 *buf_out, u16 *out_size,
enum mgmt_direction_type direction,
u16 resp_msg_id)
u16 resp_msg_id, u32 timeout)
{
struct hinic_hwif *hwif = pf_to_mgmt->hwif;
struct pci_dev *pdev = hwif->pdev;
struct hinic_recv_msg *recv_msg;
struct completion *recv_done;
unsigned long timeo;
u16 msg_id;
int err;
@ -267,8 +270,9 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
goto unlock_sync_msg;
}
if (!wait_for_completion_timeout(recv_done,
msecs_to_jiffies(MGMT_MSG_TIMEOUT))) {
timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT);
if (!wait_for_completion_timeout(recv_done, timeo)) {
dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id);
err = -ETIMEDOUT;
goto unlock_sync_msg;
@ -342,6 +346,7 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
{
struct hinic_hwif *hwif = pf_to_mgmt->hwif;
struct pci_dev *pdev = hwif->pdev;
u32 timeout = 0;
if (sync != HINIC_MGMT_MSG_SYNC) {
dev_err(&pdev->dev, "Invalid MGMT msg type\n");
@ -353,9 +358,12 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
return -EINVAL;
}
if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
timeout = SET_FUNC_PORT_MGMT_TIMEOUT;
return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
buf_out, out_size, MGMT_DIRECT_SEND,
MSG_NOT_RESP);
MSG_NOT_RESP, timeout);
}
/**


@ -483,7 +483,6 @@ static int hinic_close(struct net_device *netdev)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
unsigned int flags;
int err;
down(&nic_dev->mgmt_lock);
@ -497,20 +496,9 @@ static int hinic_close(struct net_device *netdev)
up(&nic_dev->mgmt_lock);
err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
if (err) {
netif_err(nic_dev, drv, netdev,
"Failed to set func port state\n");
nic_dev->flags |= (flags & HINIC_INTF_UP);
return err;
}
hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
err = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
if (err) {
netif_err(nic_dev, drv, netdev, "Failed to set port state\n");
nic_dev->flags |= (flags & HINIC_INTF_UP);
return err;
}
hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
if (nic_dev->flags & HINIC_RSS_ENABLE) {
hinic_rss_deinit(nic_dev);


@ -497,13 +497,17 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
GFP_KERNEL);
if (!hw->irq_name)
if (!hw->irq_name) {
err = -ENOMEM;
goto err_free_netdev;
}
hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
sizeof(cpumask_var_t), GFP_KERNEL);
if (!hw->affinity_mask)
if (!hw->affinity_mask) {
err = -ENOMEM;
goto err_free_netdev;
}
err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
if (err < 0) {


@ -1070,7 +1070,7 @@ static int encx24j600_spi_probe(struct spi_device *spi)
if (unlikely(ret)) {
netif_err(priv, probe, ndev, "Error %d initializing card encx24j600 card\n",
ret);
goto out_free;
goto out_stop;
}
eidled = encx24j600_read_reg(priv, EIDLED);
@ -1088,6 +1088,8 @@ static int encx24j600_spi_probe(struct spi_device *spi)
out_unregister:
unregister_netdev(priv->ndev);
out_stop:
kthread_stop(priv->kworker_task);
out_free:
free_netdev(ndev);
@ -1100,6 +1102,7 @@ static int encx24j600_spi_remove(struct spi_device *spi)
struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev);
unregister_netdev(priv->ndev);
kthread_stop(priv->kworker_task);
free_netdev(priv->ndev);


@ -333,8 +333,10 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
goto err_free_alink;
alink->prio_map = kzalloc(abm->prio_map_len, GFP_KERNEL);
if (!alink->prio_map)
if (!alink->prio_map) {
err = -ENOMEM;
goto err_free_alink;
}
/* This is a multi-host app, make sure MAC/PHY is up, but don't
* make the MAC/PHY state follow the state of any of the ports.


@ -2118,6 +2118,7 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
dev_info(ionic->dev, "FW Up: restarting LIFs\n");
ionic_init_devinfo(ionic);
ionic_port_init(ionic);
err = ionic_qcqs_alloc(lif);
if (err)
goto err_out;
@ -2348,7 +2349,17 @@ static int ionic_station_set(struct ionic_lif *lif)
if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
return 0;
if (!ether_addr_equal(ctx.comp.lif_getattr.mac, netdev->dev_addr)) {
if (!is_zero_ether_addr(netdev->dev_addr)) {
/* If the netdev mac is non-zero and doesn't match the default
* device address, it was set by something earlier and we're
* likely here again after a fw-upgrade reset. We need to be
* sure the netdev mac is in our filter list.
*/
if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
netdev->dev_addr))
ionic_lif_addr(lif, netdev->dev_addr, true);
} else {
/* Update the netdev mac with the device's mac */
memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
addr.sa_family = AF_INET;
err = eth_prepare_mac_addr_change(netdev, &addr);
@ -2358,12 +2369,6 @@ static int ionic_station_set(struct ionic_lif *lif)
return 0;
}
if (!is_zero_ether_addr(netdev->dev_addr)) {
netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n",
netdev->dev_addr);
ionic_lif_addr(lif, netdev->dev_addr, false);
}
eth_commit_mac_addr_change(netdev, &addr);
}


@ -509,16 +509,16 @@ int ionic_port_init(struct ionic *ionic)
size_t sz;
int err;
if (idev->port_info)
return 0;
idev->port_info_sz = ALIGN(sizeof(*idev->port_info), PAGE_SIZE);
idev->port_info = dma_alloc_coherent(ionic->dev, idev->port_info_sz,
&idev->port_info_pa,
GFP_KERNEL);
if (!idev->port_info) {
dev_err(ionic->dev, "Failed to allocate port info, aborting\n");
return -ENOMEM;
idev->port_info_sz = ALIGN(sizeof(*idev->port_info), PAGE_SIZE);
idev->port_info = dma_alloc_coherent(ionic->dev,
idev->port_info_sz,
&idev->port_info_pa,
GFP_KERNEL);
if (!idev->port_info) {
dev_err(ionic->dev, "Failed to allocate port info\n");
return -ENOMEM;
}
}
sz = min(sizeof(ident->port.config), sizeof(idev->dev_cmd_regs->data));


@ -2127,6 +2127,8 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{ 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 },
{ 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 },
{ 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 },
/* RTL8401, reportedly works if treated as RTL8101e */
{ 0x7cf, 0x240, RTL_GIGA_MAC_VER_13 },
{ 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 },
{ 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 },
{ 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 },


@ -75,6 +75,11 @@ struct ethqos_emac_por {
unsigned int value;
};
struct ethqos_emac_driver_data {
const struct ethqos_emac_por *por;
unsigned int num_por;
};
struct qcom_ethqos {
struct platform_device *pdev;
void __iomem *rgmii_base;
@ -171,6 +176,11 @@ static const struct ethqos_emac_por emac_v2_3_0_por[] = {
{ .offset = RGMII_IO_MACRO_CONFIG2, .value = 0x00002060 },
};
static const struct ethqos_emac_driver_data emac_v2_3_0_data = {
.por = emac_v2_3_0_por,
.num_por = ARRAY_SIZE(emac_v2_3_0_por),
};
static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
{
unsigned int val;
@ -442,6 +452,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
const struct ethqos_emac_driver_data *data;
struct qcom_ethqos *ethqos;
struct resource *res;
int ret;
@ -471,7 +482,9 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
goto err_mem;
}
ethqos->por = of_device_get_match_data(&pdev->dev);
data = of_device_get_match_data(&pdev->dev);
ethqos->por = data->por;
ethqos->num_por = data->num_por;
ethqos->rgmii_clk = devm_clk_get(&pdev->dev, "rgmii");
if (IS_ERR(ethqos->rgmii_clk)) {
@ -526,7 +539,7 @@ static int qcom_ethqos_remove(struct platform_device *pdev)
}
static const struct of_device_id qcom_ethqos_match[] = {
{ .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_por},
{ .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_data},
{ }
};
MODULE_DEVICE_TABLE(of, qcom_ethqos_match);


@ -49,6 +49,7 @@ config TI_CPSW_PHY_SEL
config TI_CPSW
tristate "TI CPSW Switch Support"
depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
depends on TI_CPTS || !TI_CPTS
select TI_DAVINCI_MDIO
select MFD_SYSCON
select PAGE_POOL
@ -64,6 +65,7 @@ config TI_CPSW_SWITCHDEV
tristate "TI CPSW Switch Support with switchdev"
depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
depends on NET_SWITCHDEV
depends on TI_CPTS || !TI_CPTS
select PAGE_POOL
select TI_DAVINCI_MDIO
select MFD_SYSCON
@ -77,23 +79,16 @@ config TI_CPSW_SWITCHDEV
will be called cpsw_new.
config TI_CPTS
bool "TI Common Platform Time Sync (CPTS) Support"
depends on TI_CPSW || TI_KEYSTONE_NETCP || TI_CPSW_SWITCHDEV || COMPILE_TEST
tristate "TI Common Platform Time Sync (CPTS) Support"
depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
depends on COMMON_CLK
depends on POSIX_TIMERS
depends on PTP_1588_CLOCK
---help---
This driver supports the Common Platform Time Sync unit of
the CPSW Ethernet Switch and Keystone 2 1g/10g Switch Subsystem.
The unit can time stamp PTP UDP/IPv4 and Layer 2 packets, and the
driver offers a PTP Hardware Clock.
config TI_CPTS_MOD
tristate
depends on TI_CPTS
depends on PTP_1588_CLOCK
default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y || TI_CPSW_SWITCHDEV=y
default m
config TI_K3_AM65_CPSW_NUSS
tristate "TI K3 AM654x/J721E CPSW Ethernet driver"
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
@ -114,6 +109,7 @@ config TI_KEYSTONE_NETCP
select TI_DAVINCI_MDIO
depends on OF
depends on KEYSTONE_NAVIGATOR_DMA && KEYSTONE_NAVIGATOR_QMSS
depends on TI_CPTS || !TI_CPTS
---help---
This driver supports TI's Keystone NETCP Core.


@ -13,7 +13,7 @@ obj-$(CONFIG_TI_DAVINCI_EMAC) += ti_davinci_emac.o
ti_davinci_emac-y := davinci_emac.o davinci_cpdma.o
obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
obj-$(CONFIG_TI_CPTS_MOD) += cpts.o
obj-$(CONFIG_TI_CPTS) += cpts.o
obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
ti_cpsw-y := cpsw.o davinci_cpdma.o cpsw_ale.o cpsw_priv.o cpsw_sl.o cpsw_ethtool.o
obj-$(CONFIG_TI_CPSW_SWITCHDEV) += ti_cpsw_new.o


@ -127,7 +127,8 @@ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev)
{
struct bpqdev *bpq;
list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list) {
list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list,
lockdep_rtnl_is_held()) {
if (bpq->ethdev == dev)
return bpq->axdev;
}


@ -399,13 +399,14 @@ void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
/* assert(which < trans->tre_count); */
/* Set the page information for the buffer. We also need to fill in
* the DMA address for the buffer (something dma_map_sg() normally
* does).
* the DMA address and length for the buffer (something dma_map_sg()
* normally does).
*/
sg = &trans->sgl[which];
sg_set_buf(sg, buf, size);
sg_dma_address(sg) = addr;
sg_dma_len(sg) = sg->length;
info = &trans->info[which];
info->opcode = opcode;


@ -628,23 +628,15 @@ static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size)
void ipa_cmd_tag_process_add(struct gsi_trans *trans)
{
ipa_cmd_register_write_add(trans, 0, 0, 0, true);
#if 1
/* Reference these functions to avoid a compile error */
(void)ipa_cmd_ip_packet_init_add;
(void)ipa_cmd_ip_tag_status_add;
(void) ipa_cmd_transfer_add;
#else
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
struct gsi_endpoint *endpoint;
struct ipa_endpoint *endpoint;
endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
ipa_cmd_register_write_add(trans, 0, 0, 0, true);
ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
ipa_cmd_ip_tag_status_add(trans, 0xcba987654321);
ipa_cmd_transfer_add(trans, 4);
#endif
}
/* Returns the number of commands required for the tag process */


@ -53,7 +53,7 @@
* @clock_on: Whether IPA clock is on
* @notified: Whether modem has been notified of clock state
* @disabled: Whether setup ready interrupt handling is disabled
* @mutex mutex: Motex protecting ready interrupt/shutdown interlock
* @mutex: Mutex protecting ready-interrupt/shutdown interlock
* @panic_notifier: Panic notifier structure
*/
struct ipa_smp2p {


@ -225,8 +225,12 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
else
val |= BCM54XX_SHD_SCR3_DLLAPD_DIS;
if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY)
val |= BCM54XX_SHD_SCR3_TRDDAPD;
if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) {
if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810)
val |= BCM54810_SHD_SCR3_TRDDAPD;
else
val |= BCM54XX_SHD_SCR3_TRDDAPD;
}
if (orig != val)
bcm_phy_write_shadow(phydev, BCM54XX_SHD_SCR3, val);


@ -1132,9 +1132,11 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
/* Restart autonegotiation so the new modes get sent to the
* link partner.
*/
ret = phy_restart_aneg(phydev);
if (ret < 0)
return ret;
if (phydev->autoneg == AUTONEG_ENABLE) {
ret = phy_restart_aneg(phydev);
if (ret < 0)
return ret;
}
}
return 0;


@ -490,6 +490,9 @@ static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev,
if (!skb)
goto out;
if (skb->pkt_type != PACKET_HOST)
goto abort;
if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
goto abort;


@ -2659,7 +2659,7 @@ static struct hso_device *hso_create_bulk_serial_device(
if (!
(serial->out_endp =
hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_OUT))) {
dev_err(&interface->dev, "Failed to find BULK IN ep\n");
dev_err(&interface->dev, "Failed to find BULK OUT ep\n");
goto exit2;
}


@ -1243,9 +1243,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
break;
} while (rq->vq->num_free);
if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
u64_stats_update_begin(&rq->stats.syncp);
unsigned long flags;
flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
rq->stats.kicks++;
u64_stats_update_end(&rq->stats.syncp);
u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
}
return !oom;


@ -521,8 +521,10 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
ISM_NR_DMBS);
if (!ism->smcd)
if (!ism->smcd) {
ret = -ENOMEM;
goto err_resource;
}
ism->smcd->priv = ism;
ret = ism_dev_init(ism);


@ -245,6 +245,7 @@
#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN (1 << 0)
#define BCM54810_SHD_CLK_CTL 0x3
#define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9)
#define BCM54810_SHD_SCR3_TRDDAPD 0x0100
/* BCM54612E Registers */
#define BCM54612E_EXP_SPARE0 (MII_BCM54XX_EXP_SEL_ETC + 0x34)


@ -243,7 +243,7 @@ LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, char *name,
char **value)
LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size)
LSM_HOOK(int, 0, ismaclabel, const char *name)
LSM_HOOK(int, 0, secid_to_secctx, u32 secid, char **secdata,
LSM_HOOK(int, -EOPNOTSUPP, secid_to_secctx, u32 secid, char **secdata,
u32 *seclen)
LSM_HOOK(int, 0, secctx_to_secid, const char *secdata, u32 seclen, u32 *secid)
LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen)


@ -105,10 +105,10 @@ struct ptp_system_timestamp {
* parameter func: the desired function to use.
* parameter chan: the function channel index to use.
*
* @do_work: Request driver to perform auxiliary (periodic) operations
* Driver should return delay of the next auxiliary work scheduling
* time (>=0) or negative value in case further scheduling
* is not required.
* @do_aux_work: Request driver to perform auxiliary (periodic) operations
* Driver should return delay of the next auxiliary work
* scheduling time (>=0) or negative value in case further
* scheduling is not required.
*
* Drivers should embed their ptp_clock_info within a private
* structure, obtaining a reference to it using container_of().
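
A hedged sketch of the documented contract (driver name and helper are
hypothetical): the callback performs its periodic work, then returns
the delay in jiffies until the next invocation, or a negative value to
stop rescheduling:

	static long foo_do_aux_work(struct ptp_clock_info *info)
	{
		struct foo_clock *clk = container_of(info, struct foo_clock, caps);

		foo_poll_counters(clk);		/* hypothetical periodic work */
		return msecs_to_jiffies(1000);	/* run again in one second */
	}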


@ -187,6 +187,7 @@ static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
dst->sg.data[which] = src->sg.data[which];
dst->sg.data[which].length = size;
dst->sg.size += size;
src->sg.size -= size;
src->sg.data[which].length -= size;
src->sg.data[which].offset += size;
}


@ -87,7 +87,7 @@ struct nf_conn {
struct hlist_node nat_bysource;
#endif
/* all members below initialized via memset */
u8 __nfct_init_offset[0];
struct { } __nfct_init_offset;
/* If we were expected by an expectation, this will be it */
struct nf_conn *master;


@ -127,6 +127,7 @@ enum nf_flow_flags {
NF_FLOW_HW_DYING,
NF_FLOW_HW_DEAD,
NF_FLOW_HW_REFRESH,
NF_FLOW_HW_PENDING,
};
enum flow_offload_type {


@ -1376,7 +1376,6 @@ static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
rx_opt->num_sacks = 0;
}
u32 tcp_default_init_rwnd(u32 mss);
void tcp_cwnd_restart(struct sock *sk, s32 delta);
static inline void tcp_slow_start_after_idle_check(struct sock *sk)
@ -1421,6 +1420,19 @@ static inline int tcp_full_space(const struct sock *sk)
return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}
/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
* If 87.5 % (7/8) of the space has been consumed, we want to override
* SO_RCVLOWAT constraint, since we are receiving skbs with too small
* len/truesize ratio.
*/
static inline bool tcp_rmem_pressure(const struct sock *sk)
{
int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
int threshold = rcvbuf - (rcvbuf >> 3);
return atomic_read(&sk->sk_rmem_alloc) > threshold;
}
extern void tcp_openreq_init_rwin(struct request_sock *req,
const struct sock *sk_listener,
const struct dst_entry *dst);
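
To make the 7/8 arithmetic above concrete (numbers are illustrative,
not from the patch):

	int rcvbuf = 65536;			/* example sk_rcvbuf */
	int threshold = rcvbuf - (rcvbuf >> 3);	/* 65536 - 8192 = 57344, i.e. 87.5% */

	/* sk_rmem_alloc above 57344 bytes now overrides the SO_RCVLOWAT target */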


@ -143,14 +143,12 @@ void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb
__be16 df, __be16 src_port, __be16 dst_port,
bool xnet, bool nocheck);
#if IS_ENABLED(CONFIG_IPV6)
int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb,
struct net_device *dev, struct in6_addr *saddr,
struct in6_addr *daddr,
__u8 prio, __u8 ttl, __be32 label,
__be16 src_port, __be16 dst_port, bool nocheck);
#endif
void udp_tunnel_sock_release(struct socket *sock);


@ -2261,6 +2261,9 @@ config ASN1
source "kernel/Kconfig.locks"
config ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
bool
config ARCH_HAS_SYNC_CORE_BEFORE_USERMODE
bool


@ -486,7 +486,12 @@ static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
if (!(map->map_flags & BPF_F_MMAPABLE))
return -EINVAL;
return remap_vmalloc_range(vma, array_map_vmalloc_addr(array), pgoff);
if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
return -EINVAL;
return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
vma->vm_pgoff + pgoff);
}
const struct bpf_map_ops array_map_ops = {


@ -1485,8 +1485,10 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
if (err)
goto free_value;
if (copy_to_user(uvalue, value, value_size) != 0)
if (copy_to_user(uvalue, value, value_size) != 0) {
err = -EFAULT;
goto free_value;
}
err = 0;


@ -4340,7 +4340,9 @@ static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
if (ret_type != RET_INTEGER ||
(func_id != BPF_FUNC_get_stack &&
func_id != BPF_FUNC_probe_read_str))
func_id != BPF_FUNC_probe_read_str &&
func_id != BPF_FUNC_probe_read_kernel_str &&
func_id != BPF_FUNC_probe_read_user_str))
return;
ret_reg->smax_value = meta->msize_max_value;
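
For context, a sketch of the kind of program this refinement helps
(the map, context and pointer here are assumptions): since the
verifier now bounds the helper's return value by the size argument,
the returned length can feed a bounded-size operation directly:

	char buf[64];
	long len;

	len = bpf_probe_read_kernel_str(buf, sizeof(buf), ptr);
	if (len > 0)	/* verifier knows 0 < len <= sizeof(buf) */
		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
				      buf, len);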
@ -7059,6 +7061,23 @@ static int check_return_code(struct bpf_verifier_env *env)
return 0;
range = tnum_const(0);
break;
case BPF_PROG_TYPE_TRACING:
switch (env->prog->expected_attach_type) {
case BPF_TRACE_FENTRY:
case BPF_TRACE_FEXIT:
range = tnum_const(0);
break;
case BPF_TRACE_RAW_TP:
case BPF_MODIFY_RETURN:
return 0;
default:
return -ENOTSUPP;
}
break;
case BPF_PROG_TYPE_EXT:
/* freplace program can return anything as its return value
* depends on the to-be-replaced kernel func or bpf program.
*/
default:
return 0;
}


@ -323,17 +323,15 @@ static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
/*
* Only limited trace_printk() conversion specifiers allowed:
* %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
* %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pks %pus %s
*/
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
u64, arg2, u64, arg3)
{
int i, mod[3] = {}, fmt_cnt = 0;
char buf[64], fmt_ptype;
void *unsafe_ptr = NULL;
bool str_seen = false;
int mod[3] = {};
int fmt_cnt = 0;
u64 unsafe_addr;
char buf[64];
int i;
/*
* bpf_check()->check_func_arg()->check_stack_boundary()
@ -359,40 +357,71 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
if (fmt[i] == 'l') {
mod[fmt_cnt]++;
i++;
} else if (fmt[i] == 'p' || fmt[i] == 's') {
} else if (fmt[i] == 'p') {
mod[fmt_cnt]++;
if ((fmt[i + 1] == 'k' ||
fmt[i + 1] == 'u') &&
fmt[i + 2] == 's') {
fmt_ptype = fmt[i + 1];
i += 2;
goto fmt_str;
}
/* disallow any further format extensions */
if (fmt[i + 1] != 0 &&
!isspace(fmt[i + 1]) &&
!ispunct(fmt[i + 1]))
return -EINVAL;
fmt_cnt++;
if (fmt[i] == 's') {
if (str_seen)
/* allow only one '%s' per fmt string */
return -EINVAL;
str_seen = true;
switch (fmt_cnt) {
case 1:
unsafe_addr = arg1;
arg1 = (long) buf;
break;
case 2:
unsafe_addr = arg2;
arg2 = (long) buf;
break;
case 3:
unsafe_addr = arg3;
arg3 = (long) buf;
break;
}
buf[0] = 0;
strncpy_from_unsafe(buf,
(void *) (long) unsafe_addr,
sizeof(buf));
goto fmt_next;
} else if (fmt[i] == 's') {
mod[fmt_cnt]++;
fmt_ptype = fmt[i];
fmt_str:
if (str_seen)
/* allow only one '%s' per fmt string */
return -EINVAL;
str_seen = true;
if (fmt[i + 1] != 0 &&
!isspace(fmt[i + 1]) &&
!ispunct(fmt[i + 1]))
return -EINVAL;
switch (fmt_cnt) {
case 0:
unsafe_ptr = (void *)(long)arg1;
arg1 = (long)buf;
break;
case 1:
unsafe_ptr = (void *)(long)arg2;
arg2 = (long)buf;
break;
case 2:
unsafe_ptr = (void *)(long)arg3;
arg3 = (long)buf;
break;
}
continue;
buf[0] = 0;
switch (fmt_ptype) {
case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
strncpy_from_unsafe(buf, unsafe_ptr,
sizeof(buf));
break;
#endif
case 'k':
strncpy_from_unsafe_strict(buf, unsafe_ptr,
sizeof(buf));
break;
case 'u':
strncpy_from_unsafe_user(buf,
(__force void __user *)unsafe_ptr,
sizeof(buf));
break;
}
goto fmt_next;
}
if (fmt[i] == 'l') {
@ -403,6 +432,7 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
if (fmt[i] != 'i' && fmt[i] != 'd' &&
fmt[i] != 'u' && fmt[i] != 'x')
return -EINVAL;
fmt_next:
fmt_cnt++;
}
@ -825,14 +855,16 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_probe_read_user_proto;
case BPF_FUNC_probe_read_kernel:
return &bpf_probe_read_kernel_proto;
case BPF_FUNC_probe_read:
return &bpf_probe_read_compat_proto;
case BPF_FUNC_probe_read_user_str:
return &bpf_probe_read_user_str_proto;
case BPF_FUNC_probe_read_kernel_str:
return &bpf_probe_read_kernel_str_proto;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
case BPF_FUNC_probe_read:
return &bpf_probe_read_compat_proto;
case BPF_FUNC_probe_read_str:
return &bpf_probe_read_compat_str_proto;
#endif
#ifdef CONFIG_CGROUPS
case BPF_FUNC_get_current_cgroup_id:
return &bpf_get_current_cgroup_id_proto;


@ -475,6 +475,12 @@ static void umh_clean_and_save_pid(struct subprocess_info *info)
{
struct umh_info *umh_info = info->data;
/* cleanup if umh_pipe_setup() was successful but exec failed */
if (info->pid && info->retval) {
fput(umh_info->pipe_to_umh);
fput(umh_info->pipe_from_umh);
}
argv_free(info->argv);
umh_info->pid = info->pid;
}


@ -2168,6 +2168,10 @@ char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode,
* f full name
* P node name, including a possible unit address
* - 'x' For printing the address. Equivalent to "%lx".
* - '[ku]s' For a BPF/tracing related format specifier, e.g. used out of
* bpf_trace_printk() where [ku] prefix specifies either kernel (k)
* or user (u) memory to probe, and:
* s a string, equivalent to "%s" on direct vsnprintf() use
*
* ** When making changes please also update:
* Documentation/core-api/printk-formats.rst
@ -2251,6 +2255,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
if (!IS_ERR(ptr))
break;
return err_ptr(buf, end, ptr, spec);
case 'u':
case 'k':
switch (fmt[1]) {
case 's':
return string(buf, end, ptr, spec);
default:
return error_string(buf, end, "(einval)", spec);
}
}
/* default is to _not_ leak addresses, hash before printing */


@ -8907,11 +8907,13 @@ static void netdev_sync_lower_features(struct net_device *upper,
netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
&feature, lower->name);
lower->wanted_features &= ~feature;
netdev_update_features(lower);
__netdev_update_features(lower);
if (unlikely(lower->features & feature))
netdev_WARN(upper, "failed to disable %pNF on %s!\n",
&feature, lower->name);
else
netdev_features_change(lower);
}
}
}


@ -2590,8 +2590,8 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
}
pop = 0;
} else if (pop >= sge->length - a) {
sge->length = a;
pop -= (sge->length - a);
sge->length = a;
}
}


@ -236,6 +236,8 @@ static void net_prio_attach(struct cgroup_taskset *tset)
struct task_struct *p;
struct cgroup_subsys_state *css;
cgroup_sk_alloc_disable();
cgroup_taskset_for_each(p, css, tset) {
void *v = (void *)(unsigned long)css->id;


@ -1258,7 +1258,8 @@ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def,
return ret_val;
}
secattr->flags |= NETLBL_SECATTR_MLS_CAT;
if (secattr->attr.mls.cat)
secattr->flags |= NETLBL_SECATTR_MLS_CAT;
}
return 0;
@ -1439,7 +1440,8 @@ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def,
return ret_val;
}
secattr->flags |= NETLBL_SECATTR_MLS_CAT;
if (secattr->attr.mls.cat)
secattr->flags |= NETLBL_SECATTR_MLS_CAT;
}
return 0;


@ -109,8 +109,10 @@ static void mroute_clean_tables(struct mr_table *mrt, int flags);
static void ipmr_expire_process(struct timer_list *t);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)
#define ipmr_for_each_table(mrt, net) \
list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list, \
lockdep_rtnl_is_held() || \
list_empty(&net->ipv4.mr_tables))
static struct mr_table *ipmr_mr_table_iter(struct net *net,
struct mr_table *mrt)


@ -915,7 +915,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
/* Check for load limit; set rate_last to the latest sent
* redirect.
*/
if (peer->rate_tokens == 0 ||
if (peer->n_redirects == 0 ||
time_after(jiffies,
(peer->rate_last +
(ip_rt_redirect_load << peer->n_redirects)))) {


@ -476,9 +476,17 @@ static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
int target, struct sock *sk)
{
return (READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq) >= target) ||
(sk->sk_prot->stream_memory_read ?
sk->sk_prot->stream_memory_read(sk) : false);
int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);
if (avail > 0) {
if (avail >= target)
return true;
if (tcp_rmem_pressure(sk))
return true;
}
if (sk->sk_prot->stream_memory_read)
return sk->sk_prot->stream_memory_read(sk);
return false;
}
/*
@ -1756,10 +1764,11 @@ static int tcp_zerocopy_receive(struct sock *sk,
down_read(&current->mm->mmap_sem);
ret = -EINVAL;
vma = find_vma(current->mm, address);
if (!vma || vma->vm_start > address || vma->vm_ops != &tcp_vm_ops)
goto out;
if (!vma || vma->vm_start > address || vma->vm_ops != &tcp_vm_ops) {
up_read(&current->mm->mmap_sem);
return -EINVAL;
}
zc->length = min_t(unsigned long, zc->length, vma->vm_end - address);
tp = tcp_sk(sk);
@ -2154,13 +2163,15 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
tp->urg_data = 0;
tcp_fast_path_check(sk);
}
if (used + offset < skb->len)
continue;
if (TCP_SKB_CB(skb)->has_rxtstamp) {
tcp_update_recv_tstamps(skb, &tss);
cmsg_flags |= 2;
}
if (used + offset < skb->len)
continue;
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
goto found_fin_ok;
if (!(flags & MSG_PEEK))


@ -125,7 +125,6 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
if (!ret) {
msg->sg.start = i;
msg->sg.size -= apply_bytes;
sk_psock_queue_msg(psock, tmp);
sk_psock_data_ready(sk, psock);
} else {
@ -262,14 +261,17 @@ static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
struct sk_psock *psock;
int copied, ret;
if (unlikely(flags & MSG_ERRQUEUE))
return inet_recv_error(sk, msg, len, addr_len);
psock = sk_psock_get(sk);
if (unlikely(!psock))
return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
if (unlikely(flags & MSG_ERRQUEUE))
return inet_recv_error(sk, msg, len, addr_len);
if (!skb_queue_empty(&sk->sk_receive_queue) &&
sk_psock_queue_empty(psock))
sk_psock_queue_empty(psock)) {
sk_psock_put(sk, psock);
return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
}
lock_sock(sk);
msg_bytes_ready:
copied = __tcp_bpf_recvmsg(sk, psock, msg, len, flags);


@ -4757,7 +4757,8 @@ void tcp_data_ready(struct sock *sk)
const struct tcp_sock *tp = tcp_sk(sk);
int avail = tp->rcv_nxt - tp->copied_seq;
if (avail < sk->sk_rcvlowat && !sock_flag(sk, SOCK_DONE))
if (avail < sk->sk_rcvlowat && !tcp_rmem_pressure(sk) &&
!sock_flag(sk, SOCK_DONE))
return;
sk->sk_data_ready(sk);


@ -1047,7 +1047,8 @@ static int calipso_opt_getattr(const unsigned char *calipso,
goto getattr_return;
}
secattr->flags |= NETLBL_SECATTR_MLS_CAT;
if (secattr->attr.mls.cat)
secattr->flags |= NETLBL_SECATTR_MLS_CAT;
}
secattr->type = NETLBL_NLTYPE_CALIPSO;


@ -2722,8 +2722,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
const struct in6_addr *daddr, *saddr;
struct rt6_info *rt6 = (struct rt6_info *)dst;
if (dst_metric_locked(dst, RTAX_MTU))
return;
/* Note: do *NOT* check dst_metric_locked(dst, RTAX_MTU)
* IPv6 pmtu discovery isn't optional, so 'mtu lock' cannot disable it.
* [see also comment in rt6_mtu_change_route()]
*/
if (iph) {
daddr = &iph->daddr;


@ -1629,6 +1629,8 @@ bool mptcp_finish_join(struct sock *sk)
ret = mptcp_pm_allow_new_subflow(msk);
if (ret) {
subflow->map_seq = msk->ack_seq;
/* active connections are already on conn_list */
spin_lock_bh(&msk->join_list_lock);
if (!WARN_ON_ONCE(!list_empty(&subflow->node)))


@ -1012,6 +1012,16 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
if (err)
return err;
/* the newly created socket really belongs to the owning MPTCP master
* socket, even if for additional subflows the allocation is performed
* by a kernel workqueue. Adjust inode references, so that the
* procfs/diag interfaces really show this one belonging to the correct
* user.
*/
SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;
subflow = mptcp_subflow_ctx(sf->sk);
pr_debug("subflow=%p", subflow);


@ -1519,9 +1519,9 @@ __nf_conntrack_alloc(struct net *net,
ct->status = 0;
ct->timeout = 0;
write_pnet(&ct->ct_net, net);
memset(&ct->__nfct_init_offset[0], 0,
memset(&ct->__nfct_init_offset, 0,
offsetof(struct nf_conn, proto) -
offsetof(struct nf_conn, __nfct_init_offset[0]));
offsetof(struct nf_conn, __nfct_init_offset));
nf_ct_zone_add(ct, zone);
@ -2139,8 +2139,19 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
nf_conntrack_lock(lockp);
if (*bucket < nf_conntrack_htable_size) {
hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY)
continue;
/* All nf_conn objects are added to hash table twice, one
* for original direction tuple, once for the reply tuple.
*
* Exception: In the IPS_NAT_CLASH case, only the reply
* tuple is added (the original tuple already existed for
* a different object).
*
* We only need to call the iterator once for each
* conntrack, so we just use the 'reply' direction
* tuple while iterating.
*/
ct = nf_ct_tuplehash_to_ctrack(h);
if (iter(ct, data))
goto found;


@ -284,7 +284,7 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
if (nf_flow_has_expired(flow))
flow_offload_fixup_ct(flow->ct);
else if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
else
flow_offload_fixup_ct_timeout(flow->ct);
flow_offload_free(flow);
@ -361,8 +361,10 @@ static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
{
struct nf_flowtable *flow_table = data;
if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct) ||
test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct))
set_bit(NF_FLOW_TEARDOWN, &flow->flags);
if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
if (test_bit(NF_FLOW_HW, &flow->flags)) {
if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
nf_flow_offload_del(flow_table, flow);


@ -817,6 +817,7 @@ static void flow_offload_work_handler(struct work_struct *work)
WARN_ON_ONCE(1);
}
clear_bit(NF_FLOW_HW_PENDING, &offload->flow->flags);
kfree(offload);
}
@ -831,10 +832,15 @@ nf_flow_offload_work_alloc(struct nf_flowtable *flowtable,
{
struct flow_offload_work *offload;
offload = kmalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
if (!offload)
if (test_and_set_bit(NF_FLOW_HW_PENDING, &flow->flags))
return NULL;
offload = kmalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
if (!offload) {
clear_bit(NF_FLOW_HW_PENDING, &flow->flags);
return NULL;
}
offload->cmd = cmd;
offload->flow = flow;
offload->priority = flowtable->priority;
@ -1056,7 +1062,7 @@ static struct flow_indr_block_entry block_ing_entry = {
int nf_flow_table_offload_init(void)
{
nf_flow_offload_wq = alloc_workqueue("nf_flow_table_offload",
WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
WQ_UNBOUND, 0);
if (!nf_flow_offload_wq)
return -ENOMEM;


@ -79,6 +79,10 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
parent = rcu_dereference_raw(parent->rb_left);
continue;
}
if (nft_set_elem_expired(&rbe->ext))
return false;
if (nft_rbtree_interval_end(rbe)) {
if (nft_set_is_anonymous(set))
return false;
@ -94,6 +98,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
nft_set_elem_active(&interval->ext, genmask) &&
!nft_set_elem_expired(&interval->ext) &&
nft_rbtree_interval_start(interval)) {
*ext = &interval->ext;
return true;
@ -154,6 +159,9 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
continue;
}
if (nft_set_elem_expired(&rbe->ext))
return false;
if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
(*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
(flags & NFT_SET_ELEM_INTERVAL_END)) {
@ -170,6 +178,7 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
nft_set_elem_active(&interval->ext, genmask) &&
!nft_set_elem_expired(&interval->ext) &&
((!nft_rbtree_interval_end(interval) &&
!(flags & NFT_SET_ELEM_INTERVAL_END)) ||
(nft_rbtree_interval_end(interval) &&
@ -418,6 +427,8 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
if (iter->count < iter->skip)
goto cont;
if (nft_set_elem_expired(&rbe->ext))
goto cont;
if (!nft_set_elem_active(&rbe->ext, iter->genmask))
goto cont;


@ -734,6 +734,12 @@ int netlbl_catmap_getlong(struct netlbl_lsm_catmap *catmap,
if ((off & (BITS_PER_LONG - 1)) != 0)
return -EINVAL;
/* a null catmap is equivalent to an empty one */
if (!catmap) {
*offset = (u32)-1;
return 0;
}
if (off < catmap->startbit) {
off = catmap->startbit;
*offset = off;


@ -1739,22 +1739,21 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
return 0;
}
static void tipc_sk_send_ack(struct tipc_sock *tsk)
static struct sk_buff *tipc_sk_build_ack(struct tipc_sock *tsk)
{
struct sock *sk = &tsk->sk;
struct net *net = sock_net(sk);
struct sk_buff *skb = NULL;
struct tipc_msg *msg;
u32 peer_port = tsk_peer_port(tsk);
u32 dnode = tsk_peer_node(tsk);
if (!tipc_sk_connected(sk))
return;
return NULL;
skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
dnode, tsk_own_node(tsk), peer_port,
tsk->portid, TIPC_OK);
if (!skb)
return;
return NULL;
msg = buf_msg(skb);
msg_set_conn_ack(msg, tsk->rcv_unacked);
tsk->rcv_unacked = 0;
@ -1764,7 +1763,19 @@ static void tipc_sk_send_ack(struct tipc_sock *tsk)
tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
msg_set_adv_win(msg, tsk->rcv_win);
}
tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
return skb;
}
static void tipc_sk_send_ack(struct tipc_sock *tsk)
{
struct sk_buff *skb;
skb = tipc_sk_build_ack(tsk);
if (!skb)
return;
tipc_node_xmit_skb(sock_net(&tsk->sk), skb, tsk_peer_node(tsk),
msg_link_selector(buf_msg(skb)));
}
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
@ -1938,7 +1949,6 @@ static int tipc_recvstream(struct socket *sock, struct msghdr *m,
bool peek = flags & MSG_PEEK;
int offset, required, copy, copied = 0;
int hlen, dlen, err, rc;
bool ack = false;
long timeout;
/* Catch invalid receive attempts */
@ -1983,7 +1993,6 @@ static int tipc_recvstream(struct socket *sock, struct msghdr *m,
/* Copy data if msg ok, otherwise return error/partial data */
if (likely(!err)) {
ack = msg_ack_required(hdr);
offset = skb_cb->bytes_read;
copy = min_t(int, dlen - offset, buflen - copied);
rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
@ -2011,7 +2020,7 @@ static int tipc_recvstream(struct socket *sock, struct msghdr *m,
/* Send connection flow control advertisement when applicable */
tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
if (ack || tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
tipc_sk_send_ack(tsk);
/* Exit if all requested data or FIN/error received */
@ -2105,9 +2114,11 @@ static void tipc_sk_proto_rcv(struct sock *sk,
* tipc_sk_filter_connect - check incoming message for a connection-based socket
* @tsk: TIPC socket
* @skb: pointer to message buffer.
* @xmitq: for Nagle ACK if any
* Returns true if message should be added to receive queue, false otherwise
*/
static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb,
struct sk_buff_head *xmitq)
{
struct sock *sk = &tsk->sk;
struct net *net = sock_net(sk);
@ -2171,8 +2182,17 @@ static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
if (!skb_queue_empty(&sk->sk_write_queue))
tipc_sk_push_backlog(tsk);
/* Accept only connection-based messages sent by peer */
if (likely(con_msg && !err && pport == oport && pnode == onode))
if (likely(con_msg && !err && pport == oport &&
pnode == onode)) {
if (msg_ack_required(hdr)) {
struct sk_buff *skb;
skb = tipc_sk_build_ack(tsk);
if (skb)
__skb_queue_tail(xmitq, skb);
}
return true;
}
if (!tsk_peer_msg(tsk, hdr))
return false;
if (!err)
@ -2267,7 +2287,7 @@ static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
while ((skb = __skb_dequeue(&inputq))) {
hdr = buf_msg(skb);
limit = rcvbuf_limit(sk, skb);
if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
if ((sk_conn && !tipc_sk_filter_connect(tsk, skb, xmitq)) ||
(!sk_conn && msg_connected(hdr)) ||
(!grp && msg_in_group(hdr)))
err = TIPC_ERR_NO_PORT;


@ -96,6 +96,16 @@ void tipc_sub_get(struct tipc_subscription *subscription);
(swap_ ? swab32(val__) : val__); \
})
/* tipc_sub_write - write val_ to field_ of struct sub_ in user endian format
*/
#define tipc_sub_write(sub_, field_, val_) \
({ \
struct tipc_subscr *sub__ = sub_; \
u32 val__ = val_; \
int swap_ = !((sub__)->filter & TIPC_FILTER_MASK); \
(sub__)->field_ = swap_ ? swab32(val__) : val__; \
})
/* tipc_evt_write - write val_ to field_ of struct evt_ in user endian format
*/
#define tipc_evt_write(evt_, field_, val_) \


@ -237,8 +237,8 @@ static void tipc_conn_delete_sub(struct tipc_conn *con, struct tipc_subscr *s)
if (!s || !memcmp(s, &sub->evt.s, sizeof(*s))) {
tipc_sub_unsubscribe(sub);
atomic_dec(&tn->subscription_count);
} else if (s) {
break;
if (s)
break;
}
}
spin_unlock_bh(&con->sub_lock);
@ -362,9 +362,10 @@ static int tipc_conn_rcv_sub(struct tipc_topsrv *srv,
{
struct tipc_net *tn = tipc_net(srv->net);
struct tipc_subscription *sub;
u32 s_filter = tipc_sub_read(s, filter);
if (tipc_sub_read(s, filter) & TIPC_SUB_CANCEL) {
s->filter &= __constant_ntohl(~TIPC_SUB_CANCEL);
if (s_filter & TIPC_SUB_CANCEL) {
tipc_sub_write(s, filter, s_filter & ~TIPC_SUB_CANCEL);
tipc_conn_delete_sub(con, s);
return 0;
}
@ -400,7 +401,9 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
return -EWOULDBLOCK;
if (ret == sizeof(s)) {
read_lock_bh(&sk->sk_callback_lock);
ret = tipc_conn_rcv_sub(srv, con, &s);
/* RACE: the connection can be closed in the meantime */
if (likely(connected(con)))
ret = tipc_conn_rcv_sub(srv, con, &s);
read_unlock_bh(&sk->sk_callback_lock);
if (!ret)
return 0;


@ -15,8 +15,6 @@
#define MAX_INDEX 64
#define MAX_STARS 38
char bpf_log_buf[BPF_LOG_BUF_SIZE];
static void stars(char *str, long val, long max, int width)
{
int i;


@ -148,11 +148,11 @@ struct pt_regs;
#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[4])
#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[5])
#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[6])
#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), grps[14])
#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[14])
#define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[11])
#define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[2])
#define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[15])
#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), pdw.addr)
#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), psw.addr)
#elif defined(bpf_target_arm)

View File

@ -217,6 +217,14 @@ void test_mmap(void)
munmap(tmp2, 4 * page_size);
/* map all 4 pages, but with pg_off=1 page, should fail */
tmp1 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
data_map_fd, page_size /* initial page shift */);
if (CHECK(tmp1 != MAP_FAILED, "adv_mmap7", "unexpected success")) {
munmap(tmp1, 4 * page_size);
goto cleanup;
}
tmp1 = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0);
if (CHECK(tmp1 == MAP_FAILED, "last_mmap", "failed %d\n", errno))
goto cleanup;


@ -30,13 +30,13 @@ int prog3(struct bpf_raw_tracepoint_args *ctx)
SEC("fentry/__set_task_comm")
int BPF_PROG(prog4, struct task_struct *tsk, const char *buf, bool exec)
{
return !tsk;
return 0;
}
SEC("fexit/__set_task_comm")
int BPF_PROG(prog5, struct task_struct *tsk, const char *buf, bool exec)
{
return !tsk;
return 0;
}
char _license[] SEC("license") = "GPL";


@ -30,7 +30,7 @@ ret=0
cleanup()
{
rm -f $out
rm -f $err
ip netns del $ns1
}