Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Handle errors mid-stream of an all dump, from Alexey Kodanev.

 2) Fix build of openvswitch with certain combinations of netfilter
    options, from Arnd Bergmann.

 3) Fix interactions between GSO and BQL, from Eric Dumazet.

 4) Don't put a '/' in RTL8201F's sysfs file name, from Holger
    Hoffstätte.

 5) S390 qeth driver fixes from Julian Wiedmann.

 6) Allow ipv6 link local addresses for netconsole when both source and
    destination are link local, from Matwey V. Kornilov.

 7) Fix the BPF program address seen in /proc/kallsyms, from Song Liu.

 8) Initialize mutex before use in dsa microchip driver, from Tristram
    Ha.

 9) Out-of-bounds access in hns3, from Yunsheng Lin.

10) Various netfilter fixes from Stefano Brivio, Jozsef Kadlecsik, Jiri
    Slaby, Florian Westphal, Eric Westbrook, Andrey Ryabinin, and Pablo
    Neira Ayuso.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (50 commits)
  net: alx: make alx_drv_name static
  net: bpfilter: fix iptables failure if bpfilter_umh is disabled
  sock_diag: fix autoloading of the raw_diag module
  net: core: netpoll: Enable netconsole IPv6 link local address
  ipv6: properly check return value in inet6_dump_all()
  rtnetlink: restore handling of dumpit return value in rtnl_dump_all()
  net/ipv6: Move anycast init/cleanup functions out of CONFIG_PROC_FS
  bonding/802.3ad: fix link_failure_count tracking
  net: phy: realtek: fix RTL8201F sysfs name
  sctp: define SCTP_SS_DEFAULT for Stream schedulers
  sctp: fix strchange_flags name for Stream Change Event
  mlxsw: spectrum: Fix IP2ME CPU policer configuration
  openvswitch: fix linking without CONFIG_NF_CONNTRACK_LABELS
  qed: fix link config error handling
  net: hns3: Fix for out-of-bounds access when setting pfc back pressure
  net/mlx4_en: use __netdev_tx_sent_queue()
  net: do not abort bulk send on BQL status
  net: bql: add __netdev_tx_sent_queue()
  s390/qeth: report 25Gbit link speed
  s390/qeth: sanitize ARP requests
  ...
Committed by Linus Torvalds, 2018-11-06 07:44:04 -08:00
commit a13511dfa8
60 changed files with 653 additions and 563 deletions


@ -3112,13 +3112,13 @@ static int bond_slave_netdev_event(unsigned long event,
case NETDEV_CHANGE:
/* For 802.3ad mode only:
* Getting invalid Speed/Duplex values here will put slave
* in weird state. So mark it as link-down for the time
* in weird state. So mark it as link-fail for the time
* being and let link-monitoring (miimon) set it right when
* correct speeds/duplex are available.
*/
if (bond_update_speed_duplex(slave) &&
BOND_MODE(bond) == BOND_MODE_8023AD)
slave->link = BOND_LINK_DOWN;
slave->link = BOND_LINK_FAIL;
if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_adapter_speed_duplex_changed(slave);


@ -1117,11 +1117,6 @@ static int ksz_switch_init(struct ksz_device *dev)
{
int i;
mutex_init(&dev->reg_mutex);
mutex_init(&dev->stats_mutex);
mutex_init(&dev->alu_mutex);
mutex_init(&dev->vlan_mutex);
dev->ds->ops = &ksz_switch_ops;
for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) {
@ -1206,6 +1201,11 @@ int ksz_switch_register(struct ksz_device *dev)
if (dev->pdata)
dev->chip_id = dev->pdata->chip_id;
mutex_init(&dev->reg_mutex);
mutex_init(&dev->stats_mutex);
mutex_init(&dev->alu_mutex);
mutex_init(&dev->vlan_mutex);
if (ksz_switch_detect(dev))
return -EINVAL;


@ -140,6 +140,5 @@ struct alx_priv {
};
extern const struct ethtool_ops alx_ethtool_ops;
extern const char alx_drv_name[];
#endif


@ -49,7 +49,7 @@
#include "hw.h"
#include "reg.h"
const char alx_drv_name[] = "alx";
static const char alx_drv_name[] = "alx";
static void alx_free_txbuf(struct alx_tx_queue *txq, int entry)
{


@ -1902,9 +1902,6 @@ static void bcm_sysport_netif_start(struct net_device *dev)
intrl2_1_mask_clear(priv, 0xffffffff);
else
intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
/* Last call before we start the real business */
netif_tx_start_all_queues(dev);
}
static void rbuf_init(struct bcm_sysport_priv *priv)
@ -2048,6 +2045,8 @@ static int bcm_sysport_open(struct net_device *dev)
bcm_sysport_netif_start(dev);
netif_tx_start_all_queues(dev);
return 0;
out_clear_rx_int:
@ -2071,7 +2070,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev)
struct bcm_sysport_priv *priv = netdev_priv(dev);
/* stop all software from updating hardware */
netif_tx_stop_all_queues(dev);
netif_tx_disable(dev);
napi_disable(&priv->napi);
cancel_work_sync(&priv->dim.dim.work);
phy_stop(dev->phydev);
@ -2658,12 +2657,12 @@ static int __maybe_unused bcm_sysport_suspend(struct device *d)
if (!netif_running(dev))
return 0;
netif_device_detach(dev);
bcm_sysport_netif_stop(dev);
phy_suspend(dev->phydev);
netif_device_detach(dev);
/* Disable UniMAC RX */
umac_enable_set(priv, CMD_RX_EN, 0);
@ -2746,8 +2745,6 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
goto out_free_rx_ring;
}
netif_device_attach(dev);
/* RX pipe enable */
topctrl_writel(priv, 0, RX_FLUSH_CNTL);
@ -2788,6 +2785,8 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
bcm_sysport_netif_start(dev);
netif_device_attach(dev);
return 0;
out_free_rx_ring:


@ -2855,7 +2855,6 @@ static void bcmgenet_netif_start(struct net_device *dev)
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
netif_tx_start_all_queues(dev);
bcmgenet_enable_tx_napi(priv);
/* Monitor link interrupts now */
@ -2937,6 +2936,8 @@ static int bcmgenet_open(struct net_device *dev)
bcmgenet_netif_start(dev);
netif_tx_start_all_queues(dev);
return 0;
err_irq1:
@ -2958,7 +2959,7 @@ static void bcmgenet_netif_stop(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
bcmgenet_disable_tx_napi(priv);
netif_tx_stop_all_queues(dev);
netif_tx_disable(dev);
/* Disable MAC receive */
umac_enable_set(priv, CMD_RX_EN, false);
@ -3620,13 +3621,13 @@ static int bcmgenet_suspend(struct device *d)
if (!netif_running(dev))
return 0;
netif_device_detach(dev);
bcmgenet_netif_stop(dev);
if (!device_may_wakeup(d))
phy_suspend(dev->phydev);
netif_device_detach(dev);
/* Prepare the device for Wake-on-LAN and switch to the slow clock */
if (device_may_wakeup(d) && priv->wolopts) {
ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
@ -3700,8 +3701,6 @@ static int bcmgenet_resume(struct device *d)
/* Always enable ring 16 - descriptor ring */
bcmgenet_enable_dma(priv, dma_ctrl);
netif_device_attach(dev);
if (!device_may_wakeup(d))
phy_resume(dev->phydev);
@ -3710,6 +3709,8 @@ static int bcmgenet_resume(struct device *d)
bcmgenet_netif_start(dev);
netif_device_attach(dev);
return 0;
out_clk_disable:


@ -1168,14 +1168,14 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
*/
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
struct hclge_vport *vport = hdev->vport;
u32 i, k, qs_bitmap;
int ret;
int i;
for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
qs_bitmap = 0;
u32 qs_bitmap = 0;
int k, ret;
for (k = 0; k < hdev->num_alloc_vport; k++) {
struct hclge_vport *vport = &hdev->vport[k];
u16 qs_id = vport->qs_offset + tc;
u8 grp, sub_grp;
@ -1185,8 +1185,6 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
HCLGE_BP_SUB_GRP_ID_S);
if (i == grp)
qs_bitmap |= (1 << sub_grp);
vport++;
}
ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);


@ -1006,7 +1006,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->packets++;
}
ring->bytes += tx_info->nr_bytes;
netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
if (tx_info->inl)
@ -1044,7 +1043,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(ring->tx_queue);
ring->queue_stopped++;
}
send_doorbell = !skb->xmit_more || netif_xmit_stopped(ring->tx_queue);
send_doorbell = __netdev_tx_sent_queue(ring->tx_queue,
tx_info->nr_bytes,
skb->xmit_more);
real_size = (real_size / 16) & 0x3f;


@ -3568,7 +3568,6 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
burst_size = 7;
break;
case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
is_bytes = true;
rate = 4 * 1024;
burst_size = 4;
break;


@ -1944,9 +1944,12 @@ int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *p_speed_mask)
{
u32 transceiver_type, transceiver_state;
int ret;
qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
&transceiver_type);
ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
&transceiver_type);
if (ret)
return ret;
if (qed_is_transceiver_ready(transceiver_state, transceiver_type) ==
false)


@ -220,7 +220,7 @@ static struct phy_driver realtek_drvs[] = {
.flags = PHY_HAS_INTERRUPT,
}, {
.phy_id = 0x001cc816,
.name = "RTL8201F 10/100Mbps Ethernet",
.name = "RTL8201F Fast Ethernet",
.phy_id_mask = 0x001fffff,
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,


@ -1598,6 +1598,8 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
return ret;
}
cancel_delayed_work_sync(&pdata->carrier_check);
if (pdata->suspend_flags) {
netdev_warn(dev->net, "error during last resume\n");
pdata->suspend_flags = 0;
@ -1840,6 +1842,11 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
*/
if (ret && PMSG_IS_AUTO(message))
usbnet_resume(intf);
if (ret)
schedule_delayed_work(&pdata->carrier_check,
CARRIER_CHECK_DELAY);
return ret;
}


@ -87,6 +87,18 @@ struct qeth_dbf_info {
#define SENSE_RESETTING_EVENT_BYTE 1
#define SENSE_RESETTING_EVENT_FLAG 0x80
static inline u32 qeth_get_device_id(struct ccw_device *cdev)
{
struct ccw_dev_id dev_id;
u32 id;
ccw_device_get_id(cdev, &dev_id);
id = dev_id.devno;
id |= (u32) (dev_id.ssid << 16);
return id;
}
/*
* Common IO related definitions
*/
@ -97,7 +109,8 @@ struct qeth_dbf_info {
#define CARD_RDEV_ID(card) dev_name(&card->read.ccwdev->dev)
#define CARD_WDEV_ID(card) dev_name(&card->write.ccwdev->dev)
#define CARD_DDEV_ID(card) dev_name(&card->data.ccwdev->dev)
#define CHANNEL_ID(channel) dev_name(&channel->ccwdev->dev)
#define CCW_DEVID(cdev) (qeth_get_device_id(cdev))
#define CARD_DEVID(card) (CCW_DEVID(CARD_RDEV(card)))
/**
* card stuff
@ -830,6 +843,11 @@ struct qeth_trap_id {
/*some helper functions*/
#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
static inline bool qeth_netdev_is_registered(struct net_device *dev)
{
return dev->netdev_ops != NULL;
}
static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
unsigned int elements)
{
@ -973,7 +991,7 @@ int qeth_wait_for_threads(struct qeth_card *, unsigned long);
int qeth_do_run_thread(struct qeth_card *, unsigned long);
void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
int qeth_core_hardsetup_card(struct qeth_card *);
int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok);
void qeth_print_status_message(struct qeth_card *);
int qeth_init_qdio_queues(struct qeth_card *);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
@ -1028,11 +1046,6 @@ int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
void qeth_trace_features(struct qeth_card *);
void qeth_close_dev(struct qeth_card *);
int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16,
long,
int (*reply_cb)(struct qeth_card *,
struct qeth_reply *, unsigned long),
void *);
int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
enum qeth_ipa_funcs,


@ -167,6 +167,8 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
return "OSD_1000";
case QETH_LINK_TYPE_10GBIT_ETH:
return "OSD_10GIG";
case QETH_LINK_TYPE_25GBIT_ETH:
return "OSD_25GIG";
case QETH_LINK_TYPE_LANE_ETH100:
return "OSD_FE_LANE";
case QETH_LINK_TYPE_LANE_TR:
@ -554,8 +556,8 @@ static int __qeth_issue_next_read(struct qeth_card *card)
if (!iob) {
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
QETH_DBF_MESSAGE(2, "%s issue_next_read failed: no iob "
"available\n", dev_name(&card->gdev->dev));
QETH_DBF_MESSAGE(2, "issue_next_read on device %x failed: no iob available\n",
CARD_DEVID(card));
return -ENOMEM;
}
qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
@ -563,8 +565,8 @@ static int __qeth_issue_next_read(struct qeth_card *card)
rc = ccw_device_start(channel->ccwdev, channel->ccw,
(addr_t) iob, 0, 0);
if (rc) {
QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
"rc=%i\n", dev_name(&card->gdev->dev), rc);
QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
rc, CARD_DEVID(card));
atomic_set(&channel->irq_pending, 0);
card->read_or_write_problem = 1;
qeth_schedule_recovery(card);
@ -613,16 +615,14 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
const char *ipa_name;
int com = cmd->hdr.command;
ipa_name = qeth_get_ipa_cmd_name(com);
if (rc)
QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s/%s returned "
"x%X \"%s\"\n",
ipa_name, com, dev_name(&card->gdev->dev),
QETH_CARD_IFNAME(card), rc,
qeth_get_ipa_msg(rc));
QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
ipa_name, com, CARD_DEVID(card), rc,
qeth_get_ipa_msg(rc));
else
QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s/%s succeeded\n",
ipa_name, com, dev_name(&card->gdev->dev),
QETH_CARD_IFNAME(card));
QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
ipa_name, com, CARD_DEVID(card));
}
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
@ -711,7 +711,7 @@ static int qeth_check_idx_response(struct qeth_card *card,
QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
if ((buffer[2] & 0xc0) == 0xc0) {
QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#02x\n",
QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
buffer[4]);
QETH_CARD_TEXT(card, 2, "ckidxres");
QETH_CARD_TEXT(card, 2, " idxterm");
@ -972,8 +972,8 @@ static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
QETH_CARD_TEXT(card, 2, "CGENCHK");
dev_warn(&cdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n",
dev_name(&cdev->dev), dstat, cstat);
QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
CCW_DEVID(cdev), dstat, cstat);
print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
16, 1, irb, 64, 1);
return 1;
@ -1013,8 +1013,8 @@ static long qeth_check_irb_error(struct qeth_card *card,
switch (PTR_ERR(irb)) {
case -EIO:
QETH_DBF_MESSAGE(2, "%s i/o-error on device\n",
dev_name(&cdev->dev));
QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
CCW_DEVID(cdev));
QETH_CARD_TEXT(card, 2, "ckirberr");
QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
break;
@ -1031,8 +1031,8 @@ static long qeth_check_irb_error(struct qeth_card *card,
}
break;
default:
QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n",
dev_name(&cdev->dev), PTR_ERR(irb));
QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
PTR_ERR(irb), CCW_DEVID(cdev));
QETH_CARD_TEXT(card, 2, "ckirberr");
QETH_CARD_TEXT(card, 2, " rc???");
}
@ -1114,9 +1114,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
dev_warn(&channel->ccwdev->dev,
"The qeth device driver failed to recover "
"an error on the device\n");
QETH_DBF_MESSAGE(2, "%s sense data available. cstat "
"0x%X dstat 0x%X\n",
dev_name(&channel->ccwdev->dev), cstat, dstat);
QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
CCW_DEVID(channel->ccwdev), cstat,
dstat);
print_hex_dump(KERN_WARNING, "qeth: irb ",
DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
print_hex_dump(KERN_WARNING, "qeth: sense data ",
@ -1890,8 +1890,8 @@ static int qeth_idx_activate_channel(struct qeth_card *card,
if (channel->state != CH_STATE_ACTIVATING) {
dev_warn(&channel->ccwdev->dev, "The qeth device driver"
" failed to recover an error on the device\n");
QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n",
dev_name(&channel->ccwdev->dev));
QETH_DBF_MESSAGE(2, "IDX activate timed out on channel %x\n",
CCW_DEVID(channel->ccwdev));
QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
return -ETIME;
}
@ -1926,17 +1926,15 @@ static void qeth_idx_write_cb(struct qeth_card *card,
"The adapter is used exclusively by another "
"host\n");
else
QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:"
" negative reply\n",
dev_name(&channel->ccwdev->dev));
QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
CCW_DEVID(channel->ccwdev));
goto out;
}
memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: "
"function level mismatch (sent: 0x%x, received: "
"0x%x)\n", dev_name(&channel->ccwdev->dev),
card->info.func_level, temp);
QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
CCW_DEVID(channel->ccwdev),
card->info.func_level, temp);
goto out;
}
channel->state = CH_STATE_UP;
@ -1973,9 +1971,8 @@ static void qeth_idx_read_cb(struct qeth_card *card,
"insufficient authorization\n");
break;
default:
QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:"
" negative reply\n",
dev_name(&channel->ccwdev->dev));
QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
CCW_DEVID(channel->ccwdev));
}
QETH_CARD_TEXT_(card, 2, "idxread%c",
QETH_IDX_ACT_CAUSE_CODE(iob->data));
@ -1984,10 +1981,9 @@ static void qeth_idx_read_cb(struct qeth_card *card,
memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
if (temp != qeth_peer_func_level(card->info.func_level)) {
QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function "
"level mismatch (sent: 0x%x, received: 0x%x)\n",
dev_name(&channel->ccwdev->dev),
card->info.func_level, temp);
QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
CCW_DEVID(channel->ccwdev),
card->info.func_level, temp);
goto out;
}
memcpy(&card->token.issuer_rm_r,
@ -2096,9 +2092,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
(addr_t) iob, 0, 0, event_timeout);
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc) {
QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
"ccw_device_start rc = %i\n",
dev_name(&channel->ccwdev->dev), rc);
QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
CARD_DEVID(card), rc);
QETH_CARD_TEXT_(card, 2, " err%d", rc);
spin_lock_irq(&card->lock);
list_del_init(&reply->list);
@ -2853,8 +2848,8 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
} else {
dev_warn(&card->gdev->dev,
"The qeth driver ran out of channel command buffers\n");
QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers",
dev_name(&card->gdev->dev));
QETH_DBF_MESSAGE(1, "device %x ran out of channel command buffers",
CARD_DEVID(card));
}
return iob;
@ -2989,10 +2984,9 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
return 0;
default:
if (cmd->hdr.return_code) {
QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled "
"rc=%d\n",
dev_name(&card->gdev->dev),
cmd->hdr.return_code);
QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
CARD_DEVID(card),
cmd->hdr.return_code);
return 0;
}
}
@ -3004,8 +2998,8 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
} else
QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected"
"\n", dev_name(&card->gdev->dev));
QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
CARD_DEVID(card));
return 0;
}
@ -4297,10 +4291,9 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
cmd->data.setadapterparms.hdr.return_code);
if (cmd->data.setadapterparms.hdr.return_code !=
SET_ACCESS_CTRL_RC_SUCCESS)
QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n",
card->gdev->dev.kobj.name,
access_ctrl_req->subcmd_code,
cmd->data.setadapterparms.hdr.return_code);
QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
access_ctrl_req->subcmd_code, CARD_DEVID(card),
cmd->data.setadapterparms.hdr.return_code);
switch (cmd->data.setadapterparms.hdr.return_code) {
case SET_ACCESS_CTRL_RC_SUCCESS:
if (card->options.isolation == ISOLATION_MODE_NONE) {
@ -4312,14 +4305,14 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
}
break;
case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already "
"deactivated\n", dev_name(&card->gdev->dev));
QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
CARD_DEVID(card));
if (fallback)
card->options.isolation = card->options.prev_isolation;
break;
case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already"
" activated\n", dev_name(&card->gdev->dev));
QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
CARD_DEVID(card));
if (fallback)
card->options.isolation = card->options.prev_isolation;
break;
@ -4405,10 +4398,8 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
rc = qeth_setadpparms_set_access_ctrl(card,
card->options.isolation, fallback);
if (rc) {
QETH_DBF_MESSAGE(3,
"IPA(SET_ACCESS_CTRL,%s,%d) sent failed\n",
card->gdev->dev.kobj.name,
rc);
QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
rc, CARD_DEVID(card));
rc = -EOPNOTSUPP;
}
} else if (card->options.isolation != ISOLATION_MODE_NONE) {
@ -4443,7 +4434,8 @@ static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
rc = BMCR_FULLDPLX;
if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
(card->info.link_type != QETH_LINK_TYPE_OSN) &&
(card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
(card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
(card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
rc |= BMCR_SPEED100;
break;
case MII_BMSR: /* Basic mode status register */
@ -4634,8 +4626,8 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
qeth_snmp_command_cb, (void *)&qinfo);
if (rc)
QETH_DBF_MESSAGE(2, "SNMP command failed on %s: (0x%x)\n",
QETH_CARD_IFNAME(card), rc);
QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
CARD_DEVID(card), rc);
else {
if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
rc = -EFAULT;
@ -4869,8 +4861,8 @@ static void qeth_determine_capabilities(struct qeth_card *card)
rc = qeth_read_conf_data(card, (void **) &prcd, &length);
if (rc) {
QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
dev_name(&card->gdev->dev), rc);
QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
CARD_DEVID(card), rc);
QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
goto out_offline;
}
@ -5086,7 +5078,7 @@ static struct ccw_driver qeth_ccw_driver = {
.remove = ccwgroup_remove_ccwdev,
};
int qeth_core_hardsetup_card(struct qeth_card *card)
int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
{
int retries = 3;
int rc;
@ -5096,8 +5088,8 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
qeth_update_from_chp_desc(card);
retry:
if (retries < 3)
QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
dev_name(&card->gdev->dev));
QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
CARD_DEVID(card));
rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
@ -5161,13 +5153,20 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
if (rc == IPA_RC_LAN_OFFLINE) {
dev_warn(&card->gdev->dev,
"The LAN is offline\n");
netif_carrier_off(card->dev);
*carrier_ok = false;
} else {
rc = -ENODEV;
goto out;
}
} else {
netif_carrier_on(card->dev);
*carrier_ok = true;
}
if (qeth_netdev_is_registered(card->dev)) {
if (*carrier_ok)
netif_carrier_on(card->dev);
else
netif_carrier_off(card->dev);
}
card->options.ipa4.supported_funcs = 0;
@ -5201,8 +5200,8 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
out:
dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
"an error on the device\n");
QETH_DBF_MESSAGE(2, "%s Initialization in hardsetup failed! rc=%d\n",
dev_name(&card->gdev->dev), rc);
QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
CARD_DEVID(card), rc);
return rc;
}
EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
@ -5481,11 +5480,12 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
}
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
int qeth_send_setassparms(struct qeth_card *card,
struct qeth_cmd_buffer *iob, __u16 len, long data,
int (*reply_cb)(struct qeth_card *,
struct qeth_reply *, unsigned long),
void *reply_param)
static int qeth_send_setassparms(struct qeth_card *card,
struct qeth_cmd_buffer *iob, u16 len,
long data, int (*reply_cb)(struct qeth_card *,
struct qeth_reply *,
unsigned long),
void *reply_param)
{
int rc;
struct qeth_ipa_cmd *cmd;
@ -5501,7 +5501,6 @@ int qeth_send_setassparms(struct qeth_card *card,
rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
return rc;
}
EXPORT_SYMBOL_GPL(qeth_send_setassparms);
int qeth_send_simple_setassparms_prot(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
@ -6170,8 +6169,14 @@ static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd,
WARN_ON_ONCE(1);
}
/* fallthrough from high to low, to select all legal speeds: */
/* partially does fall through, to also select lower speeds */
switch (maxspeed) {
case SPEED_25000:
ethtool_link_ksettings_add_link_mode(cmd, supported,
25000baseSR_Full);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
25000baseSR_Full);
break;
case SPEED_10000:
ethtool_link_ksettings_add_link_mode(cmd, supported,
10000baseT_Full);
@ -6254,6 +6259,10 @@ int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
cmd->base.speed = SPEED_10000;
cmd->base.port = PORT_FIBRE;
break;
case QETH_LINK_TYPE_25GBIT_ETH:
cmd->base.speed = SPEED_25000;
cmd->base.port = PORT_FIBRE;
break;
default:
cmd->base.speed = SPEED_10;
cmd->base.port = PORT_TP;
@ -6320,6 +6329,9 @@ int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
case CARD_INFO_PORTS_10G:
cmd->base.speed = SPEED_10000;
break;
case CARD_INFO_PORTS_25G:
cmd->base.speed = SPEED_25000;
break;
}
return 0;


@ -90,6 +90,7 @@ enum qeth_link_types {
QETH_LINK_TYPE_GBIT_ETH = 0x03,
QETH_LINK_TYPE_OSN = 0x04,
QETH_LINK_TYPE_10GBIT_ETH = 0x10,
QETH_LINK_TYPE_25GBIT_ETH = 0x12,
QETH_LINK_TYPE_LANE_ETH100 = 0x81,
QETH_LINK_TYPE_LANE_TR = 0x82,
QETH_LINK_TYPE_LANE_ETH1000 = 0x83,
@ -347,6 +348,7 @@ enum qeth_card_info_port_speed {
CARD_INFO_PORTS_100M = 0x00000006,
CARD_INFO_PORTS_1G = 0x00000007,
CARD_INFO_PORTS_10G = 0x00000008,
CARD_INFO_PORTS_25G = 0x0000000A,
};
/* (SET)DELIP(M) IPA stuff ***************************************************/
@ -436,7 +438,7 @@ struct qeth_ipacmd_setassparms {
__u32 flags_32bit;
struct qeth_ipa_caps caps;
struct qeth_checksum_cmd chksum;
struct qeth_arp_cache_entry add_arp_entry;
struct qeth_arp_cache_entry arp_entry;
struct qeth_arp_query_data query_arp;
struct qeth_tso_start_data tso;
__u8 ip[16];


@ -146,11 +146,11 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
QETH_CARD_TEXT(card, 2, "L2Wmac");
rc = qeth_l2_send_setdelmac(card, mac, cmd);
if (rc == -EEXIST)
QETH_DBF_MESSAGE(2, "MAC %pM already registered on %s\n",
mac, QETH_CARD_IFNAME(card));
QETH_DBF_MESSAGE(2, "MAC already registered on device %x\n",
CARD_DEVID(card));
else if (rc)
QETH_DBF_MESSAGE(2, "Failed to register MAC %pM on %s: %d\n",
mac, QETH_CARD_IFNAME(card), rc);
QETH_DBF_MESSAGE(2, "Failed to register MAC on device %x: %d\n",
CARD_DEVID(card), rc);
return rc;
}
@ -163,8 +163,8 @@ static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
QETH_CARD_TEXT(card, 2, "L2Rmac");
rc = qeth_l2_send_setdelmac(card, mac, cmd);
if (rc)
QETH_DBF_MESSAGE(2, "Failed to delete MAC %pM on %s: %d\n",
mac, QETH_CARD_IFNAME(card), rc);
QETH_DBF_MESSAGE(2, "Failed to delete MAC on device %u: %d\n",
CARD_DEVID(card), rc);
return rc;
}
@ -260,9 +260,9 @@ static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "L2sdvcb");
if (cmd->hdr.return_code) {
QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x.\n",
QETH_DBF_MESSAGE(2, "Error in processing VLAN %u on device %x: %#x.\n",
cmd->data.setdelvlan.vlan_id,
QETH_CARD_IFNAME(card), cmd->hdr.return_code);
CARD_DEVID(card), cmd->hdr.return_code);
QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command);
QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
}
@ -455,8 +455,8 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
rc = qeth_vm_request_mac(card);
if (!rc)
goto out;
QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %s: x%x\n",
CARD_BUS_ID(card), rc);
QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %x: %#x\n",
CARD_DEVID(card), rc);
QETH_DBF_TEXT_(SETUP, 2, "err%04x", rc);
/* fall back to alternative mechanism: */
}
@ -468,8 +468,8 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
rc = qeth_setadpparms_change_macaddr(card);
if (!rc)
goto out;
QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %s: x%x\n",
CARD_BUS_ID(card), rc);
QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n",
CARD_DEVID(card), rc);
QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
/* fall back once more: */
}
@ -826,7 +826,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
if (cgdev->state == CCWGROUP_ONLINE)
qeth_l2_set_offline(cgdev);
unregister_netdev(card->dev);
if (qeth_netdev_is_registered(card->dev))
unregister_netdev(card->dev);
}
static const struct ethtool_ops qeth_l2_ethtool_ops = {
@ -862,11 +863,11 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_set_features = qeth_set_features
};
static int qeth_l2_setup_netdev(struct qeth_card *card)
static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
{
int rc;
if (card->dev->netdev_ops)
if (qeth_netdev_is_registered(card->dev))
return 0;
card->dev->priv_flags |= IFF_UNICAST_FLT;
@ -919,6 +920,9 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
qeth_l2_request_initial_mac(card);
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
rc = register_netdev(card->dev);
if (!rc && carrier_ok)
netif_carrier_on(card->dev);
if (rc)
card->dev->netdev_ops = NULL;
return rc;
@ -949,6 +953,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc = 0;
enum qeth_card_states recover_flag;
bool carrier_ok;
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
@ -956,7 +961,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
recover_flag = card->state;
rc = qeth_core_hardsetup_card(card);
rc = qeth_core_hardsetup_card(card, &carrier_ok);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
rc = -ENODEV;
@ -967,7 +972,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
dev_info(&card->gdev->dev,
"The device represents a Bridge Capable Port\n");
rc = qeth_l2_setup_netdev(card);
rc = qeth_l2_setup_netdev(card, carrier_ok);
if (rc)
goto out_remove;


@ -278,9 +278,6 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
QETH_CARD_TEXT(card, 4, "clearip");
if (recover && card->options.sniffer)
return;
spin_lock_bh(&card->ip_lock);
hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
@ -494,9 +491,8 @@ int qeth_l3_setrouting_v4(struct qeth_card *card)
QETH_PROT_IPV4);
if (rc) {
card->options.route4.type = NO_ROUTER;
QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
" on %s. Type set to 'no router'.\n", rc,
QETH_CARD_IFNAME(card));
QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n",
rc, CARD_DEVID(card));
}
return rc;
}
@ -518,9 +514,8 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
QETH_PROT_IPV6);
if (rc) {
card->options.route6.type = NO_ROUTER;
QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
" on %s. Type set to 'no router'.\n", rc,
QETH_CARD_IFNAME(card));
QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n",
rc, CARD_DEVID(card));
}
return rc;
}
@ -663,6 +658,8 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card,
int rc = 0;
int cnt = 3;
if (card->options.sniffer)
return 0;
if (addr->proto == QETH_PROT_IPV4) {
QETH_CARD_TEXT(card, 2, "setaddr4");
@ -697,6 +694,9 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
{
int rc = 0;
if (card->options.sniffer)
return 0;
if (addr->proto == QETH_PROT_IPV4) {
QETH_CARD_TEXT(card, 2, "deladdr4");
QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
@ -1070,8 +1070,8 @@ qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
}
break;
default:
QETH_DBF_MESSAGE(2, "Unknown sniffer action (0x%04x) on %s\n",
cmd->data.diagass.action, QETH_CARD_IFNAME(card));
QETH_DBF_MESSAGE(2, "Unknown sniffer action (%#06x) on device %x\n",
cmd->data.diagass.action, CARD_DEVID(card));
}
return 0;
@ -1517,32 +1517,25 @@ static void qeth_l3_set_rx_mode(struct net_device *dev)
qeth_l3_handle_promisc_mode(card);
}
static const char *qeth_l3_arp_get_error_cause(int *rc)
static int qeth_l3_arp_makerc(int rc)
{
switch (*rc) {
case QETH_IPA_ARP_RC_FAILED:
*rc = -EIO;
return "operation failed";
switch (rc) {
case IPA_RC_SUCCESS:
return 0;
case QETH_IPA_ARP_RC_NOTSUPP:
*rc = -EOPNOTSUPP;
return "operation not supported";
case QETH_IPA_ARP_RC_OUT_OF_RANGE:
*rc = -EINVAL;
return "argument out of range";
case QETH_IPA_ARP_RC_Q_NOTSUPP:
*rc = -EOPNOTSUPP;
return "query operation not supported";
return -EOPNOTSUPP;
case QETH_IPA_ARP_RC_OUT_OF_RANGE:
return -EINVAL;
case QETH_IPA_ARP_RC_Q_NO_DATA:
*rc = -ENOENT;
return "no query data available";
return -ENOENT;
default:
return "unknown error";
return -EIO;
}
}
static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
{
int tmp;
int rc;
QETH_CARD_TEXT(card, 3, "arpstnoe");
@ -1560,13 +1553,10 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
no_entries);
if (rc) {
tmp = rc;
QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on "
"%s: %s (0x%x/%d)\n", QETH_CARD_IFNAME(card),
qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
}
return rc;
if (rc)
QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on device %x: %#x\n",
CARD_DEVID(card), rc);
return qeth_l3_arp_makerc(rc);
}
static __u32 get_arp_entry_size(struct qeth_card *card,
@ -1716,7 +1706,6 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
{
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
int tmp;
int rc;
QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot);
@ -1735,15 +1724,10 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
rc = qeth_l3_send_ipa_arp_cmd(card, iob,
QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
qeth_l3_arp_query_cb, (void *)qinfo);
if (rc) {
tmp = rc;
QETH_DBF_MESSAGE(2,
"Error while querying ARP cache on %s: %s "
"(0x%x/%d)\n", QETH_CARD_IFNAME(card),
qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
}
return rc;
if (rc)
QETH_DBF_MESSAGE(2, "Error while querying ARP cache on device %x: %#x\n",
CARD_DEVID(card), rc);
return qeth_l3_arp_makerc(rc);
}
static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
@ -1793,15 +1777,18 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
return rc;
}
static int qeth_l3_arp_add_entry(struct qeth_card *card,
struct qeth_arp_cache_entry *entry)
static int qeth_l3_arp_modify_entry(struct qeth_card *card,
struct qeth_arp_cache_entry *entry,
enum qeth_arp_process_subcmds arp_cmd)
{
struct qeth_arp_cache_entry *cmd_entry;
struct qeth_cmd_buffer *iob;
char buf[16];
int tmp;
int rc;
QETH_CARD_TEXT(card, 3, "arpadent");
if (arp_cmd == IPA_CMD_ASS_ARP_ADD_ENTRY)
QETH_CARD_TEXT(card, 3, "arpadd");
else
QETH_CARD_TEXT(card, 3, "arpdel");
/*
* currently GuestLAN only supports the ARP assist function
@ -1814,71 +1801,25 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
return -EOPNOTSUPP;
}
iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
IPA_CMD_ASS_ARP_ADD_ENTRY,
sizeof(struct qeth_arp_cache_entry),
QETH_PROT_IPV4);
iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, arp_cmd,
sizeof(*cmd_entry), QETH_PROT_IPV4);
if (!iob)
return -ENOMEM;
rc = qeth_send_setassparms(card, iob,
sizeof(struct qeth_arp_cache_entry),
(unsigned long) entry,
qeth_setassparms_cb, NULL);
if (rc) {
tmp = rc;
qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
QETH_DBF_MESSAGE(2, "Could not add ARP entry for address %s "
"on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
}
return rc;
}
static int qeth_l3_arp_remove_entry(struct qeth_card *card,
struct qeth_arp_cache_entry *entry)
{
struct qeth_cmd_buffer *iob;
char buf[16] = {0, };
int tmp;
int rc;
cmd_entry = &__ipa_cmd(iob)->data.setassparms.data.arp_entry;
ether_addr_copy(cmd_entry->macaddr, entry->macaddr);
memcpy(cmd_entry->ipaddr, entry->ipaddr, 4);
rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
if (rc)
QETH_DBF_MESSAGE(2, "Could not modify (cmd: %#x) ARP entry on device %x: %#x\n",
arp_cmd, CARD_DEVID(card), rc);
QETH_CARD_TEXT(card, 3, "arprment");
/*
* currently GuestLAN only supports the ARP assist function
* IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
* thus we say EOPNOTSUPP for this ARP function
*/
if (card->info.guestlan)
return -EOPNOTSUPP;
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
return -EOPNOTSUPP;
}
memcpy(buf, entry, 12);
iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
IPA_CMD_ASS_ARP_REMOVE_ENTRY,
12,
QETH_PROT_IPV4);
if (!iob)
return -ENOMEM;
rc = qeth_send_setassparms(card, iob,
12, (unsigned long)buf,
qeth_setassparms_cb, NULL);
if (rc) {
tmp = rc;
memset(buf, 0, 16);
qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
QETH_DBF_MESSAGE(2, "Could not delete ARP entry for address %s"
" on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
}
return rc;
return qeth_l3_arp_makerc(rc);
}
static int qeth_l3_arp_flush_cache(struct qeth_card *card)
{
int rc;
int tmp;
QETH_CARD_TEXT(card, 3, "arpflush");
@ -1894,19 +1835,17 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card)
}
rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
if (rc) {
tmp = rc;
QETH_DBF_MESSAGE(2, "Could not flush ARP cache on %s: %s "
"(0x%x/%d)\n", QETH_CARD_IFNAME(card),
qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
}
return rc;
if (rc)
QETH_DBF_MESSAGE(2, "Could not flush ARP cache on device %x: %#x\n",
CARD_DEVID(card), rc);
return qeth_l3_arp_makerc(rc);
}
static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct qeth_card *card = dev->ml_priv;
struct qeth_arp_cache_entry arp_entry;
enum qeth_arp_process_subcmds arp_cmd;
int rc = 0;
switch (cmd) {
@ -1925,27 +1864,16 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data);
break;
case SIOC_QETH_ARP_ADD_ENTRY:
if (!capable(CAP_NET_ADMIN)) {
rc = -EPERM;
break;
}
if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
sizeof(struct qeth_arp_cache_entry)))
rc = -EFAULT;
else
rc = qeth_l3_arp_add_entry(card, &arp_entry);
break;
case SIOC_QETH_ARP_REMOVE_ENTRY:
if (!capable(CAP_NET_ADMIN)) {
rc = -EPERM;
break;
}
if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
sizeof(struct qeth_arp_cache_entry)))
rc = -EFAULT;
else
rc = qeth_l3_arp_remove_entry(card, &arp_entry);
break;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&arp_entry, rq->ifr_data, sizeof(arp_entry)))
return -EFAULT;
arp_cmd = (cmd == SIOC_QETH_ARP_ADD_ENTRY) ?
IPA_CMD_ASS_ARP_ADD_ENTRY :
IPA_CMD_ASS_ARP_REMOVE_ENTRY;
return qeth_l3_arp_modify_entry(card, &arp_entry, arp_cmd);
case SIOC_QETH_ARP_FLUSH_CACHE:
if (!capable(CAP_NET_ADMIN)) {
rc = -EPERM;
@ -2383,12 +2311,12 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_neigh_setup = qeth_l3_neigh_setup,
};
static int qeth_l3_setup_netdev(struct qeth_card *card)
static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
{
unsigned int headroom;
int rc;
if (card->dev->netdev_ops)
if (qeth_netdev_is_registered(card->dev))
return 0;
if (card->info.type == QETH_CARD_TYPE_OSD ||
@ -2457,6 +2385,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
rc = register_netdev(card->dev);
if (!rc && carrier_ok)
netif_carrier_on(card->dev);
out:
if (rc)
card->dev->netdev_ops = NULL;
@ -2497,7 +2428,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
if (cgdev->state == CCWGROUP_ONLINE)
qeth_l3_set_offline(cgdev);
unregister_netdev(card->dev);
if (qeth_netdev_is_registered(card->dev))
unregister_netdev(card->dev);
qeth_l3_clear_ip_htable(card, 0);
qeth_l3_clear_ipato_list(card);
}
@ -2507,6 +2439,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc = 0;
enum qeth_card_states recover_flag;
bool carrier_ok;
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
@ -2514,14 +2447,14 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
recover_flag = card->state;
rc = qeth_core_hardsetup_card(card);
rc = qeth_core_hardsetup_card(card, &carrier_ok);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
rc = -ENODEV;
goto out_remove;
}
rc = qeth_l3_setup_netdev(card);
rc = qeth_l3_setup_netdev(card, carrier_ok);
if (rc)
goto out_remove;


@ -3190,6 +3190,26 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
#endif
}
/* Variant of netdev_tx_sent_queue() for drivers that are aware
* that they should not test BQL status themselves.
* We do want to change __QUEUE_STATE_STACK_XOFF only for the last
* skb of a batch.
* Returns true if the doorbell must be used to kick the NIC.
*/
static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
unsigned int bytes,
bool xmit_more)
{
if (xmit_more) {
#ifdef CONFIG_BQL
dql_queued(&dev_queue->dql, bytes);
#endif
return netif_tx_queue_stopped(dev_queue);
}
netdev_tx_sent_queue(dev_queue, bytes);
return true;
}
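
The helper above is the core of the GSO/BQL fix (item 3 in the summary); the mlx4_en_xmit() hunk earlier in this commit shows the real conversion. For illustration only, a minimal sketch of how a driver transmit path could use it; every foo_* name is hypothetical, only __netdev_tx_sent_queue() itself comes from this patch:

/* Sketch of a driver xmit tail: ring the doorbell only when BQL says so. */
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_tx_ring *ring = foo_pick_ring(dev, skb);	/* hypothetical */

	foo_post_descriptor(ring, skb);				/* hypothetical */

	/* Account skb->len to BQL. Returns true for the last skb of a batch,
	 * or when the queue was just stopped, i.e. the NIC must be kicked.
	 */
	if (__netdev_tx_sent_queue(ring->txq, skb->len, skb->xmit_more))
		foo_ring_doorbell(ring);			/* hypothetical */

	return NETDEV_TX_OK;
}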
/**
* netdev_sent_queue - report the number of bytes queued to hardware
* @dev: network device


@ -314,7 +314,7 @@ enum {
extern ip_set_id_t ip_set_get_byname(struct net *net,
const char *name, struct ip_set **set);
extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index);
extern void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name);
extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);


@ -43,11 +43,11 @@ ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
rcu_assign_pointer(comment->c, c);
}
/* Used only when dumping a set, protected by rcu_read_lock_bh() */
/* Used only when dumping a set, protected by rcu_read_lock() */
static inline int
ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
{
struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);
struct ip_set_comment_rcu *c = rcu_dereference(comment->c);
if (!c)
return 0;


@ -317,6 +317,8 @@ bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
const struct in6_addr *addr);
bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev,
const struct in6_addr *addr);
int ipv6_anycast_init(void);
void ipv6_anycast_cleanup(void);
/* Device notifier */
int register_inet6addr_notifier(struct notifier_block *nb);


@ -146,10 +146,12 @@ struct ifacaddr6 {
struct in6_addr aca_addr;
struct fib6_info *aca_rt;
struct ifacaddr6 *aca_next;
struct hlist_node aca_addr_lst;
int aca_users;
refcount_t aca_refcnt;
unsigned long aca_cstamp;
unsigned long aca_tstamp;
struct rcu_head rcu;
};
#define IFA_HOST IPV6_ADDR_LOOPBACK


@ -153,4 +153,43 @@ void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
const char *fmt, ...) { }
#endif /* CONFIG_SYSCTL */
static inline struct nf_generic_net *nf_generic_pernet(struct net *net)
{
return &net->ct.nf_ct_proto.generic;
}
static inline struct nf_tcp_net *nf_tcp_pernet(struct net *net)
{
return &net->ct.nf_ct_proto.tcp;
}
static inline struct nf_udp_net *nf_udp_pernet(struct net *net)
{
return &net->ct.nf_ct_proto.udp;
}
static inline struct nf_icmp_net *nf_icmp_pernet(struct net *net)
{
return &net->ct.nf_ct_proto.icmp;
}
static inline struct nf_icmp_net *nf_icmpv6_pernet(struct net *net)
{
return &net->ct.nf_ct_proto.icmpv6;
}
#ifdef CONFIG_NF_CT_PROTO_DCCP
static inline struct nf_dccp_net *nf_dccp_pernet(struct net *net)
{
return &net->ct.nf_ct_proto.dccp;
}
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
static inline struct nf_sctp_net *nf_sctp_pernet(struct net *net)
{
return &net->ct.nf_ct_proto.sctp;
}
#endif
#endif /*_NF_CONNTRACK_PROTOCOL_H*/


@ -1635,8 +1635,8 @@ enum nft_ng_attributes {
NFTA_NG_MODULUS,
NFTA_NG_TYPE,
NFTA_NG_OFFSET,
NFTA_NG_SET_NAME,
NFTA_NG_SET_ID,
NFTA_NG_SET_NAME, /* deprecated */
NFTA_NG_SET_ID, /* deprecated */
__NFTA_NG_MAX
};
#define NFTA_NG_MAX (__NFTA_NG_MAX - 1)


@ -11,6 +11,10 @@
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
#ifndef __KERNEL__
#include <limits.h> /* for INT_MIN, INT_MAX */
#endif
/* Bridge Hooks */
/* After promisc drops, checksum checks. */
#define NF_BR_PRE_ROUTING 0


@ -568,6 +568,8 @@ struct sctp_assoc_reset_event {
#define SCTP_ASSOC_CHANGE_DENIED 0x0004
#define SCTP_ASSOC_CHANGE_FAILED 0x0008
#define SCTP_STREAM_CHANGE_DENIED SCTP_ASSOC_CHANGE_DENIED
#define SCTP_STREAM_CHANGE_FAILED SCTP_ASSOC_CHANGE_FAILED
struct sctp_stream_change_event {
__u16 strchange_type;
__u16 strchange_flags;
@ -1151,6 +1153,7 @@ struct sctp_add_streams {
/* SCTP Stream schedulers */
enum sctp_sched_type {
SCTP_SS_FCFS,
SCTP_SS_DEFAULT = SCTP_SS_FCFS,
SCTP_SS_PRIO,
SCTP_SS_RR,
SCTP_SS_MAX = SCTP_SS_RR
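
SCTP_SS_DEFAULT gives applications a name for the kernel's default scheduler (FCFS) without hard-coding it. A hedged userspace sketch, assuming the existing SCTP_STREAM_SCHEDULER socket option and struct sctp_assoc_value from the SCTP headers, neither of which appears in this hunk:

/* Switch an association back to the default stream scheduler (FCFS). */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>	/* assumed to provide SCTP_STREAM_SCHEDULER */

static int sctp_use_default_scheduler(int fd, sctp_assoc_t assoc_id)
{
	struct sctp_assoc_value av = {
		.assoc_id    = assoc_id,
		.assoc_value = SCTP_SS_DEFAULT,	/* new alias for SCTP_SS_FCFS */
	};

	return setsockopt(fd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER,
			  &av, sizeof(av));
}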


@ -553,7 +553,6 @@ bool is_bpf_text_address(unsigned long addr)
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *sym)
{
unsigned long symbol_start, symbol_end;
struct bpf_prog_aux *aux;
unsigned int it = 0;
int ret = -ERANGE;
@ -566,10 +565,9 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
if (it++ != symnum)
continue;
bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
bpf_get_prog_name(aux->prog, sym);
*value = symbol_start;
*value = (unsigned long)aux->prog->bpf_func;
*type = BPF_SYM_ELF_TYPE;
ret = 0;


@ -2078,6 +2078,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
info.jited_prog_len = 0;
info.xlated_prog_len = 0;
info.nr_jited_ksyms = 0;
info.nr_jited_func_lens = 0;
goto done;
}
@ -2158,11 +2159,11 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
}
ulen = info.nr_jited_ksyms;
info.nr_jited_ksyms = prog->aux->func_cnt;
info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
if (info.nr_jited_ksyms && ulen) {
if (bpf_dump_raw_ok()) {
unsigned long ksym_addr;
u64 __user *user_ksyms;
ulong ksym_addr;
u32 i;
/* copy the address of the kernel symbol
@ -2170,10 +2171,17 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
*/
ulen = min_t(u32, info.nr_jited_ksyms, ulen);
user_ksyms = u64_to_user_ptr(info.jited_ksyms);
for (i = 0; i < ulen; i++) {
ksym_addr = (ulong) prog->aux->func[i]->bpf_func;
ksym_addr &= PAGE_MASK;
if (put_user((u64) ksym_addr, &user_ksyms[i]))
if (prog->aux->func_cnt) {
for (i = 0; i < ulen; i++) {
ksym_addr = (unsigned long)
prog->aux->func[i]->bpf_func;
if (put_user((u64) ksym_addr,
&user_ksyms[i]))
return -EFAULT;
}
} else {
ksym_addr = (unsigned long) prog->bpf_func;
if (put_user((u64) ksym_addr, &user_ksyms[0]))
return -EFAULT;
}
} else {
@ -2182,7 +2190,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
}
ulen = info.nr_jited_func_lens;
info.nr_jited_func_lens = prog->aux->func_cnt;
info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
if (info.nr_jited_func_lens && ulen) {
if (bpf_dump_raw_ok()) {
u32 __user *user_lens;
@ -2191,9 +2199,16 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
/* copy the JITed image lengths for each function */
ulen = min_t(u32, info.nr_jited_func_lens, ulen);
user_lens = u64_to_user_ptr(info.jited_func_lens);
for (i = 0; i < ulen; i++) {
func_len = prog->aux->func[i]->jited_len;
if (put_user(func_len, &user_lens[i]))
if (prog->aux->func_cnt) {
for (i = 0; i < ulen; i++) {
func_len =
prog->aux->func[i]->jited_len;
if (put_user(func_len, &user_lens[i]))
return -EFAULT;
}
} else {
func_len = prog->jited_len;
if (put_user(func_len, &user_lens[0]))
return -EFAULT;
}
} else {


@ -3272,7 +3272,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *de
}
skb = next;
if (netif_xmit_stopped(txq) && skb) {
if (netif_tx_queue_stopped(txq) && skb) {
rc = NETDEV_TX_BUSY;
break;
}


@ -717,7 +717,8 @@ int netpoll_setup(struct netpoll *np)
read_lock_bh(&idev->lock);
list_for_each_entry(ifp, &idev->addr_list, if_list) {
if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
!!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
continue;
np->local_ip.in6 = ifp->addr;
err = 0;


@ -3367,7 +3367,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
cb->seq = 0;
}
ret = dumpit(skb, cb);
if (ret < 0)
if (ret)
break;
}
cb->family = idx;


@ -4944,6 +4944,8 @@ static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
*
* This is a helper to do that correctly considering GSO_BY_FRAGS.
*
* @skb: GSO skb
*
* @seg_len: The segmented length (from skb_gso_*_seglen). In the
* GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
*


@ -3279,6 +3279,7 @@ int sock_load_diag_module(int family, int protocol)
#ifdef CONFIG_INET
if (family == AF_INET &&
protocol != IPPROTO_RAW &&
!rcu_access_pointer(inet_protos[protocol]))
return -ENOENT;
#endif


@ -722,10 +722,14 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
if (ip_is_fragment(&iph)) {
skb = skb_share_check(skb, GFP_ATOMIC);
if (skb) {
if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
return skb;
if (pskb_trim_rcsum(skb, netoff + len))
return skb;
if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) {
kfree_skb(skb);
return NULL;
}
if (pskb_trim_rcsum(skb, netoff + len)) {
kfree_skb(skb);
return NULL;
}
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
if (ip_defrag(net, skb, user))
return NULL;


@ -1246,7 +1246,7 @@ int ip_setsockopt(struct sock *sk, int level,
return -ENOPROTOOPT;
err = do_ip_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_BPFILTER
#if IS_ENABLED(CONFIG_BPFILTER_UMH)
if (optname >= BPFILTER_IPT_SO_SET_REPLACE &&
optname < BPFILTER_IPT_SET_MAX)
err = bpfilter_ip_set_sockopt(sk, optname, optval, optlen);
@ -1559,7 +1559,7 @@ int ip_getsockopt(struct sock *sk, int level,
int err;
err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
#ifdef CONFIG_BPFILTER
#if IS_ENABLED(CONFIG_BPFILTER_UMH)
if (optname >= BPFILTER_IPT_SO_GET_INFO &&
optname < BPFILTER_IPT_GET_MAX)
err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
@ -1596,7 +1596,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
err = do_ip_getsockopt(sk, level, optname, optval, optlen,
MSG_CMSG_COMPAT);
#ifdef CONFIG_BPFILTER
#if IS_ENABLED(CONFIG_BPFILTER_UMH)
if (optname >= BPFILTER_IPT_SO_GET_INFO &&
optname < BPFILTER_IPT_GET_MAX)
err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);


@ -1001,6 +1001,9 @@ static int __init inet6_init(void)
err = ip6_flowlabel_init();
if (err)
goto ip6_flowlabel_fail;
err = ipv6_anycast_init();
if (err)
goto ipv6_anycast_fail;
err = addrconf_init();
if (err)
goto addrconf_fail;
@ -1091,6 +1094,8 @@ static int __init inet6_init(void)
ipv6_exthdrs_fail:
addrconf_cleanup();
addrconf_fail:
ipv6_anycast_cleanup();
ipv6_anycast_fail:
ip6_flowlabel_cleanup();
ip6_flowlabel_fail:
ndisc_late_cleanup();


@ -44,8 +44,22 @@
#include <net/checksum.h>
#define IN6_ADDR_HSIZE_SHIFT 8
#define IN6_ADDR_HSIZE BIT(IN6_ADDR_HSIZE_SHIFT)
/* anycast address hash table
*/
static struct hlist_head inet6_acaddr_lst[IN6_ADDR_HSIZE];
static DEFINE_SPINLOCK(acaddr_hash_lock);
static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr);
static u32 inet6_acaddr_hash(struct net *net, const struct in6_addr *addr)
{
u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net);
return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
}
/*
* socket join an anycast group
*/
@ -204,16 +218,39 @@ void ipv6_sock_ac_close(struct sock *sk)
rtnl_unlock();
}
static void ipv6_add_acaddr_hash(struct net *net, struct ifacaddr6 *aca)
{
unsigned int hash = inet6_acaddr_hash(net, &aca->aca_addr);
spin_lock(&acaddr_hash_lock);
hlist_add_head_rcu(&aca->aca_addr_lst, &inet6_acaddr_lst[hash]);
spin_unlock(&acaddr_hash_lock);
}
static void ipv6_del_acaddr_hash(struct ifacaddr6 *aca)
{
spin_lock(&acaddr_hash_lock);
hlist_del_init_rcu(&aca->aca_addr_lst);
spin_unlock(&acaddr_hash_lock);
}
static void aca_get(struct ifacaddr6 *aca)
{
refcount_inc(&aca->aca_refcnt);
}
static void aca_free_rcu(struct rcu_head *h)
{
struct ifacaddr6 *aca = container_of(h, struct ifacaddr6, rcu);
fib6_info_release(aca->aca_rt);
kfree(aca);
}
static void aca_put(struct ifacaddr6 *ac)
{
if (refcount_dec_and_test(&ac->aca_refcnt)) {
fib6_info_release(ac->aca_rt);
kfree(ac);
call_rcu(&ac->rcu, aca_free_rcu);
}
}
@ -229,6 +266,7 @@ static struct ifacaddr6 *aca_alloc(struct fib6_info *f6i,
aca->aca_addr = *addr;
fib6_info_hold(f6i);
aca->aca_rt = f6i;
INIT_HLIST_NODE(&aca->aca_addr_lst);
aca->aca_users = 1;
/* aca_tstamp should be updated upon changes */
aca->aca_cstamp = aca->aca_tstamp = jiffies;
@ -285,6 +323,8 @@ int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr)
aca_get(aca);
write_unlock_bh(&idev->lock);
ipv6_add_acaddr_hash(net, aca);
ip6_ins_rt(net, f6i);
addrconf_join_solict(idev->dev, &aca->aca_addr);
@ -325,6 +365,7 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
else
idev->ac_list = aca->aca_next;
write_unlock_bh(&idev->lock);
ipv6_del_acaddr_hash(aca);
addrconf_leave_solict(idev, &aca->aca_addr);
ip6_del_rt(dev_net(idev->dev), aca->aca_rt);
@ -352,6 +393,8 @@ void ipv6_ac_destroy_dev(struct inet6_dev *idev)
idev->ac_list = aca->aca_next;
write_unlock_bh(&idev->lock);
ipv6_del_acaddr_hash(aca);
addrconf_leave_solict(idev, &aca->aca_addr);
ip6_del_rt(dev_net(idev->dev), aca->aca_rt);
@ -390,17 +433,25 @@ static bool ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *ad
bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
const struct in6_addr *addr)
{
unsigned int hash = inet6_acaddr_hash(net, addr);
struct net_device *nh_dev;
struct ifacaddr6 *aca;
bool found = false;
rcu_read_lock();
if (dev)
found = ipv6_chk_acast_dev(dev, addr);
else
for_each_netdev_rcu(net, dev)
if (ipv6_chk_acast_dev(dev, addr)) {
hlist_for_each_entry_rcu(aca, &inet6_acaddr_lst[hash],
aca_addr_lst) {
nh_dev = fib6_info_nh_dev(aca->aca_rt);
if (!nh_dev || !net_eq(dev_net(nh_dev), net))
continue;
if (ipv6_addr_equal(&aca->aca_addr, addr)) {
found = true;
break;
}
}
rcu_read_unlock();
return found;
}
@ -540,3 +591,24 @@ void ac6_proc_exit(struct net *net)
remove_proc_entry("anycast6", net->proc_net);
}
#endif
/* Init / cleanup code
*/
int __init ipv6_anycast_init(void)
{
int i;
for (i = 0; i < IN6_ADDR_HSIZE; i++)
INIT_HLIST_HEAD(&inet6_acaddr_lst[i]);
return 0;
}
void ipv6_anycast_cleanup(void)
{
int i;
spin_lock(&acaddr_hash_lock);
for (i = 0; i < IN6_ADDR_HSIZE; i++)
WARN_ON(!hlist_empty(&inet6_acaddr_lst[i]));
spin_unlock(&acaddr_hash_lock);
}


@ -591,7 +591,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
/* fib entries are never clones */
if (arg.filter.flags & RTM_F_CLONED)
return skb->len;
goto out;
w = (void *)cb->args[2];
if (!w) {
@ -621,7 +621,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
tb = fib6_get_table(net, arg.filter.table_id);
if (!tb) {
if (arg.filter.dump_all_families)
return skb->len;
goto out;
NL_SET_ERR_MSG_MOD(cb->extack, "FIB table does not exist");
return -ENOENT;
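
The two hunks above route early exits through the function's common exit label instead of returning directly, so per-dump state set up earlier is still torn down. A hedged, generic C sketch of that single-exit pattern (the names here are invented, not the fib dump code):

#include <stdio.h>
#include <stdlib.h>

static int do_dump(int unsupported_filter)
{
    int err = 0;
    char *walker = malloc(64);  /* stands in for the per-dump walker state */

    if (!walker)
        return -1;

    if (unsupported_filter) {
        /* A bare "return 0;" here would leak 'walker'; jumping to the
         * shared exit keeps the cleanup in one place. */
        err = 0;
        goto out;
    }

    /* ... normal dump work would go here ... */

out:
    free(walker);
    return err;
}

int main(void)
{
    printf("%d\n", do_dump(1));
    return 0;
}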

View File

@ -587,11 +587,16 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
*/
ret = -EINPROGRESS;
if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
fq->q.meat == fq->q.len &&
nf_ct_frag6_reasm(fq, skb, dev))
ret = 0;
else
fq->q.meat == fq->q.len) {
unsigned long orefdst = skb->_skb_refdst;
skb->_skb_refdst = 0UL;
if (nf_ct_frag6_reasm(fq, skb, dev))
ret = 0;
skb->_skb_refdst = orefdst;
} else {
skb_dst_drop(skb);
}
out_unlock:
spin_unlock_bh(&fq->q.lock);

View File

@ -55,11 +55,15 @@ MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
MODULE_DESCRIPTION("core IP set support");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
/* When the nfnl mutex is held: */
/* When the nfnl mutex or ip_set_ref_lock is held: */
#define ip_set_dereference(p) \
rcu_dereference_protected(p, lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
rcu_dereference_protected(p, \
lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
lockdep_is_held(&ip_set_ref_lock))
#define ip_set(inst, id) \
ip_set_dereference((inst)->ip_set_list)[id]
#define ip_set_ref_netlink(inst,id) \
rcu_dereference_raw((inst)->ip_set_list)[id]
/* The set types are implemented in modules and registered set types
* can be found in ip_set_type_list. Adding/deleting types is
@ -693,21 +697,20 @@ ip_set_put_byindex(struct net *net, ip_set_id_t index)
EXPORT_SYMBOL_GPL(ip_set_put_byindex);
/* Get the name of a set behind a set index.
* We assume the set is referenced, so it does exist and
* can't be destroyed. The set cannot be renamed due to
* the referencing either.
*
* Set itself is protected by RCU, but its name isn't: to protect against
* renaming, grab ip_set_ref_lock as reader (see ip_set_rename()) and copy the
* name.
*/
const char *
ip_set_name_byindex(struct net *net, ip_set_id_t index)
void
ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name)
{
const struct ip_set *set = ip_set_rcu_get(net, index);
struct ip_set *set = ip_set_rcu_get(net, index);
BUG_ON(!set);
BUG_ON(set->ref == 0);
/* Referenced, so it's safe */
return set->name;
read_lock_bh(&ip_set_ref_lock);
strncpy(name, set->name, IPSET_MAXNAMELEN);
read_unlock_bh(&ip_set_ref_lock);
}
EXPORT_SYMBOL_GPL(ip_set_name_byindex);
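
ip_set_name_byindex() now copies the name into a caller-supplied buffer under ip_set_ref_lock instead of handing back a pointer that a concurrent rename could change underneath the caller. A small pthread sketch of the same copy-under-reader-lock pattern (plain userspace C, not the ipset API; build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define NAMELEN 32

static pthread_rwlock_t ref_lock = PTHREAD_RWLOCK_INITIALIZER;
static char set_name[NAMELEN] = "myset";

/* Caller supplies the buffer; the copy happens under the read lock. */
static void name_byindex(char *name)
{
    pthread_rwlock_rdlock(&ref_lock);
    strncpy(name, set_name, NAMELEN);
    name[NAMELEN - 1] = '\0';
    pthread_rwlock_unlock(&ref_lock);
}

/* A rename takes the same lock as a writer, so readers never observe a
 * half-updated name. */
static void rename_set(const char *new_name)
{
    pthread_rwlock_wrlock(&ref_lock);
    strncpy(set_name, new_name, NAMELEN);
    set_name[NAMELEN - 1] = '\0';
    pthread_rwlock_unlock(&ref_lock);
}

int main(void)
{
    char buf[NAMELEN];

    name_byindex(buf);
    printf("%s\n", buf);
    rename_set("renamed");
    name_byindex(buf);
    printf("%s\n", buf);
    return 0;
}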
@ -961,7 +964,7 @@ static int ip_set_create(struct net *net, struct sock *ctnl,
/* Wraparound */
goto cleanup;
list = kcalloc(i, sizeof(struct ip_set *), GFP_KERNEL);
list = kvcalloc(i, sizeof(struct ip_set *), GFP_KERNEL);
if (!list)
goto cleanup;
/* nfnl mutex is held, both lists are valid */
@ -973,7 +976,7 @@ static int ip_set_create(struct net *net, struct sock *ctnl,
/* Use new list */
index = inst->ip_set_max;
inst->ip_set_max = i;
kfree(tmp);
kvfree(tmp);
ret = 0;
} else if (ret) {
goto cleanup;
@ -1153,7 +1156,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
if (!set)
return -ENOENT;
read_lock_bh(&ip_set_ref_lock);
write_lock_bh(&ip_set_ref_lock);
if (set->ref != 0) {
ret = -IPSET_ERR_REFERENCED;
goto out;
@ -1170,7 +1173,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
strncpy(set->name, name2, IPSET_MAXNAMELEN);
out:
read_unlock_bh(&ip_set_ref_lock);
write_unlock_bh(&ip_set_ref_lock);
return ret;
}
@ -1252,7 +1255,7 @@ ip_set_dump_done(struct netlink_callback *cb)
struct ip_set_net *inst =
(struct ip_set_net *)cb->args[IPSET_CB_NET];
ip_set_id_t index = (ip_set_id_t)cb->args[IPSET_CB_INDEX];
struct ip_set *set = ip_set(inst, index);
struct ip_set *set = ip_set_ref_netlink(inst, index);
if (set->variant->uref)
set->variant->uref(set, cb, false);
@ -1441,7 +1444,7 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
release_refcount:
/* If there was an error or set is done, release set */
if (ret || !cb->args[IPSET_CB_ARG0]) {
set = ip_set(inst, index);
set = ip_set_ref_netlink(inst, index);
if (set->variant->uref)
set->variant->uref(set, cb, false);
pr_debug("release set %s\n", set->name);
@ -2059,7 +2062,7 @@ ip_set_net_init(struct net *net)
if (inst->ip_set_max >= IPSET_INVALID_ID)
inst->ip_set_max = IPSET_INVALID_ID - 1;
list = kcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL);
list = kvcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL);
if (!list)
return -ENOMEM;
inst->is_deleted = false;
@ -2087,7 +2090,7 @@ ip_set_net_exit(struct net *net)
}
}
nfnl_unlock(NFNL_SUBSYS_IPSET);
kfree(rcu_dereference_protected(inst->ip_set_list, 1));
kvfree(rcu_dereference_protected(inst->ip_set_list, 1));
}
static struct pernet_operations ip_set_net_ops = {

View File

@ -213,13 +213,13 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_CIDR]) {
e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
if (e.cidr[0] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
if (tb[IPSET_ATTR_CIDR2]) {
e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
if (e.cidr[1] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
@ -493,13 +493,13 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_CIDR]) {
e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
if (e.cidr[0] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
if (tb[IPSET_ATTR_CIDR2]) {
e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
if (e.cidr[1] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}

View File

@ -148,9 +148,7 @@ __list_set_del_rcu(struct rcu_head * rcu)
{
struct set_elem *e = container_of(rcu, struct set_elem, rcu);
struct ip_set *set = e->set;
struct list_set *map = set->data;
ip_set_put_byindex(map->net, e->id);
ip_set_ext_destroy(set, e);
kfree(e);
}
@ -158,15 +156,21 @@ __list_set_del_rcu(struct rcu_head * rcu)
static inline void
list_set_del(struct ip_set *set, struct set_elem *e)
{
struct list_set *map = set->data;
set->elements--;
list_del_rcu(&e->list);
ip_set_put_byindex(map->net, e->id);
call_rcu(&e->rcu, __list_set_del_rcu);
}
static inline void
list_set_replace(struct set_elem *e, struct set_elem *old)
list_set_replace(struct ip_set *set, struct set_elem *e, struct set_elem *old)
{
struct list_set *map = set->data;
list_replace_rcu(&old->list, &e->list);
ip_set_put_byindex(map->net, old->id);
call_rcu(&old->rcu, __list_set_del_rcu);
}
@ -298,7 +302,7 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
INIT_LIST_HEAD(&e->list);
list_set_init_extensions(set, ext, e);
if (n)
list_set_replace(e, n);
list_set_replace(set, e, n);
else if (next)
list_add_tail_rcu(&e->list, &next->list);
else if (prev)
@ -486,6 +490,7 @@ list_set_list(const struct ip_set *set,
const struct list_set *map = set->data;
struct nlattr *atd, *nested;
u32 i = 0, first = cb->args[IPSET_CB_ARG0];
char name[IPSET_MAXNAMELEN];
struct set_elem *e;
int ret = 0;
@ -504,8 +509,8 @@ list_set_list(const struct ip_set *set,
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested)
goto nla_put_failure;
if (nla_put_string(skb, IPSET_ATTR_NAME,
ip_set_name_byindex(map->net, e->id)))
ip_set_name_byindex(map->net, e->id, name);
if (nla_put_string(skb, IPSET_ATTR_NAME, name))
goto nla_put_failure;
if (ip_set_put_extensions(skb, set, e, true))
goto nla_put_failure;

View File

@ -1073,19 +1073,22 @@ static unsigned int early_drop_list(struct net *net,
return drops;
}
static noinline int early_drop(struct net *net, unsigned int _hash)
static noinline int early_drop(struct net *net, unsigned int hash)
{
unsigned int i;
unsigned int i, bucket;
for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
struct hlist_nulls_head *ct_hash;
unsigned int hash, hsize, drops;
unsigned int hsize, drops;
rcu_read_lock();
nf_conntrack_get_ht(&ct_hash, &hsize);
hash = reciprocal_scale(_hash++, hsize);
if (!i)
bucket = reciprocal_scale(hash, hsize);
else
bucket = (bucket + 1) % hsize;
drops = early_drop_list(net, &ct_hash[hash]);
drops = early_drop_list(net, &ct_hash[bucket]);
rcu_read_unlock();
if (drops) {
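
The change above scales the random starting point once and then steps through consecutive buckets, where the old code re-scaled an incremented value and could keep landing on the same bucket. A hedged sketch that prints both sequences (reciprocal_scale() is reimplemented here with the same formula the kernel uses; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

#define EVICTION_RANGE 8

/* Same formula as the kernel's reciprocal_scale(). */
static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
    return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
    uint32_t hash = 0x12345678, hsize = 1024;
    uint32_t bucket = 0;

    for (unsigned int i = 0; i < EVICTION_RANGE; i++) {
        /* Old behaviour: re-scaling hash + i barely moves the result. */
        uint32_t old_bucket = reciprocal_scale(hash + i, hsize);

        /* New behaviour: scale once, then step to the next bucket. */
        if (!i)
            bucket = reciprocal_scale(hash, hsize);
        else
            bucket = (bucket + 1) % hsize;

        printf("iter %u: old=%u new=%u\n", i, old_bucket, bucket);
    }
    return 0;
}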

View File

@ -384,11 +384,6 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
},
};
static inline struct nf_dccp_net *dccp_pernet(struct net *net)
{
return &net->ct.nf_ct_proto.dccp;
}
static noinline bool
dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
const struct dccp_hdr *dh)
@ -401,7 +396,7 @@ dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
switch (state) {
default:
dn = dccp_pernet(net);
dn = nf_dccp_pernet(net);
if (dn->dccp_loose == 0) {
msg = "not picking up existing connection ";
goto out_invalid;
@ -568,7 +563,7 @@ static int dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
timeouts = dccp_pernet(nf_ct_net(ct))->dccp_timeout;
timeouts = nf_dccp_pernet(nf_ct_net(ct))->dccp_timeout;
nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
return NF_ACCEPT;
@ -681,7 +676,7 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
struct nf_dccp_net *dn = dccp_pernet(net);
struct nf_dccp_net *dn = nf_dccp_pernet(net);
unsigned int *timeouts = data;
int i;
@ -814,7 +809,7 @@ static int dccp_kmemdup_sysctl_table(struct net *net, struct nf_proto_net *pn,
static int dccp_init_net(struct net *net)
{
struct nf_dccp_net *dn = dccp_pernet(net);
struct nf_dccp_net *dn = nf_dccp_pernet(net);
struct nf_proto_net *pn = &dn->pn;
if (!pn->users) {

View File

@ -27,11 +27,6 @@ static bool nf_generic_should_process(u8 proto)
}
}
static inline struct nf_generic_net *generic_pernet(struct net *net)
{
return &net->ct.nf_ct_proto.generic;
}
static bool generic_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
struct net *net, struct nf_conntrack_tuple *tuple)
@ -58,7 +53,7 @@ static int generic_packet(struct nf_conn *ct,
}
if (!timeout)
timeout = &generic_pernet(nf_ct_net(ct))->timeout;
timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout;
nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
return NF_ACCEPT;
@ -72,7 +67,7 @@ static int generic_packet(struct nf_conn *ct,
static int generic_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
struct nf_generic_net *gn = generic_pernet(net);
struct nf_generic_net *gn = nf_generic_pernet(net);
unsigned int *timeout = data;
if (!timeout)
@ -138,7 +133,7 @@ static int generic_kmemdup_sysctl_table(struct nf_proto_net *pn,
static int generic_init_net(struct net *net)
{
struct nf_generic_net *gn = generic_pernet(net);
struct nf_generic_net *gn = nf_generic_pernet(net);
struct nf_proto_net *pn = &gn->pn;
gn->timeout = nf_ct_generic_timeout;

View File

@ -25,11 +25,6 @@
static const unsigned int nf_ct_icmp_timeout = 30*HZ;
static inline struct nf_icmp_net *icmp_pernet(struct net *net)
{
return &net->ct.nf_ct_proto.icmp;
}
static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
struct net *net, struct nf_conntrack_tuple *tuple)
{
@ -103,7 +98,7 @@ static int icmp_packet(struct nf_conn *ct,
}
if (!timeout)
timeout = &icmp_pernet(nf_ct_net(ct))->timeout;
timeout = &nf_icmp_pernet(nf_ct_net(ct))->timeout;
nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
return NF_ACCEPT;
@ -275,7 +270,7 @@ static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
unsigned int *timeout = data;
struct nf_icmp_net *in = icmp_pernet(net);
struct nf_icmp_net *in = nf_icmp_pernet(net);
if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) {
if (!timeout)
@ -337,7 +332,7 @@ static int icmp_kmemdup_sysctl_table(struct nf_proto_net *pn,
static int icmp_init_net(struct net *net)
{
struct nf_icmp_net *in = icmp_pernet(net);
struct nf_icmp_net *in = nf_icmp_pernet(net);
struct nf_proto_net *pn = &in->pn;
in->timeout = nf_ct_icmp_timeout;

View File

@ -30,11 +30,6 @@
static const unsigned int nf_ct_icmpv6_timeout = 30*HZ;
static inline struct nf_icmp_net *icmpv6_pernet(struct net *net)
{
return &net->ct.nf_ct_proto.icmpv6;
}
static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
struct net *net,
@ -87,7 +82,7 @@ static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,
static unsigned int *icmpv6_get_timeouts(struct net *net)
{
return &icmpv6_pernet(net)->timeout;
return &nf_icmpv6_pernet(net)->timeout;
}
/* Returns verdict for packet, or -1 for invalid. */
@ -286,7 +281,7 @@ static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
unsigned int *timeout = data;
struct nf_icmp_net *in = icmpv6_pernet(net);
struct nf_icmp_net *in = nf_icmpv6_pernet(net);
if (!timeout)
timeout = icmpv6_get_timeouts(net);
@ -348,7 +343,7 @@ static int icmpv6_kmemdup_sysctl_table(struct nf_proto_net *pn,
static int icmpv6_init_net(struct net *net)
{
struct nf_icmp_net *in = icmpv6_pernet(net);
struct nf_icmp_net *in = nf_icmpv6_pernet(net);
struct nf_proto_net *pn = &in->pn;
in->timeout = nf_ct_icmpv6_timeout;

View File

@ -146,11 +146,6 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
}
};
static inline struct nf_sctp_net *sctp_pernet(struct net *net)
{
return &net->ct.nf_ct_proto.sctp;
}
#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* Print out the private part of the conntrack. */
static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
@ -480,7 +475,7 @@ static int sctp_packet(struct nf_conn *ct,
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
timeouts = sctp_pernet(nf_ct_net(ct))->timeouts;
timeouts = nf_sctp_pernet(nf_ct_net(ct))->timeouts;
nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
@ -599,7 +594,7 @@ static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
unsigned int *timeouts = data;
struct nf_sctp_net *sn = sctp_pernet(net);
struct nf_sctp_net *sn = nf_sctp_pernet(net);
int i;
/* set default SCTP timeouts. */
@ -736,7 +731,7 @@ static int sctp_kmemdup_sysctl_table(struct nf_proto_net *pn,
static int sctp_init_net(struct net *net)
{
struct nf_sctp_net *sn = sctp_pernet(net);
struct nf_sctp_net *sn = nf_sctp_pernet(net);
struct nf_proto_net *pn = &sn->pn;
if (!pn->users) {

View File

@ -272,11 +272,6 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
}
};
static inline struct nf_tcp_net *tcp_pernet(struct net *net)
{
return &net->ct.nf_ct_proto.tcp;
}
#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* Print out the private part of the conntrack. */
static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
@ -475,7 +470,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
const struct tcphdr *tcph)
{
struct net *net = nf_ct_net(ct);
struct nf_tcp_net *tn = tcp_pernet(net);
struct nf_tcp_net *tn = nf_tcp_pernet(net);
struct ip_ct_tcp_state *sender = &state->seen[dir];
struct ip_ct_tcp_state *receiver = &state->seen[!dir];
const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
@ -767,7 +762,7 @@ static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
{
enum tcp_conntrack new_state;
struct net *net = nf_ct_net(ct);
const struct nf_tcp_net *tn = tcp_pernet(net);
const struct nf_tcp_net *tn = nf_tcp_pernet(net);
const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0];
const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1];
@ -841,7 +836,7 @@ static int tcp_packet(struct nf_conn *ct,
const struct nf_hook_state *state)
{
struct net *net = nf_ct_net(ct);
struct nf_tcp_net *tn = tcp_pernet(net);
struct nf_tcp_net *tn = nf_tcp_pernet(net);
struct nf_conntrack_tuple *tuple;
enum tcp_conntrack new_state, old_state;
unsigned int index, *timeouts;
@ -1283,7 +1278,7 @@ static unsigned int tcp_nlattr_tuple_size(void)
static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
struct nf_tcp_net *tn = tcp_pernet(net);
struct nf_tcp_net *tn = nf_tcp_pernet(net);
unsigned int *timeouts = data;
int i;
@ -1508,7 +1503,7 @@ static int tcp_kmemdup_sysctl_table(struct nf_proto_net *pn,
static int tcp_init_net(struct net *net)
{
struct nf_tcp_net *tn = tcp_pernet(net);
struct nf_tcp_net *tn = nf_tcp_pernet(net);
struct nf_proto_net *pn = &tn->pn;
if (!pn->users) {

View File

@ -32,14 +32,9 @@ static const unsigned int udp_timeouts[UDP_CT_MAX] = {
[UDP_CT_REPLIED] = 180*HZ,
};
static inline struct nf_udp_net *udp_pernet(struct net *net)
{
return &net->ct.nf_ct_proto.udp;
}
static unsigned int *udp_get_timeouts(struct net *net)
{
return udp_pernet(net)->timeouts;
return nf_udp_pernet(net)->timeouts;
}
static void udp_error_log(const struct sk_buff *skb,
@ -212,7 +207,7 @@ static int udp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
unsigned int *timeouts = data;
struct nf_udp_net *un = udp_pernet(net);
struct nf_udp_net *un = nf_udp_pernet(net);
if (!timeouts)
timeouts = un->timeouts;
@ -292,7 +287,7 @@ static int udp_kmemdup_sysctl_table(struct nf_proto_net *pn,
static int udp_init_net(struct net *net)
{
struct nf_udp_net *un = udp_pernet(net);
struct nf_udp_net *un = nf_udp_pernet(net);
struct nf_proto_net *pn = &un->pn;
if (!pn->users) {

View File

@ -382,7 +382,8 @@ static int cttimeout_default_set(struct net *net, struct sock *ctnl,
static int
cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
u32 seq, u32 type, int event, u16 l3num,
const struct nf_conntrack_l4proto *l4proto)
const struct nf_conntrack_l4proto *l4proto,
const unsigned int *timeouts)
{
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
@ -408,7 +409,7 @@ cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
if (!nest_parms)
goto nla_put_failure;
ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, NULL);
ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, timeouts);
if (ret < 0)
goto nla_put_failure;
@ -430,6 +431,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
struct netlink_ext_ack *extack)
{
const struct nf_conntrack_l4proto *l4proto;
unsigned int *timeouts = NULL;
struct sk_buff *skb2;
int ret, err;
__u16 l3num;
@ -442,12 +444,44 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
l4proto = nf_ct_l4proto_find_get(l4num);
/* This protocol is not supported, skip. */
if (l4proto->l4proto != l4num) {
err = -EOPNOTSUPP;
err = -EOPNOTSUPP;
if (l4proto->l4proto != l4num)
goto err;
switch (l4proto->l4proto) {
case IPPROTO_ICMP:
timeouts = &nf_icmp_pernet(net)->timeout;
break;
case IPPROTO_TCP:
timeouts = nf_tcp_pernet(net)->timeouts;
break;
case IPPROTO_UDP:
timeouts = nf_udp_pernet(net)->timeouts;
break;
case IPPROTO_DCCP:
#ifdef CONFIG_NF_CT_PROTO_DCCP
timeouts = nf_dccp_pernet(net)->dccp_timeout;
#endif
break;
case IPPROTO_ICMPV6:
timeouts = &nf_icmpv6_pernet(net)->timeout;
break;
case IPPROTO_SCTP:
#ifdef CONFIG_NF_CT_PROTO_SCTP
timeouts = nf_sctp_pernet(net)->timeouts;
#endif
break;
case 255:
timeouts = &nf_generic_pernet(net)->timeout;
break;
default:
WARN_ON_ONCE(1);
break;
}
if (!timeouts)
goto err;
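
The switch just added maps the L4 protocol number to its per-netns default timeout table, leaving the pointer NULL when the protocol is compiled out so the handler can bail cleanly. A hedged userspace sketch of that dispatch shape (the protocol numbers are real, but the tables and config macro are illustrative, not the conntrack structures):

#include <stddef.h>
#include <stdio.h>

#define HAVE_SCTP 0   /* pretend the SCTP tracker is compiled out */

static unsigned int tcp_timeouts[4] = { 120, 60, 300, 432000 };
static unsigned int udp_timeouts[2] = { 30, 180 };
#if HAVE_SCTP
static unsigned int sctp_timeouts[3] = { 3, 30, 300 };
#endif

static const unsigned int *default_timeouts(int l4proto)
{
    switch (l4proto) {
    case 6:   return tcp_timeouts;   /* IPPROTO_TCP */
    case 17:  return udp_timeouts;   /* IPPROTO_UDP */
    case 132:                        /* IPPROTO_SCTP */
#if HAVE_SCTP
        return sctp_timeouts;
#endif
        return NULL;                 /* compiled out: caller errors out */
    default:
        return NULL;
    }
}

int main(void)
{
    printf("tcp:  %s\n", default_timeouts(6) ? "found" : "missing");
    printf("sctp: %s\n", default_timeouts(132) ? "found" : "missing");
    return 0;
}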
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (skb2 == NULL) {
err = -ENOMEM;
@ -458,8 +492,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
nlh->nlmsg_seq,
NFNL_MSG_TYPE(nlh->nlmsg_type),
IPCTNL_MSG_TIMEOUT_DEFAULT_SET,
l3num,
l4proto);
l3num, l4proto, timeouts);
if (ret <= 0) {
kfree_skb(skb2);
err = -ENOMEM;

View File

@ -54,9 +54,11 @@ static bool nft_xt_put(struct nft_xt *xt)
return false;
}
static int nft_compat_chain_validate_dependency(const char *tablename,
const struct nft_chain *chain)
static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx,
const char *tablename)
{
enum nft_chain_types type = NFT_CHAIN_T_DEFAULT;
const struct nft_chain *chain = ctx->chain;
const struct nft_base_chain *basechain;
if (!tablename ||
@ -64,9 +66,12 @@ static int nft_compat_chain_validate_dependency(const char *tablename,
return 0;
basechain = nft_base_chain(chain);
if (strcmp(tablename, "nat") == 0 &&
basechain->type->type != NFT_CHAIN_T_NAT)
return -EINVAL;
if (strcmp(tablename, "nat") == 0) {
if (ctx->family != NFPROTO_BRIDGE)
type = NFT_CHAIN_T_NAT;
if (basechain->type->type != type)
return -EINVAL;
}
return 0;
}
@ -342,8 +347,7 @@ static int nft_target_validate(const struct nft_ctx *ctx,
if (target->hooks && !(hook_mask & target->hooks))
return -EINVAL;
ret = nft_compat_chain_validate_dependency(target->table,
ctx->chain);
ret = nft_compat_chain_validate_dependency(ctx, target->table);
if (ret < 0)
return ret;
}
@ -590,8 +594,7 @@ static int nft_match_validate(const struct nft_ctx *ctx,
if (match->hooks && !(hook_mask & match->hooks))
return -EINVAL;
ret = nft_compat_chain_validate_dependency(match->table,
ctx->chain);
ret = nft_compat_chain_validate_dependency(ctx, match->table);
if (ret < 0)
return ret;
}

View File

@ -24,7 +24,6 @@ struct nft_ng_inc {
u32 modulus;
atomic_t counter;
u32 offset;
struct nft_set *map;
};
static u32 nft_ng_inc_gen(struct nft_ng_inc *priv)
@ -48,34 +47,11 @@ static void nft_ng_inc_eval(const struct nft_expr *expr,
regs->data[priv->dreg] = nft_ng_inc_gen(priv);
}
static void nft_ng_inc_map_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_ng_inc *priv = nft_expr_priv(expr);
const struct nft_set *map = priv->map;
const struct nft_set_ext *ext;
u32 result;
bool found;
result = nft_ng_inc_gen(priv);
found = map->ops->lookup(nft_net(pkt), map, &result, &ext);
if (!found)
return;
nft_data_copy(&regs->data[priv->dreg],
nft_set_ext_data(ext), map->dlen);
}
static const struct nla_policy nft_ng_policy[NFTA_NG_MAX + 1] = {
[NFTA_NG_DREG] = { .type = NLA_U32 },
[NFTA_NG_MODULUS] = { .type = NLA_U32 },
[NFTA_NG_TYPE] = { .type = NLA_U32 },
[NFTA_NG_OFFSET] = { .type = NLA_U32 },
[NFTA_NG_SET_NAME] = { .type = NLA_STRING,
.len = NFT_SET_MAXNAMELEN - 1 },
[NFTA_NG_SET_ID] = { .type = NLA_U32 },
};
static int nft_ng_inc_init(const struct nft_ctx *ctx,
@ -101,22 +77,6 @@ static int nft_ng_inc_init(const struct nft_ctx *ctx,
NFT_DATA_VALUE, sizeof(u32));
}
static int nft_ng_inc_map_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_ng_inc *priv = nft_expr_priv(expr);
u8 genmask = nft_genmask_next(ctx->net);
nft_ng_inc_init(ctx, expr, tb);
priv->map = nft_set_lookup_global(ctx->net, ctx->table,
tb[NFTA_NG_SET_NAME],
tb[NFTA_NG_SET_ID], genmask);
return PTR_ERR_OR_ZERO(priv->map);
}
static int nft_ng_dump(struct sk_buff *skb, enum nft_registers dreg,
u32 modulus, enum nft_ng_types type, u32 offset)
{
@ -143,27 +103,10 @@ static int nft_ng_inc_dump(struct sk_buff *skb, const struct nft_expr *expr)
priv->offset);
}
static int nft_ng_inc_map_dump(struct sk_buff *skb,
const struct nft_expr *expr)
{
const struct nft_ng_inc *priv = nft_expr_priv(expr);
if (nft_ng_dump(skb, priv->dreg, priv->modulus,
NFT_NG_INCREMENTAL, priv->offset) ||
nla_put_string(skb, NFTA_NG_SET_NAME, priv->map->name))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
struct nft_ng_random {
enum nft_registers dreg:8;
u32 modulus;
u32 offset;
struct nft_set *map;
};
static u32 nft_ng_random_gen(struct nft_ng_random *priv)
@ -183,25 +126,6 @@ static void nft_ng_random_eval(const struct nft_expr *expr,
regs->data[priv->dreg] = nft_ng_random_gen(priv);
}
static void nft_ng_random_map_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_ng_random *priv = nft_expr_priv(expr);
const struct nft_set *map = priv->map;
const struct nft_set_ext *ext;
u32 result;
bool found;
result = nft_ng_random_gen(priv);
found = map->ops->lookup(nft_net(pkt), map, &result, &ext);
if (!found)
return;
nft_data_copy(&regs->data[priv->dreg],
nft_set_ext_data(ext), map->dlen);
}
static int nft_ng_random_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
@ -226,21 +150,6 @@ static int nft_ng_random_init(const struct nft_ctx *ctx,
NFT_DATA_VALUE, sizeof(u32));
}
static int nft_ng_random_map_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_ng_random *priv = nft_expr_priv(expr);
u8 genmask = nft_genmask_next(ctx->net);
nft_ng_random_init(ctx, expr, tb);
priv->map = nft_set_lookup_global(ctx->net, ctx->table,
tb[NFTA_NG_SET_NAME],
tb[NFTA_NG_SET_ID], genmask);
return PTR_ERR_OR_ZERO(priv->map);
}
static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_ng_random *priv = nft_expr_priv(expr);
@ -249,22 +158,6 @@ static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr)
priv->offset);
}
static int nft_ng_random_map_dump(struct sk_buff *skb,
const struct nft_expr *expr)
{
const struct nft_ng_random *priv = nft_expr_priv(expr);
if (nft_ng_dump(skb, priv->dreg, priv->modulus,
NFT_NG_RANDOM, priv->offset) ||
nla_put_string(skb, NFTA_NG_SET_NAME, priv->map->name))
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static struct nft_expr_type nft_ng_type;
static const struct nft_expr_ops nft_ng_inc_ops = {
.type = &nft_ng_type,
@ -274,14 +167,6 @@ static const struct nft_expr_ops nft_ng_inc_ops = {
.dump = nft_ng_inc_dump,
};
static const struct nft_expr_ops nft_ng_inc_map_ops = {
.type = &nft_ng_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_ng_inc)),
.eval = nft_ng_inc_map_eval,
.init = nft_ng_inc_map_init,
.dump = nft_ng_inc_map_dump,
};
static const struct nft_expr_ops nft_ng_random_ops = {
.type = &nft_ng_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_ng_random)),
@ -290,14 +175,6 @@ static const struct nft_expr_ops nft_ng_random_ops = {
.dump = nft_ng_random_dump,
};
static const struct nft_expr_ops nft_ng_random_map_ops = {
.type = &nft_ng_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_ng_random)),
.eval = nft_ng_random_map_eval,
.init = nft_ng_random_map_init,
.dump = nft_ng_random_map_dump,
};
static const struct nft_expr_ops *
nft_ng_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
{
@ -312,12 +189,8 @@ nft_ng_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
switch (type) {
case NFT_NG_INCREMENTAL:
if (tb[NFTA_NG_SET_NAME])
return &nft_ng_inc_map_ops;
return &nft_ng_inc_ops;
case NFT_NG_RANDOM:
if (tb[NFTA_NG_SET_NAME])
return &nft_ng_random_map_ops;
return &nft_ng_random_ops;
}

View File

@ -50,7 +50,7 @@ static int nft_osf_init(const struct nft_ctx *ctx,
int err;
u8 ttl;
if (nla_get_u8(tb[NFTA_OSF_TTL])) {
if (tb[NFTA_OSF_TTL]) {
ttl = nla_get_u8(tb[NFTA_OSF_TTL]);
if (ttl > 2)
return -EINVAL;

View File

@ -114,6 +114,22 @@ static void idletimer_tg_expired(struct timer_list *t)
schedule_work(&timer->work);
}
static int idletimer_check_sysfs_name(const char *name, unsigned int size)
{
int ret;
ret = xt_check_proc_name(name, size);
if (ret < 0)
return ret;
if (!strcmp(name, "power") ||
!strcmp(name, "subsystem") ||
!strcmp(name, "uevent"))
return -EINVAL;
return 0;
}
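
idletimer_check_sysfs_name() rejects labels that would collide with attribute names the device kobject already exposes, on top of the generic proc-name check. A hedged userspace sketch of that kind of validation (it mirrors the intent, not the kernel helpers xt_check_proc_name() relies on):

#include <stdio.h>
#include <string.h>

static int check_sysfs_name(const char *name, unsigned int size)
{
    size_t len = strnlen(name, size);

    if (len == 0 || len == size)          /* empty or not NUL-terminated */
        return -1;
    if (strchr(name, '/'))                /* would escape the directory */
        return -1;
    if (!strcmp(name, ".") || !strcmp(name, ".."))
        return -1;
    if (!strcmp(name, "power") ||         /* clashes with existing attrs */
        !strcmp(name, "subsystem") ||
        !strcmp(name, "uevent"))
        return -1;
    return 0;
}

int main(void)
{
    const char *names[] = { "mytimer", "power", "a/b", "" };

    for (unsigned int i = 0; i < 4; i++)
        printf("%-8s -> %d\n", names[i], check_sysfs_name(names[i], 28));
    return 0;
}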
static int idletimer_tg_create(struct idletimer_tg_info *info)
{
int ret;
@ -124,6 +140,10 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
goto out;
}
ret = idletimer_check_sysfs_name(info->label, sizeof(info->label));
if (ret < 0)
goto out_free_timer;
sysfs_attr_init(&info->timer->attr.attr);
info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
if (!info->timer->attr.attr.name) {

View File

@ -1203,7 +1203,8 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
&info->labels.mask);
if (err)
return err;
} else if (labels_nonzero(&info->labels.mask)) {
} else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
labels_nonzero(&info->labels.mask)) {
err = ovs_ct_set_labels(ct, key, &info->labels.value,
&info->labels.mask);
if (err)

View File

@ -611,6 +611,7 @@ struct rxrpc_call {
* not hard-ACK'd packet follows this.
*/
rxrpc_seq_t tx_top; /* Highest Tx slot allocated. */
u16 tx_backoff; /* Delay to insert due to Tx failure */
/* TCP-style slow-start congestion control [RFC5681]. Since the SMSS
* is fixed, we keep these numbers in terms of segments (ie. DATA

View File

@ -123,6 +123,7 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
else
ack_at = expiry;
ack_at += READ_ONCE(call->tx_backoff);
ack_at += now;
if (time_before(ack_at, call->ack_at)) {
WRITE_ONCE(call->ack_at, ack_at);
@ -311,6 +312,7 @@ void rxrpc_process_call(struct work_struct *work)
container_of(work, struct rxrpc_call, processor);
rxrpc_serial_t *send_ack;
unsigned long now, next, t;
unsigned int iterations = 0;
rxrpc_see_call(call);
@ -319,6 +321,11 @@ void rxrpc_process_call(struct work_struct *work)
call->debug_id, rxrpc_call_states[call->state], call->events);
recheck_state:
/* Limit the number of times we do this before returning to the manager */
iterations++;
if (iterations > 5)
goto requeue;
if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
rxrpc_send_abort_packet(call);
goto recheck_state;
@ -447,13 +454,16 @@ void rxrpc_process_call(struct work_struct *work)
rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
/* other events may have been raised since we started checking */
if (call->events && call->state < RXRPC_CALL_COMPLETE) {
__rxrpc_queue_call(call);
goto out;
}
if (call->events && call->state < RXRPC_CALL_COMPLETE)
goto requeue;
out_put:
rxrpc_put_call(call, rxrpc_call_put);
out:
_leave("");
return;
requeue:
__rxrpc_queue_call(call);
goto out;
}

View File

@ -34,6 +34,21 @@ struct rxrpc_abort_buffer {
static const char rxrpc_keepalive_string[] = "";
/*
* Increase Tx backoff on transmission failure and clear it on success.
*/
static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
{
if (ret < 0) {
u16 tx_backoff = READ_ONCE(call->tx_backoff);
if (tx_backoff < HZ)
WRITE_ONCE(call->tx_backoff, tx_backoff + 1);
} else {
WRITE_ONCE(call->tx_backoff, 0);
}
}
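
rxrpc_tx_backoff() above grows a small per-call delay on each transmission failure, capped at HZ ticks, and clears it on any success; the delay is then folded into the next ACK timer. A standalone sketch of that bounded linear backoff (fake types and a pretend HZ, not the rxrpc structures):

#include <stdio.h>

#define HZ 100   /* pretend ticks per second */

struct fake_call {
    unsigned short tx_backoff;
};

static void tx_backoff(struct fake_call *call, int ret)
{
    if (ret < 0) {
        if (call->tx_backoff < HZ)
            call->tx_backoff++;     /* grow slowly on each failure */
    } else {
        call->tx_backoff = 0;       /* any success resets the delay */
    }
}

int main(void)
{
    struct fake_call call = { 0 };
    unsigned long ack_at = 10;      /* base ACK delay in ticks */

    for (int i = 0; i < 3; i++)
        tx_backoff(&call, -1);      /* three failed transmissions */

    printf("ack delayed to %lu ticks\n", ack_at + call.tx_backoff);

    tx_backoff(&call, 0);           /* a successful send clears it */
    printf("backoff after success: %u\n", call.tx_backoff);
    return 0;
}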
/*
* Arrange for a keepalive ping a certain time after we last transmitted. This
* lets the far side know we're still interested in this call and helps keep
@ -210,6 +225,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
else
trace_rxrpc_tx_packet(call->debug_id, &pkt->whdr,
rxrpc_tx_point_call_ack);
rxrpc_tx_backoff(call, ret);
if (call->state < RXRPC_CALL_COMPLETE) {
if (ret < 0) {
@ -218,7 +234,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
rxrpc_propose_ACK(call, pkt->ack.reason,
ntohs(pkt->ack.maxSkew),
ntohl(pkt->ack.serial),
true, true,
false, true,
rxrpc_propose_ack_retry_tx);
} else {
spin_lock_bh(&call->lock);
@ -300,7 +316,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
else
trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
rxrpc_tx_point_call_abort);
rxrpc_tx_backoff(call, ret);
rxrpc_put_connection(conn);
return ret;
@ -413,6 +429,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
else
trace_rxrpc_tx_packet(call->debug_id, &whdr,
rxrpc_tx_point_call_data_nofrag);
rxrpc_tx_backoff(call, ret);
if (ret == -EMSGSIZE)
goto send_fragmentable;
@ -445,9 +462,18 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
rxrpc_reduce_call_timer(call, expect_rx_by, nowj,
rxrpc_timer_set_for_normal);
}
}
rxrpc_set_keepalive(call);
rxrpc_set_keepalive(call);
} else {
/* Cancel the call if the initial transmission fails,
* particularly if that's due to network routing issues that
* aren't going away anytime soon. The layer above can arrange
* the retransmission.
*/
if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags))
rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
RX_USER_ABORT, ret);
}
_leave(" = %d [%u]", ret, call->peer->maxdata);
return ret;
@ -506,6 +532,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
else
trace_rxrpc_tx_packet(call->debug_id, &whdr,
rxrpc_tx_point_call_data_frag);
rxrpc_tx_backoff(call, ret);
up_write(&conn->params.local->defrag_sem);
goto done;

View File

@ -212,7 +212,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
INIT_LIST_HEAD(&q->retransmit);
INIT_LIST_HEAD(&q->sacked);
INIT_LIST_HEAD(&q->abandoned);
sctp_sched_set_sched(asoc, SCTP_SS_FCFS);
sctp_sched_set_sched(asoc, SCTP_SS_DEFAULT);
}
/* Free the outqueue structure and any related pending chunks.