From 40849d1cd8e999721d88c4f9630363996ceea5a2 Mon Sep 17 00:00:00 2001 From: Chen Minqiang Date: Wed, 17 Jan 2024 07:47:23 +0800 Subject: [PATCH] hwnat: add natflow flow offload support Signed-off-by: Chen Minqiang --- drivers/net/ethernet/mediatek/Makefile | 2 +- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 130 ++- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 40 +- drivers/net/ethernet/mediatek/mtk_ppe.h | 40 +- drivers/net/ethernet/mediatek/mtk_ppe1.c | 787 ++++++++++++++++++ .../net/ethernet/mediatek/mtk_ppe_debugfs.c | 9 +- .../net/ethernet/mediatek/mtk_ppe_offload1.c | 527 ++++++++++++ drivers/net/ethernet/mediatek/mtk_ppe_regs.h | 5 + drivers/net/ethernet/mediatek/mtk_wed.c | 57 +- drivers/net/ethernet/mediatek/mtk_wed.h | 7 + drivers/net/ppp/ppp_generic.c | 23 + drivers/net/ppp/pppoe.c | 23 + include/linux/netdevice.h | 24 + include/linux/ppp_channel.h | 1 + include/net/netfilter/nf_flow_table.h | 54 ++ net/8021q/vlan_dev.c | 19 + net/bridge/br_device.c | 22 + net/core/dev.c | 6 + net/dsa/slave.c | 21 + 19 files changed, 1755 insertions(+), 42 deletions(-) create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe1.c create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe_offload1.c --- a/drivers/net/ethernet/mediatek/Makefile +++ b/drivers/net/ethernet/mediatek/Makefile @@ -4,7 +4,7 @@ # obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o -mtk_eth-y := mtk_eth_soc.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o +mtk_eth-y := mtk_eth_soc.o mtk_eth_path.o mtk_ppe1.o mtk_ppe_debugfs.o mtk_ppe_offload1.o mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_USXGMII) += mtk_usxgmii.o mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o mtk_wed_wo.o ifdef CONFIG_DEBUG_FS --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -25,6 +25,8 @@ #include #include #include +#include +#include #include "mtk_eth_soc.h" #include "mtk_wed.h" @@ -1409,7 +1411,17 @@ static void mtk_tx_set_dma_desc_v1(struc /* vlan header offload */ if (info->vlan) data |= TX_DMA_INS_VLAN | info->vlan_tci; + if (info->natflow) { + if (!netdev_uses_dsa(dev) && !info->vlan) { + data |= TX_DMA_INS_VLAN | 1; + } + } + } + if (info->natflow) { + data &= ~(TX_DMA_FPORT_MASK << TX_DMA_FPORT_SHIFT); + data |= (0x4 & TX_DMA_FPORT_MASK) << TX_DMA_FPORT_SHIFT; } + WRITE_ONCE(desc->txd4, data); } @@ -1446,6 +1458,10 @@ static void mtk_tx_set_dma_desc_v2(struc } data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid); + if (info->natflow) { + data &= ~(TX_DMA_FPORT_MASK_V2 << TX_DMA_FPORT_SHIFT_V2); + data |= (PSE_PPE0_PORT & TX_DMA_FPORT_MASK_V2) << TX_DMA_FPORT_SHIFT_V2; + } WRITE_ONCE(desc->txd4, data); data = 0; @@ -1463,6 +1479,11 @@ static void mtk_tx_set_dma_desc_v2(struc data = 0; if (info->first && info->vlan) data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci; + if (info->first && info->natflow) { + if (!netdev_uses_dsa(dev) && !info->vlan) { + data |= TX_DMA_INS_VLAN_V2 | 1; + } + } WRITE_ONCE(desc->txd6, data); WRITE_ONCE(desc->txd7, 0); @@ -1493,6 +1514,7 @@ static int mtk_tx_map(struct sk_buff *sk .vlan_tci = skb_vlan_tag_get(skb), .first = true, .last = !skb_is_nonlinear(skb), + .natflow = ((skb->mark & HWNAT_QUEUE_MAPPING_MAGIC_MASK) == HWNAT_QUEUE_MAPPING_MAGIC && (skb->hash & HWNAT_QUEUE_MAPPING_MAGIC_MASK) == HWNAT_QUEUE_MAPPING_MAGIC), }; struct netdev_queue *txq; struct mtk_mac *mac = netdev_priv(dev); @@ -2140,7 +2162,6 @@ static int mtk_poll_rx(struct napi_struc while (done < budget) { unsigned int pktlen, *rxdcsum; struct net_device *netdev; - u32 hash, reason; int mac = 0; 
ring = mtk_get_rx_ring(eth); @@ -2268,18 +2289,8 @@ static int mtk_poll_rx(struct napi_struc bytes += skb->len; if (mtk_is_netsys_v3_or_greater(eth)) { - reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5); - hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY; - if (hash != MTK_RXD5_FOE_ENTRY) - skb_set_hash(skb, jhash_1word(hash, 0), - PKT_HASH_TYPE_L4); rxdcsum = &trxd.rxd3; } else { - reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4); - hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY; - if (hash != MTK_RXD4_FOE_ENTRY) - skb_set_hash(skb, jhash_1word(hash, 0), - PKT_HASH_TYPE_L4); rxdcsum = &trxd.rxd4; } @@ -2301,11 +2312,33 @@ static int mtk_poll_rx(struct napi_struc skb_dst_set_noref(skb, ð->dsa_meta[port]->dst); } - if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) - mtk_ppe_check_skb(eth->ppe[0], skb, hash); - - skb_record_rx_queue(skb, 0); - napi_gro_receive(napi, skb); + if (mtk_is_netsys_v3_or_greater(eth)) { + switch (mtk_offload_check_rx_v2(eth, skb, trxd.rxd5)) { + case 0: + skb_record_rx_queue(skb, 0); + napi_gro_receive(napi, skb); + break; + case 1: + skb_record_rx_queue(skb, 0); + netif_receive_skb(skb); + break; + default: + dev_kfree_skb(skb); + } + } else { + switch (mtk_offload_check_rx(eth, skb, trxd.rxd4)) { + case 0: + skb_record_rx_queue(skb, 0); + napi_gro_receive(napi, skb); + break; + case 1: + skb_record_rx_queue(skb, 0); + netif_receive_skb(skb); + break; + default: + dev_kfree_skb(skb); + } + } skip_rx: ring->data[idx] = new_data; @@ -3455,7 +3488,7 @@ found: if (mac->speed > 0 && mac->speed <= s.base.speed) s.base.speed = 0; - mtk_set_queue_speed(eth, dp->index + 3, s.base.speed); + mtk_set_queue_speed(eth, dp->index + 3 + (mac->id == 0 ? 0 : 6), s.base.speed); return NOTIFY_DONE; } @@ -3534,6 +3567,7 @@ static int mtk_open(struct net_device *d md_dst->u.port_info.port_id = i; eth->dsa_meta[i] = md_dst; } +#if 0 } else { /* Hardware special tag parsing needs to be disabled if at least * one MAC does not use DSA. @@ -3543,6 +3577,7 @@ static int mtk_open(struct net_device *d mtk_w32(eth, val, MTK_CDMP_IG_CTRL); mtk_w32(eth, 0, MTK_CDMP_EG_CTRL); +#endif } return 0; @@ -4050,10 +4085,12 @@ static int mtk_hw_init(struct mtk_eth *e val = mtk_r32(eth, MTK_CDMQ_IG_CTRL); mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL); if (mtk_is_netsys_v1(eth)) { + /* Hardware special tag parsing needs to be disabled for netsys_v1 */ val = mtk_r32(eth, MTK_CDMP_IG_CTRL); - mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL); + val &= ~MTK_CDMP_STAG_EN; + mtk_w32(eth, val, MTK_CDMP_IG_CTRL); - mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); + mtk_w32(eth, 0, MTK_CDMP_EG_CTRL); } /* set interrupt delays based on current Net DIM sample */ @@ -4553,7 +4590,7 @@ static u16 mtk_select_queue(struct net_d unsigned int queue = 0; if (netdev_uses_dsa(dev)) - queue = skb_get_queue_mapping(skb) + 3; + queue = skb_get_queue_mapping(skb) + 3 + (mac->id == 0 ? 
0 : 6); else queue = mac->id; @@ -4563,6 +4600,51 @@ static u16 mtk_select_queue(struct net_d return queue; } +static int +mtk_flow_offload(flow_offload_type_t type, flow_offload_t *flow, + flow_offload_hw_path_t *src, + flow_offload_hw_path_t *dest) +{ + struct mtk_mac *mac = NULL; + struct mtk_eth *eth; + + /* for now offload only do support natflow */ + if (flow->flags != 0) { + return -EINVAL; + } + + if (src->dev->netdev_ops->ndo_flow_offload == mtk_flow_offload) { + mac = netdev_priv(src->dev); + } else if (dest->dev->netdev_ops->ndo_flow_offload == mtk_flow_offload) { + mac = netdev_priv(dest->dev); + } else { + return -EINVAL; + } + + eth = mac->hw; + + if (!eth->soc->offload_version) + return -EINVAL; + + return mtk_flow_offload_add(eth, type, flow, src, dest); +} + +static int mtk_flow_offload_check(flow_offload_hw_path_t *path) +{ + if (!(path->flags & FLOW_OFFLOAD_PATH_ETHERNET)) + return -EINVAL; + + if ((path->flags & FLOW_OFFLOAD_PATH_STOP)) { + mtk_flow_offload_stop(); + } else if ((path->flags & FLOW_OFFLOAD_PATH_DEL)) { + struct mtk_mac *mac = netdev_priv(path->dev); + struct mtk_eth *eth = mac->hw; + mtk_flow_offload_foe_delete(eth, path->vlan_proto); + mtk_flow_offload_foe_delete(eth, path->vlan_id); + } + return 0; +} + static const struct ethtool_ops mtk_ethtool_ops = { .get_link_ksettings = mtk_get_link_ksettings, .set_link_ksettings = mtk_set_link_ksettings, @@ -4594,7 +4676,8 @@ static const struct net_device_ops mtk_n #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mtk_poll_controller, #endif - .ndo_setup_tc = mtk_eth_setup_tc, + .ndo_flow_offload = mtk_flow_offload, + .ndo_flow_offload_check = mtk_flow_offload_check, .ndo_bpf = mtk_xdp, .ndo_xdp_xmit = mtk_xdp_xmit, .ndo_select_queue = mtk_select_queue, @@ -5118,7 +5201,8 @@ static int mtk_probe(struct platform_dev return 0; err_deinit_ppe: - mtk_ppe_deinit(eth); + mtk_eth_offload_exit(eth); + //mtk_ppe_deinit(eth); mtk_mdio_cleanup(eth); err_free_dev: mtk_free_dev(eth); @@ -5138,6 +5222,8 @@ static int mtk_remove(struct platform_de struct mtk_mac *mac; int i; + mtk_eth_offload_exit(eth); + /* stop all devices to make sure that dma is properly shut down */ for (i = 0; i < MTK_MAX_DEVS; i++) { if (!eth->netdev[i]) --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -33,6 +33,7 @@ #define MTK_TX_DMA_BUF_LEN_V2 0xffff #define MTK_QDMA_RING_SIZE 2048 #define MTK_DMA_SIZE 512 +#define MTK_MAC_COUNT 3 #define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + ETH_FCS_LEN) #define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN) #define MTK_DMA_DUMMY_DESC 0xffffffff @@ -298,6 +299,8 @@ #define MTK_RLS_DONE_INT BIT(0) /* QDMA TX NUM */ +#define MTK_QDMA_TX_NUM 16 +#define MTK_QDMA_TX_MASK (MTK_QDMA_TX_NUM - 1) #define QID_BITS_V2(x) (((x) & 0x3f) << 16) #define MTK_QDMA_GMAC2_QID 8 @@ -360,7 +363,7 @@ #endif /* QDMA descriptor rxd3 */ -#define RX_DMA_VID(x) ((x) & VLAN_VID_MASK) +#define RX_DMA_VID(x) ((x) & 0x1fff) /* ext hnat need this for hash HWNAT_QUEUE_MAPPING_HASH_MASK */ #define RX_DMA_TCI(x) ((x) & (VLAN_PRIO_MASK | VLAN_VID_MASK)) #define RX_DMA_VPID(x) (((x) >> 16) & 0xffff) @@ -685,6 +688,17 @@ #define MTK_MAC_FSM(x) (0x1010C + ((x) * 0x100)) +/* natflow.h */ +#if (defined(CONFIG_PINCTRL_MT7988) || defined(CONFIG_PINCTRL_MT7986) || defined(CONFIG_PINCTRL_MT7981)) +#define HWNAT_QUEUE_MAPPING_MAGIC 0x8000 +#define HWNAT_QUEUE_MAPPING_MAGIC_MASK 0xc000 +#define HWNAT_QUEUE_MAPPING_HASH_MASK 0x3fff +#else +#define HWNAT_QUEUE_MAPPING_MAGIC 0x8000 +#define 
HWNAT_QUEUE_MAPPING_MAGIC_MASK 0xe000 +#define HWNAT_QUEUE_MAPPING_HASH_MASK 0x1fff +#endif + struct mtk_rx_dma { unsigned int rxd1; unsigned int rxd2; @@ -1216,6 +1230,7 @@ struct mtk_tx_dma_desc_info { u8 vlan:1; u8 first:1; u8 last:1; + u8 natflow:1; }; struct mtk_reg_map { @@ -1335,6 +1350,8 @@ struct mtk_usxgmii_pcs { struct phylink_pcs pcs; }; +#define MTK_MAX_DEVS 3 + /* struct mtk_eth - This is the main datasructure for holding the state * of the driver * @dev: The device pointer @@ -1444,7 +1461,7 @@ struct mtk_eth { struct metadata_dst *dsa_meta[MTK_MAX_DSA_PORTS]; struct mtk_ppe *ppe[2]; - struct rhashtable flow_table; + flow_offload_t __rcu **foe_flow_table; struct bpf_prog __rcu *prog; @@ -1608,6 +1625,14 @@ static inline bool mtk_interface_mode_is } } +static inline u32 mtk_get_ib2_mib_counter_mask(struct mtk_eth *eth) +{ + if (mtk_is_netsys_v2_or_greater(eth)) + return MTK_FOE_IB2_MIB_CNT_V2; + + return MTK_FOE_IB2_MIB_CNT; +} + /* read the hardware status register */ void mtk_stats_update_mac(struct mtk_mac *mac); @@ -1621,9 +1646,16 @@ int mtk_gmac_gephy_path_setup(struct mtk int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id); int mtk_gmac_usxgmii_path_setup(struct mtk_eth *eth, int mac_id); +void mtk_eth_offload_exit(struct mtk_eth *eth); int mtk_eth_offload_init(struct mtk_eth *eth); -int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, - void *type_data); +int mtk_flow_offload_add(struct mtk_eth *eth, flow_offload_type_t type, + flow_offload_t *flow, + flow_offload_hw_path_t *src, + flow_offload_hw_path_t *dest); +void mtk_flow_offload_foe_delete(struct mtk_eth *eth, u16 hash); +void mtk_flow_offload_stop(void); +int mtk_offload_check_rx(struct mtk_eth *eth, struct sk_buff *skb, u32 rxd4); +int mtk_offload_check_rx_v2(struct mtk_eth *eth, struct sk_buff *skb, u32 rxd5); int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls, int ppe_index); void mtk_flow_offload_cleanup(struct mtk_eth *eth, struct list_head *list); --- a/drivers/net/ethernet/mediatek/mtk_ppe.h +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h @@ -8,7 +8,11 @@ #include #include +#if (defined(CONFIG_PINCTRL_MT7988) || defined(CONFIG_PINCTRL_MT7986) || defined(CONFIG_PINCTRL_MT7981)) +#define MTK_PPE_ENTRIES_SHIFT 4 +#else #define MTK_PPE_ENTRIES_SHIFT 3 +#endif #define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT) #define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1) #define MTK_PPE_WAIT_TIMEOUT_US 1000000 @@ -34,6 +38,7 @@ /* CONFIG_MEDIATEK_NETSYS_V2 */ #define MTK_FOE_IB1_BIND_TIMESTAMP_V2 GENMASK(7, 0) +#define MTK_FOE_IB1_BIND_KEEPALIVE_V2 BIT(13) #define MTK_FOE_IB1_BIND_VLAN_LAYER_V2 GENMASK(16, 14) #define MTK_FOE_IB1_BIND_PPPOE_V2 BIT(17) #define MTK_FOE_IB1_BIND_VLAN_TAG_V2 BIT(18) @@ -128,14 +133,18 @@ struct mtk_foe_mac_info { /* software-only entry type */ struct mtk_foe_bridge { - u8 dest_mac[ETH_ALEN]; - u8 src_mac[ETH_ALEN]; - u16 vlan; + u32 dest_mac_hi; + + u16 src_mac_lo; + u16 dest_mac_lo; - struct {} key_end; + u32 src_mac_hi; u32 ib2; + u32 _rsv[5]; + + u32 udf_tsid; struct mtk_foe_mac_info l2; }; @@ -340,10 +349,21 @@ struct mtk_ppe { struct hlist_head *foe_flow; struct rhashtable l2_flows; +}; - void *acct_table; +struct mtk_ppe_account_group { + unsigned int hash; + unsigned int state; + unsigned long jiffies; + unsigned long long bytes; + unsigned long long packets; + unsigned int speed_bytes[4]; + unsigned int speed_packets[4]; + void *priv; /* for keepalive callback */ }; +void mtk_ppe_read_mib(struct mtk_ppe *ppe, unsigned int hash, 
struct mtk_foe_accounting *diff); + struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index); void mtk_ppe_deinit(struct mtk_eth *eth); void mtk_ppe_start(struct mtk_ppe *ppe); @@ -372,10 +392,14 @@ mtk_ppe_check_skb(struct mtk_ppe *ppe, s ppe->foe_check_time[hash] = now; __mtk_ppe_check_skb(ppe, skb, hash); } +#endif + +struct mtk_ppe_account_group *mtk_ppe_account_group_get(u32 idx); int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry, int type, int l4proto, u8 pse_port, u8 *src_mac, u8 *dest_mac); +int mtk_foe_entry_clear_ttl(struct mtk_eth *eth, struct mtk_foe_entry *entry); int mtk_foe_entry_set_pse_port(struct mtk_eth *eth, struct mtk_foe_entry *entry, u8 port); int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth, @@ -397,10 +421,12 @@ int mtk_foe_entry_set_wdma(struct mtk_et bool amsdu_en); int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry, unsigned int queue); +#if 0 int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); +#endif +void mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash); +int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, u16 timestamp, u32 orig_hash); int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index); void mtk_foe_entry_get_stats(struct mtk_ppe *ppe, struct mtk_flow_entry *entry, int *idle); - -#endif --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_ppe1.c @@ -0,0 +1,787 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2020 Felix Fietkau */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "mtk_eth_soc.h" +#include "mtk_ppe.h" +#include "mtk_ppe_regs.h" + +static struct mtk_ppe_account_group mtk_ppe_account_group_entry[64]; + +static u32 mtk_ppe_account_group_alloc(void) +{ + u32 i; + for (i = 1; i < 64; i++) { + if (mtk_ppe_account_group_entry[i].state == MTK_FOE_STATE_INVALID) { + mtk_ppe_account_group_entry[i].state = MTK_FOE_STATE_FIN; /* mark FIN as in use begin */ + mtk_ppe_account_group_entry[i].bytes = 0; + mtk_ppe_account_group_entry[i].packets = 0; + mtk_ppe_account_group_entry[i].jiffies = jiffies; + return i; + } + } + return 0; +} + +struct mtk_ppe_account_group *mtk_ppe_account_group_get(u32 idx) +{ + if (idx > 0 && idx < 64) { + return &mtk_ppe_account_group_entry[idx]; + } + return NULL; +} + +static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val) +{ + writel(val, ppe->base + reg); +} + +static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg) +{ + return readl(ppe->base + reg); +} + +static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set) +{ + u32 val; + + val = ppe_r32(ppe, reg); + val &= ~mask; + val |= set; + ppe_w32(ppe, reg, val); + + return val; +} + +static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val) +{ + return ppe_m32(ppe, reg, 0, val); +} + +static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val) +{ + return ppe_m32(ppe, reg, val, 0); +} + +static int mtk_ppe_wait_busy(struct mtk_ppe *ppe) +{ + int ret; + u32 val; + + ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val, + !(val & MTK_PPE_GLO_CFG_BUSY), + 20, MTK_PPE_WAIT_TIMEOUT_US); + + if (ret) + dev_err(ppe->dev, "PPE table busy"); + + return ret; +} + +static void mtk_ppe_cache_clear(struct mtk_ppe *ppe) +{ + ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR); + ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR); +} + +static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable) 
+{ + mtk_ppe_cache_clear(ppe); + + ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN, + enable * MTK_PPE_CACHE_CTL_EN); +} + +static inline u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e) +{ + u32 hv1, hv2, hv3; + u32 hash; + + switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) { + case MTK_PPE_PKT_TYPE_BRIDGE: + hv1 = e->bridge.src_mac_lo; + hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16); + hv2 = e->bridge.src_mac_hi >> 16; + hv2 ^= e->bridge.dest_mac_lo; + hv3 = e->bridge.dest_mac_hi; + break; + case MTK_PPE_PKT_TYPE_IPV4_ROUTE: + case MTK_PPE_PKT_TYPE_IPV4_HNAPT: + hv1 = e->ipv4.orig.ports; + hv2 = e->ipv4.orig.dest_ip; + hv3 = e->ipv4.orig.src_ip; + break; + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T: + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T: + hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3]; + hv1 ^= e->ipv6.ports; + + hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2]; + hv2 ^= e->ipv6.dest_ip[0]; + + hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1]; + hv3 ^= e->ipv6.src_ip[0]; + break; + case MTK_PPE_PKT_TYPE_IPV4_DSLITE: + case MTK_PPE_PKT_TYPE_IPV6_6RD: + default: + WARN_ON_ONCE(1); + return MTK_PPE_HASH_MASK; + } + + hash = (hv1 & hv2) | ((~hv1) & hv3); + hash = (hash >> 24) | ((hash & 0xffffff) << 8); + hash ^= hv1 ^ hv2 ^ hv3; + hash ^= hash >> 16; + hash <<= (ffs(eth->soc->hash_offset) - 1); + hash &= MTK_PPE_ENTRIES - 1; + + return hash; +} + +static inline struct mtk_foe_mac_info * +mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry) +{ + int type = mtk_get_ib1_pkt_type(eth, entry->ib1); + + if (type == MTK_PPE_PKT_TYPE_BRIDGE) + return &entry->bridge.l2; + + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) + return &entry->ipv6.l2; + + return &entry->ipv4.l2; +} + +static inline u32 * +mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry) +{ + int type = mtk_get_ib1_pkt_type(eth, entry->ib1); + + if (type == MTK_PPE_PKT_TYPE_BRIDGE) + return &entry->bridge.ib2; + + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) + return &entry->ipv6.ib2; + + return &entry->ipv4.ib2; +} + +int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry, int type, int l4proto, + u8 pse_port, u8 *src_mac, u8 *dest_mac) +{ + struct mtk_foe_mac_info *l2; + u32 ports_pad, val; + u32 port_ag = 0; + + memset(entry, 0, sizeof(*entry)); + + if (mtk_is_netsys_v2_or_greater(eth)) { + val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) | + FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) | + FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) | + MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2 | + MTK_FOE_IB1_BIND_KEEPALIVE_V2; + entry->ib1 = val; + + val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) | + FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf); + } else { + int port_mg = eth->soc->offload_version > 1 ? 
0 : 0x3f; + + val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) | + FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) | + FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) | + MTK_FOE_IB1_BIND_TTL | + MTK_FOE_IB1_BIND_CACHE | + MTK_FOE_IB1_BIND_KEEPALIVE; + entry->ib1 = val; + + port_ag = mtk_ppe_account_group_alloc(); + + val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, port_mg) | + FIELD_PREP(MTK_FOE_IB2_PORT_AG, port_ag) | + FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port); + } + + if (is_multicast_ether_addr(dest_mac)) + val |= mtk_get_ib2_multicast_mask(eth); + + if (eth->soc->has_accounting) + val |= mtk_get_ib2_mib_counter_mask(eth); + + ports_pad = 0xa5a5a500 | (l4proto & 0xff); + if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE) + entry->ipv4.orig.ports = ports_pad; + if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T) + entry->ipv6.ports = ports_pad; + + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) { + entry->ipv6.ib2 = val; + l2 = &entry->ipv6.l2; + } else { + entry->ipv4.ib2 = val; + l2 = &entry->ipv4.l2; + } + + l2->dest_mac_hi = get_unaligned_be32(dest_mac); + l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4); + l2->src_mac_hi = get_unaligned_be32(src_mac); + l2->src_mac_lo = get_unaligned_be16(src_mac + 4); + + if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T) + l2->etype = ETH_P_IPV6; + else + l2->etype = ETH_P_IP; + + return 0; +} + +int mtk_foe_entry_clear_ttl(struct mtk_eth *eth, struct mtk_foe_entry *entry) +{ + if (mtk_is_netsys_v2_or_greater(eth)) { + entry->ib1 &= ~MTK_FOE_IB1_BIND_TTL_V2; + } else { + entry->ib1 &= ~MTK_FOE_IB1_BIND_TTL; + } + + return 0; +} + +int mtk_foe_entry_set_pse_port(struct mtk_eth *eth, + struct mtk_foe_entry *entry, u8 port) +{ + u32 *ib2 = mtk_foe_entry_ib2(eth, entry); + u32 val = *ib2; + + if (mtk_is_netsys_v2_or_greater(eth)) { + val &= ~MTK_FOE_IB2_DEST_PORT_V2; + val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port); + } else { + val &= ~MTK_FOE_IB2_DEST_PORT; + val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port); + } + *ib2 = val; + + return 0; +} + +int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth, + struct mtk_foe_entry *entry, bool egress, + __be32 src_addr, __be16 src_port, + __be32 dest_addr, __be16 dest_port) +{ + int type = mtk_get_ib1_pkt_type(eth, entry->ib1); + struct mtk_ipv4_tuple *t; + + switch (type) { + case MTK_PPE_PKT_TYPE_IPV4_HNAPT: + if (egress) { + t = &entry->ipv4.new; + break; + } + fallthrough; + case MTK_PPE_PKT_TYPE_IPV4_DSLITE: + case MTK_PPE_PKT_TYPE_IPV4_ROUTE: + t = &entry->ipv4.orig; + break; + case MTK_PPE_PKT_TYPE_IPV6_6RD: + entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr); + entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr); + return 0; + default: + WARN_ON_ONCE(1); + return -EINVAL; + } + + t->src_ip = be32_to_cpu(src_addr); + t->dest_ip = be32_to_cpu(dest_addr); + + if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE) + return 0; + + t->src_port = be16_to_cpu(src_port); + t->dest_port = be16_to_cpu(dest_port); + + return 0; +} + +int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth, + struct mtk_foe_entry *entry, + __be32 *src_addr, __be16 src_port, + __be32 *dest_addr, __be16 dest_port) +{ + int type = mtk_get_ib1_pkt_type(eth, entry->ib1); + u32 *src, *dest; + int i; + + switch (type) { + case MTK_PPE_PKT_TYPE_IPV4_DSLITE: + src = entry->dslite.tunnel_src_ip; + dest = entry->dslite.tunnel_dest_ip; + break; + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T: + case MTK_PPE_PKT_TYPE_IPV6_6RD: + entry->ipv6.src_port = be16_to_cpu(src_port); + entry->ipv6.dest_port = be16_to_cpu(dest_port); + fallthrough; + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T: + src = 
entry->ipv6.src_ip; + dest = entry->ipv6.dest_ip; + break; + default: + WARN_ON_ONCE(1); + return -EINVAL; + }; + + for (i = 0; i < 4; i++) + src[i] = be32_to_cpu(src_addr[i]); + for (i = 0; i < 4; i++) + dest[i] = be32_to_cpu(dest_addr[i]); + + return 0; +} + +int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry, int port) +{ + struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry); + + l2->etype = BIT(port); + + if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth))) + entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1); + else + l2->etype |= BIT(8); + + entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth); + + return 0; +} + +int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry, int vid) +{ + struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry); + + switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) { + case 0: + entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) | + mtk_prep_ib1_vlan_layer(eth, 1); + l2->vlan1 = vid; + return 0; + case 1: + if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) { + l2->vlan1 = vid; + l2->etype |= BIT(8); + } else { + l2->vlan2 = vid; + entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1); + } + return 0; + default: + return -ENOSPC; + } +} + +int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry, int sid) +{ + struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry); + + if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) || + (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) + l2->etype = ETH_P_PPP_SES; + + entry->ib1 |= mtk_get_ib1_ppoe_mask(eth); + l2->pppoe_id = sid; + + return 0; +} + +int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry, + int wdma_idx, int txq, int bss, int wcid, + bool amsdu_en) +{ + struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry); + u32 *ib2 = mtk_foe_entry_ib2(eth, entry); + + switch (eth->soc->version) { + case 3: + *ib2 &= ~MTK_FOE_IB2_PORT_MG_V2; + *ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) | + MTK_FOE_IB2_WDMA_WINFO_V2; + l2->w3info = FIELD_PREP(MTK_FOE_WINFO_WCID_V3, wcid) | + FIELD_PREP(MTK_FOE_WINFO_BSS_V3, bss); + l2->amsdu = FIELD_PREP(MTK_FOE_WINFO_AMSDU_EN, amsdu_en); + break; + case 2: + *ib2 &= ~MTK_FOE_IB2_PORT_MG_V2; + *ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) | + MTK_FOE_IB2_WDMA_WINFO_V2; + l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) | + FIELD_PREP(MTK_FOE_WINFO_BSS, bss); + break; + default: + *ib2 &= ~MTK_FOE_IB2_PORT_MG; + *ib2 |= MTK_FOE_IB2_WDMA_WINFO; + if (wdma_idx) + *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX; + l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) | + FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) | + FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq); + break; + } + + return 0; +} + +static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry) +{ + return !(entry->ib1 & MTK_FOE_IB1_STATIC) && + FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND; +} + +int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry, + unsigned int queue) +{ + u32 *ib2 = mtk_foe_entry_ib2(eth, entry); + + if (mtk_is_netsys_v2_or_greater(eth)) { + *ib2 &= ~MTK_FOE_IB2_QID_V2; + *ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue); + *ib2 |= MTK_FOE_IB2_PSE_QOS_V2; + } else { + *ib2 &= ~MTK_FOE_IB2_QID; + *ib2 |= FIELD_PREP(MTK_FOE_IB2_QID, queue); + *ib2 |= MTK_FOE_IB2_PSE_QOS; + } + + return 0; +} + +int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, + u16 timestamp, u32 orig_hash) +{ + struct mtk_foe_entry *hwe; + u32 hash; + + if (mtk_is_netsys_v2_or_greater(ppe->eth)) { + entry->ib1 &= 
~MTK_FOE_IB1_BIND_TIMESTAMP_V2; + entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2, + timestamp); + } else { + entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP; + entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, + timestamp); + } + + hash = orig_hash; + hwe = mtk_foe_get_entry(ppe, hash); + if (!mtk_foe_entry_usable(hwe)) + return -ENOSPC; + memcpy(&hwe->data, &entry->data, ppe->eth->soc->foe_entry_size - sizeof(hwe->ib1)); + wmb(); + hwe->ib1 = entry->ib1; + + dma_wmb(); + + mtk_ppe_cache_clear(ppe); + + return hash; +} + +void mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash) +{ + struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash); + hwe->ib1 &= ~MTK_FOE_IB1_STATE; + hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID); + dma_wmb(); + mtk_ppe_cache_clear(ppe); +} + +static int mtk_ppe_mib_wait_busy(struct mtk_ppe *ppe) +{ + int ret; + u32 val; + + ret = readl_poll_timeout_atomic(ppe->base + MTK_PPE_MIB_SER_CR, val, + !(val & MTK_PPE_MIB_SER_CR_ST), + 20, MTK_PPE_WAIT_TIMEOUT_US); + + if (ret) + dev_err(ppe->dev, "MIB table busy"); + + return ret; +} + +void mtk_ppe_read_mib(struct mtk_ppe *ppe, unsigned int hash, struct mtk_foe_accounting *diff) +{ + u32 val, cnt_r0, cnt_r1, cnt_r2; + int ret; + + val = FIELD_PREP(MTK_PPE_MIB_SER_CR_ADDR, hash) | MTK_PPE_MIB_SER_CR_ST; + ppe_w32(ppe, MTK_PPE_MIB_SER_CR, val); + + ret = mtk_ppe_mib_wait_busy(ppe); + if (ret) + return; + + cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0); + cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1); + cnt_r2 = readl(ppe->base + MTK_PPE_MIB_SER_R2); + + if (mtk_is_netsys_v3_or_greater(ppe->eth)) { + /* 64 bit for each counter */ + u32 cnt_r3 = readl(ppe->base + MTK_PPE_MIB_SER_R3); + diff->bytes += ((u64)cnt_r1 << 32) | cnt_r0; + diff->packets += ((u64)cnt_r3 << 32) | cnt_r2; + } else { + /* 48 bit byte counter, 40 bit packet counter */ + u32 byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0); + u32 byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1); + u32 pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1); + u32 pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2); + diff->bytes += ((u64)byte_cnt_high << 32) | byte_cnt_low; + diff->packets += (pkt_cnt_high << 16) | pkt_cnt_low; + } +} + +int mtk_ppe_prepare_reset(struct mtk_ppe *ppe) +{ + if (!ppe) + return -EINVAL; + + /* disable KA */ + ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE); + ppe_clear(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE); + ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0); + usleep_range(10000, 11000); + + /* set KA timer to maximum */ + ppe_set(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE); + ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0xffffffff); + + /* set KA tick select */ + ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_TICK_SEL); + ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE); + usleep_range(10000, 11000); + + /* disable scan mode */ + ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_SCAN_MODE); + usleep_range(10000, 11000); + + return mtk_ppe_wait_busy(ppe); +} + +struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index) +{ + bool accounting = eth->soc->has_accounting; + const struct mtk_soc_data *soc = eth->soc; + struct device *dev = eth->dev; + struct mtk_foe_entry *foe; + struct mtk_ppe *ppe; + struct mtk_mib_entry *mib; + + ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL); + if (!ppe) + return NULL; + + /* need to allocate a separate device, since it PPE DMA access is + * not coherent. 
+ */ + ppe->base = base; + ppe->eth = eth; + ppe->dev = dev; + ppe->version = eth->soc->offload_version; + ppe->accounting = accounting; + + foe = dmam_alloc_coherent(ppe->dev, + MTK_PPE_ENTRIES * soc->foe_entry_size, + &ppe->foe_phys, GFP_KERNEL); + if (!foe) + return NULL; + + ppe->foe_table = foe; + + if (accounting) { + mib = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*mib), + &ppe->mib_phys, GFP_KERNEL); + if (!mib) + return NULL; + + memset(mib, 0, MTK_PPE_ENTRIES * sizeof(*mib)); + + ppe->mib_table = mib; + } + + mtk_ppe_debugfs_init(ppe, index); + + return ppe; +} + +static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe) +{ + static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 }; + int i, k; + + memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size); + + if (!IS_ENABLED(CONFIG_SOC_MT7621)) + return; + + /* skip all entries that cross the 1024 byte boundary */ + for (i = 0; i < MTK_PPE_ENTRIES; i += 128) { + for (k = 0; k < ARRAY_SIZE(skip); k++) { + struct mtk_foe_entry *hwe; + + hwe = mtk_foe_get_entry(ppe, i + skip[k]); + hwe->ib1 |= MTK_FOE_IB1_STATIC; + } + } +} + +void mtk_ppe_start(struct mtk_ppe *ppe) +{ + u32 val; + + if (!ppe) + return; + + memset(mtk_ppe_account_group_entry, 0, sizeof(*mtk_ppe_account_group_entry) * 64); + + mtk_ppe_init_foe_table(ppe); + ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys); + + val = MTK_PPE_TB_CFG_AGE_NON_L4 | + MTK_PPE_TB_CFG_AGE_UNBIND | + MTK_PPE_TB_CFG_AGE_TCP | + MTK_PPE_TB_CFG_AGE_UDP | + MTK_PPE_TB_CFG_AGE_TCP_FIN | + FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS, + MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) | + FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE, + MTK_PPE_KEEPALIVE_DUP_CPU) | + FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) | + FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE, + MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) | + FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM, + MTK_PPE_ENTRIES_SHIFT); + if (mtk_is_netsys_v2_or_greater(ppe->eth)) + val |= MTK_PPE_TB_CFG_INFO_SEL; + if (!mtk_is_netsys_v3_or_greater(ppe->eth)) + val |= MTK_PPE_TB_CFG_ENTRY_80B; + ppe_w32(ppe, MTK_PPE_TB_CFG, val); + + ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK, + MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6); + + mtk_ppe_cache_enable(ppe, true); + + val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE | + MTK_PPE_FLOW_CFG_IP6_5T_ROUTE | + MTK_PPE_FLOW_CFG_IP6_6RD | + MTK_PPE_FLOW_CFG_IP4_NAT | + MTK_PPE_FLOW_CFG_IP4_NAPT | + MTK_PPE_FLOW_CFG_IP4_DSLITE | + MTK_PPE_FLOW_CFG_L2_BRIDGE | + MTK_PPE_FLOW_CFG_IP4_NAT_FRAG; + if (mtk_is_netsys_v2_or_greater(ppe->eth)) + val |= MTK_PPE_MD_TOAP_BYP_CRSN0 | + MTK_PPE_MD_TOAP_BYP_CRSN1 | + MTK_PPE_MD_TOAP_BYP_CRSN2 | + MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY; + else + val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG | + MTK_PPE_FLOW_CFG_IP4_UDP_FRAG; + ppe_w32(ppe, MTK_PPE_FLOW_CFG, val); + + val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) | + FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3); + ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val); + + val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 15) | + FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 2); + ppe_w32(ppe, MTK_PPE_BIND_AGE0, val); + + val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 2) | + FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 15); + ppe_w32(ppe, MTK_PPE_BIND_AGE1, val); + + val = FIELD_PREP(MTK_PPE_KEEPALIVE_TIME, 1) | + FIELD_PREP(MTK_PPE_KEEPALIVE_TIME_TCP, 1) | + FIELD_PREP(MTK_PPE_KEEPALIVE_TIME_UDP, 1); + ppe_w32(ppe, MTK_PPE_KEEPALIVE, val); + + val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF; + ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val); + + val = MTK_PPE_BIND_LIMIT1_FULL | + 
FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1); + ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val); + + val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) | + FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1); + ppe_w32(ppe, MTK_PPE_BIND_RATE, val); + + /* enable PPE */ + val = MTK_PPE_GLO_CFG_EN | + MTK_PPE_GLO_CFG_IP4_L4_CS_DROP | + MTK_PPE_GLO_CFG_IP4_CS_DROP | + MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE; + ppe_m32(ppe, MTK_PPE_GLO_CFG, val | MTK_PPE_GLO_CFG_TTL0_DROP, val); + + ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0); + + if (mtk_is_netsys_v2_or_greater(ppe->eth)) { + ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777); + ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f); + } + + if (ppe->accounting && ppe->mib_phys) { + memset(ppe->mib_table, 0, MTK_PPE_ENTRIES * sizeof(struct mtk_mib_entry)); + ppe_w32(ppe, MTK_PPE_MIB_TB_BASE, ppe->mib_phys); + ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_EN, + MTK_PPE_MIB_CFG_EN); + ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_RD_CLR, + MTK_PPE_MIB_CFG_RD_CLR); + ppe_m32(ppe, MTK_PPE_MIB_CACHE_CTL, MTK_PPE_MIB_CACHE_CTL_EN, + MTK_PPE_MIB_CFG_RD_CLR); + } +} + +int mtk_ppe_stop(struct mtk_ppe *ppe) +{ + u32 val; + int i; + + if (!ppe) + return 0; + + for (i = 0; i < MTK_PPE_ENTRIES; i++) { + struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i); + + hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE, + MTK_FOE_STATE_INVALID); + } + + mtk_ppe_cache_enable(ppe, false); + + /* disable offload engine */ + ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN); + ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0); + + /* disable aging */ + val = MTK_PPE_TB_CFG_AGE_NON_L4 | + MTK_PPE_TB_CFG_AGE_UNBIND | + MTK_PPE_TB_CFG_AGE_TCP | + MTK_PPE_TB_CFG_AGE_UDP | + MTK_PPE_TB_CFG_AGE_TCP_FIN; + ppe_clear(ppe, MTK_PPE_TB_CFG, val); + + return mtk_ppe_wait_busy(ppe); +} --- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c @@ -82,7 +82,6 @@ mtk_ppe_debugfs_foe_show(struct seq_file struct mtk_foe_entry *entry = mtk_foe_get_entry(ppe, i); struct mtk_foe_mac_info *l2; struct mtk_flow_addr_info ai = {}; - struct mtk_foe_accounting *acct; unsigned char h_source[ETH_ALEN]; unsigned char h_dest[ETH_ALEN]; int type, state; @@ -96,8 +95,6 @@ mtk_ppe_debugfs_foe_show(struct seq_file if (bind && state != MTK_FOE_STATE_BIND) continue; - acct = mtk_ppe_mib_entry_read(ppe, i); - type = mtk_get_ib1_pkt_type(ppe->eth, entry->ib1); seq_printf(m, "%05x %s %7s", i, mtk_foe_entry_state_str(state), @@ -156,11 +153,9 @@ mtk_ppe_debugfs_foe_show(struct seq_file *((__be16 *)&h_dest[4]) = htons(l2->dest_mac_lo); seq_printf(m, " eth=%pM->%pM etype=%04x" - " vlan=%d,%d ib1=%08x ib2=%08x" - " packets=%llu bytes=%llu\n", + " vlan=%d,%d ib1=%08x ib2=%08x\n", h_source, h_dest, ntohs(l2->etype), - l2->vlan1, l2->vlan2, entry->ib1, ib2, - acct ? acct->packets : 0, acct ? 
acct->bytes : 0); + l2->vlan1, l2->vlan2, entry->ib1, ib2); } return 0; --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload1.c @@ -0,0 +1,527 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 Felix Fietkau + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mtk_eth_soc.h" +#ifdef CONFIG_NET_MEDIATEK_SOC_WED +#include "mtk_wed.h" +#endif + +static struct timer_list ag_timer; +static void *ag_timer_eth = NULL; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +static void mtk_ppe_account_group_walk(unsigned long ignore) +#else +static void mtk_ppe_account_group_walk(struct timer_list *ignore) +#endif +{ + u32 i; + unsigned long long bytes, packets; + struct mtk_ppe_account_group *ag; + struct mtk_eth *eth = (struct mtk_eth *)ag_timer_eth; + int (*func)(unsigned int, unsigned long, unsigned long, unsigned int *, unsigned int *, int, unsigned long); + for (i = 1; i < 64; i++) { + ag = mtk_ppe_account_group_get(i); + if (ag->state == MTK_FOE_STATE_BIND) { + bytes = mtk_r32(eth, 0x2000 + i * 16); + bytes += ((unsigned long long)mtk_r32(eth, 0x2000 + i * 16 + 4)) << 32; + packets = mtk_r32(eth, 0x2000 + i * 16 + 8); + if (bytes > 0 || packets > 0) { + ag->jiffies = jiffies; + ag->bytes += bytes; + ag->packets += packets; + } + ag->speed_bytes[(jiffies/HZ/2) % 4] += (unsigned int)bytes; + ag->speed_packets[(jiffies/HZ/2) % 4] += (unsigned int)packets; + + if ((func = ag->priv) != NULL && (((jiffies/HZ) % 2 == 0 && i % 2 == 0) || ((jiffies/HZ) % 2 == 1 && i % 2 == 1)) ) { + struct mtk_foe_entry *entry = mtk_foe_get_entry(eth->ppe[0], ag->hash); + if (FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) == MTK_FOE_STATE_BIND && bytes > 0 && packets > 0) { + bytes = ag->bytes; + packets = ag->packets; + if (func(ag->hash, bytes, packets, ag->speed_bytes, ag->speed_packets, 1, jiffies) != 0) { + mtk_flow_offload_foe_delete(eth, ag->hash); + ag->state = MTK_FOE_STATE_INVALID; + } + ag->bytes = 0; + ag->packets = 0; + } else { + ag->priv = NULL; + } + } + + if (time_before(ag->jiffies + 15 * HZ, jiffies)) { + ag->state = MTK_FOE_STATE_INVALID; + } + } else if (ag->state == MTK_FOE_STATE_FIN) { + if (time_before(ag->jiffies + 15 * HZ, jiffies)) { + ag->state = MTK_FOE_STATE_INVALID; + } + } + } + + mod_timer(&ag_timer, jiffies + HZ * 1); +} + +static void mtk_ppe_account_group_walk_stop(void) +{ + u32 i; + struct mtk_ppe_account_group *ag; + for (i = 1; i < 64; i++) { + ag = mtk_ppe_account_group_get(i); + if (ag->state == MTK_FOE_STATE_BIND) { + ag->state = MTK_FOE_STATE_INVALID; + } + } +} + +#ifdef CONFIG_NET_MEDIATEK_SOC_WED +static int +mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info) +{ + struct net_device_path_stack stack; + struct net_device_path *path; + int err; + + if (!dev) + return -ENODEV; + + if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)) + return -1; + + err = dev_fill_forward_path(dev, addr, &stack); + if (err) + return err; + + path = &stack.path[stack.num_paths - 1]; + if (path->type != DEV_PATH_MTK_WDMA) + return -1; + + info->wdma_idx = path->mtk_wdma.wdma_idx; + info->queue = path->mtk_wdma.queue; + info->bss = path->mtk_wdma.bss; + info->wcid = path->mtk_wdma.wcid; + info->amsdu = path->mtk_wdma.amsdu; + + return 0; +} +#endif + +static int +mtk_offload_prepare_v4(struct mtk_eth *eth, struct mtk_foe_entry *entry, + flow_offload_tuple_t *s_tuple, + flow_offload_tuple_t *d_tuple, + flow_offload_hw_path_t *src, + flow_offload_hw_path_t *dest) 
+{ + int pse_port = PSE_GDM1_PORT; + + if (dest->dev == eth->netdev[1]) + pse_port = PSE_GDM2_PORT; + else if (dest->dev == eth->netdev[2]) + pse_port = PSE_GDM3_PORT; + + pse_port = (dest->dev->netdev_ops->ndo_flow_offload ? pse_port : PSE_ADMA_PORT); + if (dest->flags & FLOW_OFFLOAD_PATH_EXTDEV) + pse_port = PSE_ADMA_PORT; + + mtk_foe_entry_prepare(eth, entry, MTK_PPE_PKT_TYPE_IPV4_HNAPT, s_tuple->l4proto, + pse_port, dest->eth_src, dest->eth_dest); + mtk_foe_entry_set_ipv4_tuple(eth, entry, false, + s_tuple->src_v4.s_addr, s_tuple->src_port, + s_tuple->dst_v4.s_addr, s_tuple->dst_port); + mtk_foe_entry_set_ipv4_tuple(eth, entry, true, + d_tuple->dst_v4.s_addr, d_tuple->dst_port, + d_tuple->src_v4.s_addr, d_tuple->src_port); + + if (dest->flags & FLOW_OFFLOAD_PATH_PPPOE) + mtk_foe_entry_set_pppoe(eth, entry, dest->pppoe_sid); + + if (dest->flags & FLOW_OFFLOAD_PATH_VLAN) + mtk_foe_entry_set_vlan(eth, entry, dest->vlan_id); + + if (dest->flags & FLOW_OFFLOAD_PATH_BRIDGE) + mtk_foe_entry_clear_ttl(eth, entry); + +#ifdef CONFIG_NET_MEDIATEK_SOC_WED + if (!(dest->flags & FLOW_OFFLOAD_PATH_WED_DIS) && !dest->dev->netdev_ops->ndo_flow_offload && dest->dev->netdev_ops->ndo_fill_forward_path) { + int err; + struct mtk_wdma_info info = {}; + + if (mtk_flow_get_wdma_info(dest->dev, dest->eth_dest, &info) == 0) { + mtk_foe_entry_set_wdma(eth, entry, info.wdma_idx, info.queue, info.bss, info.wcid, info.amsdu); + if (mtk_is_netsys_v2_or_greater(eth)) { + switch (info.wdma_idx) { + case 0: + pse_port = PSE_WDMA0_PORT; + break; + case 1: + pse_port = PSE_WDMA1_PORT; + break; + default: + return -EINVAL; + } + } else { + pse_port = PSE_PPE0_PORT; + } + + if (info.wdma_idx >= 0 && (err = mtk_wed_flow_add_nolock(info.wdma_idx)) < 0) + return err; + } + } +#endif + + if (dest->dsa_port != 0xffff) { + mtk_foe_entry_set_dsa(eth, entry, dest->dsa_port); + if (pse_port == PSE_GDM1_PORT || pse_port == PSE_GDM2_PORT || pse_port == PSE_GDM3_PORT) + mtk_foe_entry_set_queue(eth, entry, 3 + (pse_port == 1 ? 0 : 6) + dest->dsa_port); + } else { + if (pse_port == PSE_GDM1_PORT || pse_port == PSE_GDM2_PORT || pse_port == PSE_GDM3_PORT) + mtk_foe_entry_set_queue(eth, entry, pse_port - 1); + } + + mtk_foe_entry_set_pse_port(eth, entry, pse_port); + + return 0; +} + +static int +mtk_offload_prepare_v6(struct mtk_eth *eth, struct mtk_foe_entry *entry, + flow_offload_tuple_t *s_tuple, + flow_offload_tuple_t *d_tuple, + flow_offload_hw_path_t *src, + flow_offload_hw_path_t *dest) +{ + int pse_port = PSE_GDM1_PORT; + + if (dest->dev == eth->netdev[1]) + pse_port = PSE_GDM2_PORT; + else if (dest->dev == eth->netdev[2]) + pse_port = PSE_GDM3_PORT; + + pse_port = (dest->dev->netdev_ops->ndo_flow_offload ? 
pse_port : PSE_ADMA_PORT); + if (dest->flags & FLOW_OFFLOAD_PATH_EXTDEV) + pse_port = PSE_ADMA_PORT; + + mtk_foe_entry_prepare(eth, entry, MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T, s_tuple->l4proto, + pse_port, dest->eth_src, dest->eth_dest); + mtk_foe_entry_set_ipv6_tuple(eth, entry, + s_tuple->src_v6.s6_addr32, s_tuple->src_port, + s_tuple->dst_v6.s6_addr32, s_tuple->dst_port); + + if (dest->flags & FLOW_OFFLOAD_PATH_PPPOE) + mtk_foe_entry_set_pppoe(eth, entry, dest->pppoe_sid); + + if (dest->flags & FLOW_OFFLOAD_PATH_VLAN) + mtk_foe_entry_set_vlan(eth, entry, dest->vlan_id); + + if (dest->flags & FLOW_OFFLOAD_PATH_BRIDGE) + mtk_foe_entry_clear_ttl(eth, entry); + +#ifdef CONFIG_NET_MEDIATEK_SOC_WED + if (!(dest->flags & FLOW_OFFLOAD_PATH_WED_DIS) && !dest->dev->netdev_ops->ndo_flow_offload && dest->dev->netdev_ops->ndo_fill_forward_path) { + int err; + struct mtk_wdma_info info = {}; + + if (mtk_flow_get_wdma_info(dest->dev, dest->eth_dest, &info) == 0) { + mtk_foe_entry_set_wdma(eth, entry, info.wdma_idx, info.queue, info.bss, info.wcid, info.amsdu); + if (mtk_is_netsys_v2_or_greater(eth)) { + switch (info.wdma_idx) { + case 0: + pse_port = PSE_WDMA0_PORT; + break; + case 1: + pse_port = PSE_WDMA1_PORT; + break; + case 2: + pse_port = PSE_WDMA2_PORT; + break; + default: + return -EINVAL; + } + } else { + pse_port = PSE_PPE0_PORT; + } + + if (info.wdma_idx >= 0 && (err = mtk_wed_flow_add_nolock(info.wdma_idx)) < 0) + return err; + } + } +#endif + + if (dest->dsa_port != 0xffff) { + mtk_foe_entry_set_dsa(eth, entry, dest->dsa_port); + if (pse_port == PSE_GDM1_PORT || pse_port == PSE_GDM2_PORT || pse_port == PSE_GDM3_PORT) + mtk_foe_entry_set_queue(eth, entry, 3 + (pse_port == 1 ? 0 : 6) + dest->dsa_port); + } else { + if (pse_port == PSE_GDM1_PORT || pse_port == PSE_GDM2_PORT || pse_port == PSE_GDM3_PORT) + mtk_foe_entry_set_queue(eth, entry, pse_port - 1); + } + + mtk_foe_entry_set_pse_port(eth, entry, pse_port); + + return 0; +} + +int mtk_flow_offload_add(struct mtk_eth *eth, flow_offload_type_t type, + flow_offload_t *flow, + flow_offload_hw_path_t *src, + flow_offload_hw_path_t *dest) +{ + flow_offload_tuple_t *otuple = &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple; + flow_offload_tuple_t *rtuple = &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple; + struct mtk_foe_entry orig, reply; + int ohash, rhash; + u32 timestamp; + u32 ag_idx; + struct mtk_ppe_account_group *ag; + + if (otuple->l4proto != IPPROTO_TCP && otuple->l4proto != IPPROTO_UDP) + return -EINVAL; + + if (type == FLOW_OFFLOAD_DEL) { + rhash = (unsigned long)flow->timeout; + ohash = rhash >> 16; + rhash &= 0xffff; + mtk_foe_entry_clear(eth->ppe[0], ohash); + mtk_foe_entry_clear(eth->ppe[0], rhash); + rcu_assign_pointer(eth->foe_flow_table[ohash], NULL); + rcu_assign_pointer(eth->foe_flow_table[rhash], NULL); + return 0; + } + + switch (otuple->l3proto) { + case AF_INET: + if (mtk_offload_prepare_v4(eth, &orig, otuple, rtuple, src, dest) || + mtk_offload_prepare_v4(eth, &reply, rtuple, otuple, dest, src)) + return -EINVAL; + break; + case AF_INET6: + if (mtk_offload_prepare_v6(eth, &orig, otuple, rtuple, src, dest) || + mtk_offload_prepare_v6(eth, &reply, rtuple, otuple, dest, src)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + timestamp = mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth); + + ohash = mtk_foe_entry_commit(eth->ppe[0], &orig, timestamp, ((flow->timeout >> 16) & 0xffff)); + if (ohash < 0) + return -EINVAL; + + rhash = mtk_foe_entry_commit(eth->ppe[0], &reply, timestamp, ((flow->timeout >> 0) & 
0xffff)); + if (rhash < 0) { + mtk_foe_entry_clear(eth->ppe[0], ohash); + return -EINVAL; + } + + if (mtk_is_netsys_v1(eth)) { + //sync ag hash with foe hash + ag_idx = FIELD_GET(MTK_FOE_IB2_PORT_AG, orig.ipv4.ib2); + ag = mtk_ppe_account_group_get(ag_idx); + if (ag) { + ag->priv = NULL; + ag->hash = ohash; + ag->state = MTK_FOE_STATE_BIND; + } + + ag_idx = FIELD_GET(MTK_FOE_IB2_PORT_AG, reply.ipv4.ib2); + ag = mtk_ppe_account_group_get(ag_idx); + if (ag) { + ag->priv = NULL; + ag->hash = rhash; + ag->state = MTK_FOE_STATE_BIND; + } + } + + rcu_assign_pointer(eth->foe_flow_table[ohash], flow); + rcu_assign_pointer(eth->foe_flow_table[rhash], flow); + + /* XXX: also the same was set in natflow + rhash |= ohash << 16; + flow->timeout = (void *)(unsigned long)rhash; + */ + + return 0; +} + +static struct mtk_eth *ppe_eth = NULL; +static int mtk_flow_offload_add_extdev(flow_offload_type_t type, + flow_offload_t *flow, + flow_offload_hw_path_t *src, + flow_offload_hw_path_t *dest) +{ + if (ppe_eth) { + return mtk_flow_offload_add(ppe_eth, type, flow, src, dest); + } + + return -EINVAL; +} + +void mtk_flow_offload_foe_delete(struct mtk_eth *eth, u16 hash) +{ + mtk_foe_entry_clear(eth->ppe[0], hash); + rcu_assign_pointer(eth->foe_flow_table[hash], NULL); +} + +void mtk_flow_offload_stop(void) +{ + int i; + struct mtk_eth *eth = (struct mtk_eth *)ag_timer_eth; + + if (eth) { + for (i = 0; i < MTK_PPE_ENTRIES; i++) { + rcu_assign_pointer(eth->foe_flow_table[i], NULL); + } + mtk_ppe_account_group_walk_stop(); + } +} + +void mtk_offload_keepalive(struct mtk_eth *eth, unsigned int hash) +{ + flow_offload_t *flow; + + rcu_read_lock(); + flow = rcu_dereference(eth->foe_flow_table[hash]); + if (flow) { + int (*func)(unsigned int, unsigned long, unsigned long, unsigned int *, unsigned int *, int, unsigned long); + func = (void *)flow->priv; + if (mtk_is_netsys_v2_or_greater(eth) && func) { + if ( (((jiffies/HZ) % 2 == 0 && (hash/4) % 2 == 0) || ((jiffies/HZ) % 2 == 1 && (hash/4) % 2 == 1)) ) { + struct mtk_foe_accounting diff = {}; + mtk_ppe_read_mib(eth->ppe[0], hash, &diff); + if (func(hash, diff.bytes, diff.packets, NULL, NULL, 2, jiffies) != 0) { + mtk_flow_offload_foe_delete(eth, hash); + } + } + } else if (func) { + struct mtk_foe_entry *entry = mtk_foe_get_entry(eth->ppe[0], hash); + u32 ag_idx = FIELD_GET(MTK_FOE_IB2_PORT_AG, entry->ipv4.ib2); + struct mtk_ppe_account_group *ag = mtk_ppe_account_group_get(ag_idx); + if (ag && ag->state == MTK_FOE_STATE_BIND && ag->hash == hash && ag->priv != func) { + unsigned long bytes = ag->bytes; + unsigned long packets = ag->packets; + if (func(hash, bytes, packets, ag->speed_bytes, ag->speed_packets, 1, jiffies) != 0) { + mtk_flow_offload_foe_delete(eth, hash); + ag->state = MTK_FOE_STATE_INVALID; + } + ag->bytes -= bytes; + ag->packets -= packets; + if (ag->priv != (void *)func) + ag->priv = func; + } else { + if (func(hash, 0, 0, NULL, NULL, 1, jiffies) != 0) { + mtk_flow_offload_foe_delete(eth, hash); + } + } + } + } + rcu_read_unlock(); +} + +int mtk_offload_check_rx(struct mtk_eth *eth, struct sk_buff *skb, u32 rxd4) +{ + unsigned int hash; + + switch (FIELD_GET(MTK_RXD4_PPE_CPU_REASON, rxd4)) { + case MTK_PPE_CPU_REASON_KEEPALIVE_UC_OLD_HDR: + case MTK_PPE_CPU_REASON_KEEPALIVE_MC_NEW_HDR: + case MTK_PPE_CPU_REASON_KEEPALIVE_DUP_OLD_HDR: + hash = FIELD_GET(MTK_RXD4_FOE_ENTRY, rxd4); + if (hash != MTK_RXD4_FOE_ENTRY) + mtk_offload_keepalive(eth, hash); + return -1; + case MTK_PPE_CPU_REASON_PACKET_SAMPLING: + return -1; + case 
MTK_PPE_CPU_REASON_HIT_BIND_FORCE_CPU: + hash = FIELD_GET(MTK_RXD4_FOE_ENTRY, rxd4); + skb_set_hash(skb, (HWNAT_QUEUE_MAPPING_MAGIC | hash), PKT_HASH_TYPE_L4); + skb->vlan_tci |= HWNAT_QUEUE_MAPPING_MAGIC; + skb->pkt_type = PACKET_HOST; + skb->protocol = eth_hdr(skb)->h_proto; /* fixup protocol */ + return 1; + default: + return 0; + } +} + +int mtk_offload_check_rx_v2(struct mtk_eth *eth, struct sk_buff *skb, u32 rxd5) +{ + unsigned int hash; + + switch (FIELD_GET(MTK_RXD5_PPE_CPU_REASON, rxd5)) { + case MTK_PPE_CPU_REASON_KEEPALIVE_UC_OLD_HDR: + case MTK_PPE_CPU_REASON_KEEPALIVE_MC_NEW_HDR: + case MTK_PPE_CPU_REASON_KEEPALIVE_DUP_OLD_HDR: + hash = FIELD_GET(MTK_RXD5_FOE_ENTRY, rxd5); + if (hash != MTK_RXD5_FOE_ENTRY) + mtk_offload_keepalive(eth, hash); + return -1; + case MTK_PPE_CPU_REASON_PACKET_SAMPLING: + return -1; + case MTK_PPE_CPU_REASON_HIT_BIND_FORCE_CPU: + hash = FIELD_GET(MTK_RXD5_FOE_ENTRY, rxd5); + skb_set_hash(skb, (HWNAT_QUEUE_MAPPING_MAGIC | hash), PKT_HASH_TYPE_L4); + skb->vlan_tci |= HWNAT_QUEUE_MAPPING_MAGIC; + skb->pkt_type = PACKET_HOST; + skb->protocol = eth_hdr(skb)->h_proto; /* fixup protocol */ + return 1; + default: + return 0; + } +} + +int mtk_eth_offload_init(struct mtk_eth *eth) +{ + eth->foe_flow_table = devm_kcalloc(eth->dev, MTK_PPE_ENTRIES, + sizeof(*eth->foe_flow_table), + GFP_KERNEL); + if (!eth->foe_flow_table) + return -ENOMEM; + if (mtk_is_netsys_v1(eth)) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + init_timer(&ag_timer); + ag_timer.data = 0; + ag_timer.function = mtk_ppe_account_group_walk; +#else + timer_setup(&ag_timer, mtk_ppe_account_group_walk, 0); +#endif + ag_timer_eth = eth; + mod_timer(&ag_timer, jiffies + 8 * HZ); + } + + if (mtk_is_netsys_v2_or_greater(eth)) { + ppe_eth = eth; + flow_offload_add_extdev = mtk_flow_offload_add_extdev; + } + + return 0; +} + +void mtk_eth_offload_exit(struct mtk_eth *eth) +{ + if (mtk_is_netsys_v2_or_greater(eth)) { + flow_offload_add_extdev = NULL; + } + + if (mtk_is_netsys_v1(eth)) + del_timer(&ag_timer); + if (eth->foe_flow_table) { + devm_kfree(eth->dev, eth->foe_flow_table); + } +} --- a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h +++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h @@ -171,4 +171,9 @@ enum { #define MTK_PPE_SBW_CTRL 0x374 +#define MTK_PPE_MIB_CAH_TAG_SRH 0X354 +#define MTK_PPE_MIB_CAH_LINE_RW 0X358 +#define MTK_PPE_MIB_CAH_WDATA 0X35c +#define MTK_PPE_MIB_CAH_RDATA 0X360 + #endif --- a/drivers/net/ethernet/mediatek/mtk_wed.c +++ b/drivers/net/ethernet/mediatek/mtk_wed.c @@ -1982,13 +1982,21 @@ mtk_wed_ppe_check(struct mtk_wed_device if (!skb) return; + switch (reason) { + case MTK_PPE_CPU_REASON_KEEPALIVE_UC_OLD_HDR: + case MTK_PPE_CPU_REASON_KEEPALIVE_MC_NEW_HDR: + case MTK_PPE_CPU_REASON_KEEPALIVE_DUP_OLD_HDR: + mtk_offload_keepalive(eth, hash); + break; + } + if (reason != MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) return; skb_set_mac_header(skb, 0); eh = eth_hdr(skb); skb->protocol = eh->h_proto; - mtk_ppe_check_skb(eth->ppe[dev->hw->index], skb, hash); + //mtk_ppe_check_skb(eth->ppe[dev->hw->index], skb, hash); } static void @@ -2655,6 +2663,33 @@ out: return ret; } +int mtk_wed_flow_add_nolock(int index) +{ + struct mtk_wed_hw *hw = hw_list[index]; + int ret = 0; + + if (!hw || !hw->wed_dev) { + ret = -ENODEV; + goto out; + } + + if (!hw->wed_dev->wlan.offload_enable) + goto out; + + if (hw->num_flows) { + hw->num_flows = 1; + goto out; + } + + ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev); + if (!ret) + hw->num_flows++; + mtk_wed_set_ext_int(hw->wed_dev, true); 
+ +out: + return ret; +} + void mtk_wed_flow_remove(int index) { struct mtk_wed_hw *hw = hw_list[index]; @@ -2677,6 +2712,7 @@ out: mutex_unlock(&hw_lock); } +#if 0 static int mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { @@ -2769,6 +2805,25 @@ mtk_wed_setup_tc(struct mtk_wed_device * return -EOPNOTSUPP; } } +#endif +static int +mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev, + enum tc_setup_type type, void *type_data) +{ + struct mtk_wed_hw *hw = wed->hw; + + if (hw->version < 2) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_BLOCK: + case TC_SETUP_FT: + return 0; + default: + return -EOPNOTSUPP; + } + return 0; +} void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, void __iomem *wdma, phys_addr_t wdma_phy, --- a/drivers/net/ethernet/mediatek/mtk_wed.h +++ b/drivers/net/ethernet/mediatek/mtk_wed.h @@ -82,6 +82,8 @@ static inline bool mtk_wed_is_v3_or_grea return hw->version > 2; } +extern void mtk_offload_keepalive(struct mtk_eth *eth, unsigned int hash); + static inline void wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val) { @@ -184,6 +186,7 @@ void mtk_wed_add_hw(struct device_node * int index); void mtk_wed_exit(void); int mtk_wed_flow_add(int index); +int mtk_wed_flow_add_nolock(int index); void mtk_wed_flow_remove(int index); void mtk_wed_fe_reset(void); void mtk_wed_fe_reset_complete(void); @@ -202,6 +205,10 @@ static inline int mtk_wed_flow_add(int i { return -EINVAL; } +static inline int mtk_wed_flow_add_nolock(int index) +{ + return -EINVAL; +} static inline void mtk_wed_flow_remove(int index) { } --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -53,6 +53,8 @@ #include #include +#include + #define PPP_VERSION "2.4.2" /* @@ -1597,6 +1599,26 @@ static int ppp_fill_forward_path(struct return chan->ops->fill_forward_path(ctx, path, chan); } +static int ppp_flow_offload_check(flow_offload_hw_path_t *path) +{ + struct ppp *ppp = netdev_priv(path->dev); + struct ppp_channel *chan; + struct channel *pch; + + if (ppp->flags & SC_MULTILINK) + return -EOPNOTSUPP; + + if (list_empty(&ppp->channels)) + return -ENODEV; + + pch = list_first_entry(&ppp->channels, struct channel, clist); + chan = pch->chan; + if (!chan->ops->flow_offload_check) + return -EOPNOTSUPP; + + return chan->ops->flow_offload_check(chan, path); +} + static const struct net_device_ops ppp_netdev_ops = { .ndo_init = ppp_dev_init, .ndo_uninit = ppp_dev_uninit, @@ -1604,6 +1626,7 @@ static const struct net_device_ops ppp_n .ndo_siocdevprivate = ppp_net_siocdevprivate, .ndo_get_stats64 = ppp_get_stats64, .ndo_fill_forward_path = ppp_fill_forward_path, + .ndo_flow_offload_check = ppp_flow_offload_check, }; static struct device_type ppp_type = { --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -73,6 +73,8 @@ #include #include +#include + #include #include #include @@ -995,9 +997,30 @@ static int pppoe_fill_forward_path(struc return 0; } +static int pppoe_flow_offload_check(struct ppp_channel *chan, + flow_offload_hw_path_t *path) +{ + struct sock *sk = (struct sock *)chan->private; + struct pppox_sock *po = pppox_sk(sk); + struct net_device *dev = po->pppoe_dev; + + if (sock_flag(sk, SOCK_DEAD) || + !(sk->sk_state & PPPOX_CONNECTED) || !dev) + return -ENODEV; + + path->flags |= FLOW_OFFLOAD_PATH_PPPOE; + path->dev = dev; + + if (path->dev->netdev_ops->ndo_flow_offload_check) + return path->dev->netdev_ops->ndo_flow_offload_check(path); + + return 0; +} + static const struct ppp_channel_ops 
 	.start_xmit = pppoe_xmit,
 	.fill_forward_path = pppoe_fill_forward_path,
+	.flow_offload_check = pppoe_flow_offload_check,
 };
 
 static int pppoe_recvmsg(struct socket *sock, struct msghdr *m,
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1034,6 +1034,25 @@ struct dev_ifalias {
 struct devlink;
 struct tlsdev_ops;
 
+#define NATFLOW_OFFLOAD_HWNAT_FAKE
+struct flow_offload_fake;
+struct flow_offload_tuple_fake;
+struct flow_offload_hw_path_fake;
+enum flow_offload_type_fake {
+	FLOW_OFFLOAD_ADD = 0,
+	FLOW_OFFLOAD_DEL,
+};
+
+typedef struct flow_offload_fake flow_offload_t;
+typedef struct flow_offload_tuple_fake flow_offload_tuple_t;
+typedef struct flow_offload_hw_path_fake flow_offload_hw_path_t;
+typedef enum flow_offload_type_fake flow_offload_type_t;
+
+extern int (*flow_offload_add_extdev)(flow_offload_type_t type,
+				      flow_offload_t *flow,
+				      flow_offload_hw_path_t *src,
+				      flow_offload_hw_path_t *dest);
+
 struct netdev_name_node {
 	struct hlist_node hlist;
 	struct list_head list;
@@ -1560,6 +1579,11 @@ struct net_device_ops {
 	int			(*ndo_bridge_dellink)(struct net_device *dev,
 						      struct nlmsghdr *nlh,
 						      u16 flags);
+	int			(*ndo_flow_offload_check)(flow_offload_hw_path_t *path);
+	int			(*ndo_flow_offload)(flow_offload_type_t type,
+						    flow_offload_t *flow,
+						    flow_offload_hw_path_t *src,
+						    flow_offload_hw_path_t *dest);
 	int			(*ndo_change_carrier)(struct net_device *dev,
 						      bool new_carrier);
 	int			(*ndo_get_phys_port_id)(struct net_device *dev,
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -31,6 +31,7 @@ struct ppp_channel_ops {
 	int	(*fill_forward_path)(struct net_device_path_ctx *,
 				     struct net_device_path *,
 				     const struct ppp_channel *);
+	int	(*flow_offload_check)(struct ppp_channel *, flow_offload_hw_path_t *);
 };
 
 struct ppp_channel {
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -176,6 +176,60 @@ struct flow_offload {
 	struct rcu_head				rcu_head;
 };
 
+#define FLOW_OFFLOAD_PATH_ETHERNET	BIT(0)
+#define FLOW_OFFLOAD_PATH_VLAN		BIT(1)
+#define FLOW_OFFLOAD_PATH_PPPOE		BIT(2)
+#define FLOW_OFFLOAD_PATH_DSA		BIT(3)
+#define FLOW_OFFLOAD_PATH_STOP		BIT(4)
+#define FLOW_OFFLOAD_PATH_EXTDEV	BIT(5)
+#define FLOW_OFFLOAD_PATH_WED_DIS	BIT(6)
+#define FLOW_OFFLOAD_PATH_BRIDGE	BIT(7)
+#define FLOW_OFFLOAD_PATH_DEL		BIT(8)
+
+struct flow_offload_tuple_fake {
+	union {
+		struct in_addr		src_v4;
+		struct in6_addr		src_v6;
+	};
+	union {
+		struct in_addr		dst_v4;
+		struct in6_addr		dst_v6;
+	};
+	struct {
+		__be16			src_port;
+		__be16			dst_port;
+	};
+
+	u8				l3proto;
+	u8				l4proto;
+};
+
+struct flow_offload_tuple_rhash_fake {
+	struct flow_offload_tuple_fake	tuple;
+};
+
+struct flow_offload_fake {
+	struct flow_offload_tuple_rhash_fake tuplehash[FLOW_OFFLOAD_DIR_MAX];
+	u32				flags;
+	u32				timeout;
+	union {
+		/* Your private driver data here. */
+		void *priv;
+	};
+};
+
+struct flow_offload_hw_path_fake {
+	struct net_device	*dev;
+	u32			flags;
+
+	u8			eth_src[ETH_ALEN];
+	u8			eth_dest[ETH_ALEN];
+	u16			vlan_proto;
+	u16			vlan_id;
+	u16			pppoe_sid;
+	u16			dsa_port;
+};
+
 #define NF_FLOW_TIMEOUT (30 * HZ)
 #define nf_flowtable_time_stamp	(u32)jiffies
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -26,6 +26,7 @@
 #include
 #include
 #include
+#include
 
 #include "vlan.h"
 #include "vlanproc.h"
@@ -809,6 +810,23 @@ static int vlan_dev_fill_forward_path(st
 	return 0;
 }
 
+static int vlan_dev_flow_offload_check(flow_offload_hw_path_t *path)
+{
+	struct net_device *dev = path->dev;
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+
+	if (path->flags & FLOW_OFFLOAD_PATH_VLAN)
+		return -EEXIST;
+
+	path->flags |= FLOW_OFFLOAD_PATH_VLAN;
+	path->dev = vlan->real_dev;
+
+	if (vlan->real_dev->netdev_ops->ndo_flow_offload_check)
+		return vlan->real_dev->netdev_ops->ndo_flow_offload_check(path);
+
+	return 0;
+}
+
 static const struct ethtool_ops vlan_ethtool_ops = {
 	.get_link_ksettings	= vlan_ethtool_get_link_ksettings,
 	.get_drvinfo		= vlan_ethtool_get_drvinfo,
@@ -848,6 +866,7 @@ static const struct net_device_ops vlan_
 	.ndo_fix_features	= vlan_dev_fix_features,
 	.ndo_get_iflink		= vlan_dev_get_iflink,
 	.ndo_fill_forward_path	= vlan_dev_fill_forward_path,
+	.ndo_flow_offload_check	= vlan_dev_flow_offload_check,
 };
 
 static void vlan_dev_free(struct net_device *dev)
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include
 #include
 
 #include "br_private.h"
@@ -437,6 +438,26 @@ static int br_fill_forward_path(struct n
 	return 0;
 }
 
+static int br_flow_offload_check(flow_offload_hw_path_t *path)
+{
+	struct net_device *dev = path->dev;
+	struct net_bridge *br = netdev_priv(dev);
+	struct net_bridge_fdb_entry *dst;
+
+	if (!(path->flags & FLOW_OFFLOAD_PATH_ETHERNET))
+		return -EINVAL;
+
+	dst = br_fdb_find_rcu(br, path->eth_dest, path->vlan_id);
+	if (!dst || !dst->dst)
+		return -ENOENT;
+
+	path->dev = dst->dst->dev;
+	if (path->dev->netdev_ops->ndo_flow_offload_check)
+		return path->dev->netdev_ops->ndo_flow_offload_check(path);
+
+	return 0;
+}
+
 static const struct ethtool_ops br_ethtool_ops = {
 	.get_drvinfo		 = br_getinfo,
 	.get_link		 = ethtool_op_get_link,
@@ -472,6 +493,7 @@ static const struct net_device_ops br_ne
 	.ndo_bridge_dellink	 = br_dellink,
 	.ndo_features_check	 = passthru_features_check,
 	.ndo_fill_forward_path	 = br_fill_forward_path,
+	.ndo_flow_offload_check	 = br_flow_offload_check,
 };
 
 static struct device_type br_type = {
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -172,6 +172,12 @@ static int call_netdevice_notifiers_exta
 						struct netlink_ext_ack *extack);
 static struct napi_struct *napi_by_id(unsigned int napi_id);
 
+int (*flow_offload_add_extdev)(flow_offload_type_t type,
+			       flow_offload_t *flow,
+			       flow_offload_hw_path_t *src,
+			       flow_offload_hw_path_t *dest) = NULL;
+EXPORT_SYMBOL(flow_offload_add_extdev);
+
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
  * semaphore.
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -20,6 +20,7 @@
 #include
 #include
 #include
+#include
 
 #include "dsa_priv.h"
 
@@ -1743,6 +1744,25 @@ static int dsa_slave_fill_forward_path(s
 	return 0;
 }
 
+static int dsa_flow_offload_check(flow_offload_hw_path_t *path)
+{
+	struct net_device *dev = path->dev;
+	struct dsa_port *dp;
+
+	if (!(path->flags & FLOW_OFFLOAD_PATH_ETHERNET))
+		return -EINVAL;
+
+	dp = dsa_slave_to_port(dev);
+	path->dsa_port = dp->index;
+	path->dev = dsa_slave_to_master(dev);
+	path->flags |= FLOW_OFFLOAD_PATH_DSA;
+
+	if (path->dev->netdev_ops->ndo_flow_offload_check)
+		return path->dev->netdev_ops->ndo_flow_offload_check(path);
+
+	return 0;
+}
+
 static const struct net_device_ops dsa_slave_netdev_ops = {
 	.ndo_open		= dsa_slave_open,
 	.ndo_stop		= dsa_slave_close,
@@ -1767,6 +1787,7 @@ static const struct net_device_ops dsa_s
 	.ndo_get_devlink_port	= dsa_slave_get_devlink_port,
 	.ndo_change_mtu		= dsa_slave_change_mtu,
 	.ndo_fill_forward_path	= dsa_slave_fill_forward_path,
+	.ndo_flow_offload_check	= dsa_flow_offload_check,
 };
 
 static struct device_type dsa_type = {